repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
imgclsmob | imgclsmob-master/keras_/kerascv/models/shufflenet.py | """
ShuffleNet for ImageNet-1K, implemented in Keras.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['shufflenet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv1x1, conv3x3, depthwise_conv3x3, channel_shuffle_lambda, batchnorm, maxpool2d, avgpool2d,\
is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group,
                 name="shuffle_unit"):
    """
    ShuffleNet unit: grouped 1x1 compress convolution, channel shuffle, 3x3
    depthwise convolution and a grouped 1x1 expand convolution, merged with a
    shortcut branch (concatenation when downsampling, addition otherwise).
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    name : str, default 'shuffle_unit'
        Unit name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Bottleneck width is derived from the requested output width *before*
    # the downsample correction below.
    mid_channels = out_channels // 4
    if downsample:
        # The shortcut is concatenated in this case, so the main branch only
        # has to produce the remaining channels.
        out_channels -= in_channels
    compress_groups = 1 if ignore_group else groups
    shortcut = x
    out = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=compress_groups,
        name=name + "/compress_conv1")
    out = batchnorm(
        x=out,
        name=name + "/compress_bn1")
    out = nn.Activation("relu", name=name + "/activ")(out)
    out = channel_shuffle_lambda(
        channels=mid_channels,
        groups=groups,
        name=name + "/c_shuffle")(out)
    out = depthwise_conv3x3(
        x=out,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        name=name + "/dw_conv2")
    out = batchnorm(
        x=out,
        name=name + "/dw_bn2")
    out = conv1x1(
        x=out,
        in_channels=mid_channels,
        out_channels=out_channels,
        groups=groups,
        name=name + "/expand_conv3")
    out = batchnorm(
        x=out,
        name=name + "/expand_bn3")
    if downsample:
        shortcut = avgpool2d(
            x=shortcut,
            pool_size=3,
            strides=2,
            padding=1,
            name=name + "/avgpool")
        out = nn.concatenate([out, shortcut], axis=get_channel_axis(), name=name + "/concat")
    else:
        out = nn.add([out, shortcut], name=name + "/add")
    return nn.Activation("relu", name=name + "/final_activ")(out)
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       name="shuffle_init_block"):
    """
    ShuffleNet specific initial block: strided 3x3 convolution, batch norm,
    ReLU and a strided 3x3 max pooling.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'shuffle_init_block'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    y = conv3x3(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        name=name + "/conv")
    y = batchnorm(
        x=y,
        name=name + "/bn")
    y = nn.Activation("relu", name=name + "/activ")(y)
    return maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        name=name + "/pool")
def shufflenet(channels,
               init_block_channels,
               groups,
               in_channels=3,
               in_size=(224, 224),
               classes=1000):
    """
    ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    Returns:
    -------
    keras.models.Model
        The constructed model, with `in_size` and `classes` attached as
        extra attributes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    # Named `input_layer` (not `input`) to avoid shadowing the builtin.
    input_layer = nn.Input(shape=input_shape)
    x = shuffle_init_block(
        x=input_layer,
        in_channels=in_channels,
        out_channels=init_block_channels,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            # First unit of each stage downsamples; the very first unit of the
            # network additionally skips grouping in its compress convolution.
            downsample = (j == 0)
            ignore_group = (i == 0) and (j == 0)
            x = shuffle_unit(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                groups=groups,
                downsample=downsample,
                ignore_group=ignore_group,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)
    x = flatten(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,  # legacy kwarg, kept for backward compatibility
        name="output")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
def get_shufflenet(groups,
                   width_scale,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".keras", "models"),
                   **kwargs):
    """
    Create ShuffleNet model with specific parameters.
    Parameters:
    ----------
    groups : int
        Number of groups in convolution layers.
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    keras.models.Model
        The requested network.
    """
    init_block_channels = 24
    layers = [4, 8, 4]
    # Per-stage widths for each supported number of groups.
    channels_per_layers_map = {
        1: [144, 288, 576],
        2: [200, 400, 800],
        3: [240, 480, 960],
        4: [272, 544, 1088],
        8: [384, 768, 1536],
    }
    if groups not in channels_per_layers_map:
        raise ValueError("The {} of groups is not supported".format(groups))
    channels_per_layers = channels_per_layers_map[groups]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
    net = shufflenet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def shufflenet_g1_w1(**kwargs):
    """
    ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs)
    return net
def shufflenet_g2_w1(**kwargs):
    """
    ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs)
    return net
def shufflenet_g3_w1(**kwargs):
    """
    ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs)
    return net
def shufflenet_g4_w1(**kwargs):
    """
    ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs)
    return net
def shufflenet_g8_w1(**kwargs):
    """
    ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs)
    return net
def shufflenet_g1_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs)
    return net
def shufflenet_g3_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs)
    return net
def shufflenet_g1_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs)
    return net
def shufflenet_g3_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs)
    return net
def shufflenet_g1_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs)
    return net
def shufflenet_g3_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs)
    return net
def _test():
    """Smoke-test every ShuffleNet variant: parameter count and output shape."""
    import numpy as np
    import keras
    pretrained = False
    # Expected trainable-parameter counts per model constructor.
    expected_counts = {
        shufflenet_g1_w1: 1531936,
        shufflenet_g2_w1: 1733848,
        shufflenet_g3_w1: 1865728,
        shufflenet_g4_w1: 1968344,
        shufflenet_g8_w1: 2434768,
        shufflenet_g1_w3d4: 975214,
        shufflenet_g3_w3d4: 1238266,
        shufflenet_g1_wd2: 534484,
        shufflenet_g3_wd2: 718324,
        shufflenet_g1_wd4: 209746,
        shufflenet_g3_wd4: 305902,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        # net.summary()
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        if is_channels_first():
            batch = np.zeros((1, 3, 224, 224), np.float32)
        else:
            batch = np.zeros((1, 224, 224, 3), np.float32)
        y = net.predict(batch)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 15,527 | 30.689796 | 120 | py |
imgclsmob | imgclsmob-master/keras_/kerascv/models/common.py | """
Common routines for models in Keras.
"""
__all__ = ['round_channels', 'HSwish', 'is_channels_first', 'get_channel_axis', 'update_keras_shape', 'flatten',
'batchnorm', 'lrn', 'maxpool2d', 'avgpool2d', 'conv2d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3',
'conv_block', 'conv1x1_block', 'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block',
'pre_conv_block', 'pre_conv1x1_block', 'pre_conv3x3_block', 'channel_shuffle_lambda', 'se_block']
import math
import numpy as np
from inspect import isfunction
from keras.layers import BatchNormalization
from keras import backend as K
from keras import layers as nn
from keras.engine.base_layer import Layer
def round_channels(channels,
                   divisor=8):
    """
    Round weighted channel number (make divisible operation).
    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.
    Returns:
    -------
    int
        Weighted number of channels.
    """
    # Round half-up to the nearest multiple of `divisor`, but never below it.
    aligned = (int(channels + divisor / 2.0) // divisor) * divisor
    aligned = max(aligned, divisor)
    # Never round down by more than 10% of the requested width.
    if aligned < 0.9 * channels:
        aligned += divisor
    return aligned
class ReLU6(Layer):
    """
    ReLU6 activation layer: computes min(max(x, 0), 6).
    Parameters:
    ----------
    name : str, default 'ReLU6'
        Layer name.
    """
    def __init__(self,
                 name="ReLU6",
                 **kwargs):
        super(ReLU6, self).__init__(name=name, **kwargs)
    def call(self, x):
        # Weight-free, so instantiating the inner ReLU layer per call is
        # harmless (nothing to train or serialize in it).
        return nn.ReLU(max_value=6.0)(x)
class HSigmoid(Layer):
    """
    Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Computes relu6(x + 3) / 6.
    Parameters:
    ----------
    name : str, default 'HSigmoid'
        Layer name.
    """
    def __init__(self,
                 name="HSigmoid",
                 **kwargs):
        super(HSigmoid, self).__init__(name=name, **kwargs)
    def call(self, x):
        # Piecewise-linear approximation of sigmoid: relu6(x + 3) / 6.
        return nn.ReLU(max_value=6.0)(x + 3.0) / 6.0
class HSwish(Layer):
    """
    H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Computes x * relu6(x + 3) / 6, i.e. x * hsigmoid(x).
    Parameters:
    ----------
    name : str, default 'HSwish'
        Layer name.
    """
    def __init__(self,
                 name="HSwish",
                 **kwargs):
        super(HSwish, self).__init__(name=name, **kwargs)
    def call(self, x):
        # x scaled by the hard-sigmoid gate: x * relu6(x + 3) / 6.
        return x * nn.ReLU(max_value=6.0)(x + 3.0) / 6.0
def swish(x,
          name="swish"):
    """
    Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
    Computes x * sigmoid(x).
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    name : str, default 'swish'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    gate = nn.Activation("sigmoid", name=name + "/sigmoid")(x)
    return nn.multiply([x, gate], name=name + "/mul")
def get_activation_layer(x,
                         activation,
                         name="activ"):
    """
    Apply an activation given as a string name, a layer factory (zero-argument
    callable returning a layer), or a callable applied directly to the tensor.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    activation : function or str
        Activation function or name of activation function.
    name : str, default 'activ'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    assert (activation is not None)
    if isfunction(activation):
        # Factory: instantiate the layer, then apply it.
        return activation()(x)
    if not isinstance(activation, str):
        # Already a layer/callable: apply directly.
        return activation(x)
    if activation == "relu":
        return nn.Activation("relu", name=name)(x)
    if activation == "relu6":
        return nn.ReLU(max_value=6.0, name=name)(x)
    if activation == "swish":
        return swish(x=x, name=name)
    if activation == "hswish":
        return HSwish(name=name)(x)
    raise NotImplementedError()
def is_channels_first():
    """
    Is tested data format channels first.
    Returns:
    -------
    bool
        A flag.
    """
    data_format = K.image_data_format()
    return data_format == "channels_first"
def get_channel_axis():
    """
    Get channel axis.
    Returns:
    -------
    int
        Channel axis (1 for channels-first, -1 for channels-last).
    """
    if is_channels_first():
        return 1
    return -1
def update_keras_shape(x):
    """
    Update Keras shape property: backfill `_keras_shape` from `x.shape` when
    missing, mapping unknown (None) and zero dimensions to None.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    """
    if hasattr(x, "_keras_shape"):
        return
    dims = []
    for d in x.shape:
        dims.append(int(d) if (d is not None) and (d != 0) else None)
    x._keras_shape = tuple(dims)
def flatten(x,
            reshape=False):
    """
    Flattens the input to two dimensional.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    reshape : bool, default False
        Whether do reshape instead of flatten.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if is_channels_first():
        if reshape:
            return nn.Reshape((-1,))(x)
        return nn.Flatten()(x)
    def channels_last_flatten(z):
        # Move channels to the front so the flattened element order matches
        # the channels-first layout, then collapse all non-batch dimensions.
        z = K.permute_dimensions(z, pattern=(0, 3, 1, 2))
        z = K.reshape(z, shape=(-1, np.prod(K.int_shape(z)[1:])))
        update_keras_shape(z)
        return z
    return nn.Lambda(channels_last_flatten)(x)
def batchnorm(x,
              momentum=0.9,
              epsilon=1e-5,
              name=None):
    """
    Batch normalization layer.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    name : str, default None
        Layer name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if K.backend() != "mxnet":
        return nn.BatchNormalization(
            axis=get_channel_axis(),
            momentum=momentum,
            epsilon=epsilon,
            name=name)(x)
    # The MXNet backend requires the Gluon-compatible implementation.
    return GluonBatchNormalization(
        momentum=momentum,
        epsilon=epsilon,
        name=name)(x)
def lrn(x,
        alpha=1e-4,
        beta=0.75,
        k=2,
        n=5,
        name=None):
    """
    Local response normalization layer (AlexNet-style LRN), dispatching to the
    native op of the active Keras backend.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    alpha : float, 1e-4
        Variance scaling parameter alpha in the LRN expression.
    beta : float, 0.75
        Power parameter beta in the LRN expression.
    k : float, 2
        Additive parameter k in the LRN expression.
    n : int, 5
        Normalization window width in elements.
    name : str, default None
        Layer name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if K.backend() == "mxnet":
        # Keras-MXNet exposes no LRN layer, so drop to the raw MXNet symbol
        # API and wrap the result back into a Keras symbol.
        from keras.backend.mxnet_backend import keras_mxnet_symbol, KerasSymbol
        import mxnet as mx
        @keras_mxnet_symbol
        def gluon_lrn(x,
                      alpha,
                      beta,
                      k,
                      n):
            # Unwrap any KerasSymbol arguments to plain MXNet symbols.
            if isinstance(x, KerasSymbol):
                x = x.symbol
            if isinstance(alpha, KerasSymbol):
                alpha = alpha.symbol
            if isinstance(beta, KerasSymbol):
                beta = beta.symbol
            if isinstance(k, KerasSymbol):
                k = k.symbol
            if isinstance(n, KerasSymbol):
                n = n.symbol
            return KerasSymbol(mx.sym.LRN(
                data=x,
                alpha=alpha,
                beta=beta,
                knorm=k,
                nsize=n))
        x = nn.Lambda(
            lambda z: gluon_lrn(
                x=z,
                alpha=alpha,
                beta=beta,
                k=k,
                n=n))(x)
    else:
        # NOTE(review): this branch assumes a TensorFlow backend; `name` is
        # passed through to tf.nn.lrn rather than to the Lambda layer.
        import tensorflow as tf
        x = nn.Lambda(
            lambda z: tf.nn.lrn(
                input=z,
                depth_radius=n,
                bias=k,
                alpha=alpha,
                beta=beta,
                name=name))(x)
    return x
def maxpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              name=None):
    """
    Max pooling operation for two dimensional (spatial) data.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    name : str, default None
        Layer name.  NOTE(review): must actually be a string — `name + "/pool"`
        below raises TypeError with the None default; callers in this file
        always pass one.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Normalize scalar arguments to (h, w) pairs.
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    # Only zero or "same"-equivalent padding is representable via Keras.
    assert (padding[0] == 0) or (padding[0] == (pool_size[0] - 1) // 2)
    assert (padding[1] == 0) or (padding[1] == (pool_size[1] - 1) // 2)
    padding_ke = "valid" if padding[0] == 0 else "same"
    if K.backend() == "tensorflow":
        if ceil_mode:
            # Grow padding by one where floor division would drop a partial
            # window, emulating ceil-mode output sizes.
            # NOTE(review): x.shape[2]/x.shape[3] index H/W only for
            # channels-first data — confirm behavior under channels_last.
            height = int(x.shape[2])
            out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
            if math.ceil(out_height) > math.floor(out_height):
                padding = (padding[0] + 1, padding[1])
            width = int(x.shape[3])
            out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
            if math.ceil(out_width) > math.floor(out_width):
                padding = (padding[0], padding[1] + 1)
        if (padding[0] > 0) or (padding[1] > 0):
            # Apply the padding explicitly (reflect mode), then pool "valid".
            import tensorflow as tf
            x = nn.Lambda(
                (lambda z: tf.pad(z, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2], mode="REFLECT"))
                if is_channels_first() else
                (lambda z: tf.pad(z, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]], mode="REFLECT")))(x)
            padding_ke = "valid"
    else:
        if ceil_mode:
            # Approximate ceil mode by switching to "same" padding when a
            # partial window would otherwise be dropped.
            padding0 = 0 if padding_ke == "valid" else strides[0] // 2
            height = x._keras_shape[2 if is_channels_first() else 1]
            out_height = float(height + 2 * padding0 - pool_size[0]) / strides[0] + 1.0
            if math.ceil(out_height) > math.floor(out_height):
                assert (strides[0] <= 3)
                padding_ke = "same"
    x = nn.MaxPool2D(
        pool_size=pool_size,
        strides=strides,
        padding=padding_ke,
        name=name + "/pool")(x)
    return x
def avgpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              name=None):
    """
    Average pooling operation for two dimensional (spatial) data.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    name : str, default None
        Layer name.  NOTE(review): must actually be a string — `name + "/pool"`
        below raises TypeError with the None default; callers in this file
        always pass one.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Normalize scalar arguments to (h, w) pairs.
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    # Only zero or "same"-equivalent padding is representable via Keras.
    assert (padding[0] == 0) or (padding[0] == (pool_size[0] - 1) // 2)
    assert (padding[1] == 0) or (padding[1] == (pool_size[1] - 1) // 2)
    padding_ke = "valid" if padding[0] == 0 else "same"
    if K.backend() == "tensorflow":
        if ceil_mode:
            # Grow padding by one where floor division would drop a partial
            # window, emulating ceil-mode output sizes.
            # NOTE(review): x.shape[2]/x.shape[3] index H/W only for
            # channels-first data — confirm behavior under channels_last.
            height = int(x.shape[2])
            out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
            if math.ceil(out_height) > math.floor(out_height):
                padding = (padding[0] + 1, padding[1])
            width = int(x.shape[3])
            out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
            if math.ceil(out_width) > math.floor(out_width):
                padding = (padding[0], padding[1] + 1)
        if (padding[0] > 0) or (padding[1] > 0):
            # Pad explicitly (reflect mode) so pooling can run "valid".
            import tensorflow as tf
            x = nn.Lambda(
                (lambda z: tf.pad(z, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2], mode="REFLECT"))
                if is_channels_first() else
                (lambda z: tf.pad(z, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]], mode="REFLECT")))(x)
        # Pool densely (stride 1) and then subsample with a 1x1 pool —
        # presumably to control how padded border windows are averaged;
        # NOTE(review): confirm this matches the intended ceil-mode semantics.
        x = nn.AvgPool2D(
            pool_size=pool_size,
            strides=1,
            padding="valid",
            name=name + "/pool")(x)
        if (strides[0] > 1) or (strides[1] > 1):
            x = nn.AvgPool2D(
                pool_size=1,
                strides=strides,
                padding="valid",
                name=name + "/stride")(x)
        return x
    x = nn.AvgPool2D(
        pool_size=pool_size,
        strides=strides,
        padding=padding_ke,
        name=name + "/pool")(x)
    return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           dilation=1,
           groups=1,
           use_bias=True,
           name="conv2d"):
    """
    Convolution 2D layer wrapper, emulating PyTorch-style explicit padding and
    grouped convolutions on top of Keras layers.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    name : str, default 'conv2d'
        Layer name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Normalize scalar arguments to (h, w) pairs.
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    extra_pad = False
    if K.backend() == "tensorflow":
        # Apply asymmetric/explicit padding with tf.pad, then convolve "valid".
        if (padding[0] > 0) or (padding[1] > 0):
            import tensorflow as tf
            x = nn.Lambda(
                (lambda z: tf.pad(z, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]))
                if is_channels_first() else
                (lambda z: tf.pad(z, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]])))(x)
            if not ((padding[0] == padding[1]) and (kernel_size[0] == kernel_size[1]) and
                    (kernel_size[0] // 2 == padding[0])):
                extra_pad = True
        padding_ke = "valid"
    else:
        if (padding[0] == padding[1]) and (padding[0] == 0):
            padding_ke = "valid"
        elif (padding[0] == padding[1]) and (kernel_size[0] == kernel_size[1]) and (kernel_size[0] // 2 == padding[0]):
            padding_ke = "same"
        else:
            x = nn.ZeroPadding2D(
                padding=padding,
                name=name + "/pad")(x)
            padding_ke = "valid"
            extra_pad = True
    if groups == 1:
        # Ordinary convolution.
        if extra_pad:
            name = name + "/conv"
        x = nn.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding_ke,
            dilation_rate=dilation,
            use_bias=use_bias,
            name=name)(x)
    elif (groups == out_channels) and (out_channels == in_channels):
        # Fully depthwise case maps onto Keras' DepthwiseConv2D.
        assert (dilation[0] == 1) and (dilation[1] == 1)
        if extra_pad:
            name = name + "/conv"
        x = nn.DepthwiseConv2D(
            kernel_size=kernel_size,
            strides=strides,
            padding=padding_ke,
            use_bias=use_bias,
            name=name)(x)
    else:
        # General grouped convolution: slice channels, convolve each group,
        # and concatenate the results.
        assert (in_channels % groups == 0)
        assert (out_channels % groups == 0)
        none_batch = (x._keras_shape[0] is None)
        in_group_channels = in_channels // groups
        out_group_channels = out_channels // groups
        group_list = []
        for gi in range(groups):
            # Bind the loop variable as a default argument: a plain closure
            # captures `gi` by reference, so if these lambdas are re-invoked
            # after the loop finishes (model re-applied to a new input, or
            # rebuilt from config), every group would slice with the final
            # value of `gi`.
            xi = nn.Lambda(
                (lambda z, gi=gi: z[:, gi * in_group_channels:(gi + 1) * in_group_channels, :, :])
                if is_channels_first() else
                (lambda z, gi=gi: z[:, :, :, gi * in_group_channels:(gi + 1) * in_group_channels]))(x)
            xi = nn.Conv2D(
                filters=out_group_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding_ke,
                dilation_rate=dilation,
                use_bias=use_bias,
                name=name + "/convgroup{}".format(gi + 1))(xi)
            group_list.append(xi)
        x = nn.concatenate(group_list, axis=get_channel_axis(), name=name + "/concat")
        # Restore the unknown batch dimension lost by concatenate, if any.
        if none_batch and (x._keras_shape[0] is not None):
            x._keras_shape = (None, ) + x._keras_shape[1:]
    return x
def conv1x1(x,
            in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            name="conv1x1"):
    """
    Convolution 1x1 layer (pointwise convolution, no spatial padding).
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    name : str, default 'conv1x1'
        Layer name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    return conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        dilation=1,
        groups=groups,
        use_bias=use_bias,
        name=name)
def conv3x3(x,
            in_channels,
            out_channels,
            strides=1,
            padding=1,
            groups=1,
            name="conv3x3"):
    """
    Convolution 3x3 layer (bias-free).
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    name : str, default 'conv3x3'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    return conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=1,
        groups=groups,
        use_bias=False,
        name=name)
def depthwise_conv3x3(x,
                      channels,
                      strides,
                      name="depthwise_conv3x3"):
    """
    Depthwise convolution 3x3 layer: a grouped 3x3 convolution with one group
    per channel (bias-free, padding 1).
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    name : str, default 'depthwise_conv3x3'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    return conv3x3(
        x=x,
        in_channels=channels,
        out_channels=channels,
        strides=strides,
        padding=1,
        groups=channels,
        name=name)
def conv_block(x,
               in_channels,
               out_channels,
               kernel_size,
               strides,
               padding,
               dilation=1,
               groups=1,
               use_bias=False,
               use_bn=True,
               bn_epsilon=1e-5,
               activation="relu",
               name="conv_block"):
    """
    Standard convolution block: convolution, optional batch normalization and
    optional activation, in that order.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'conv_block'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    y = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        name=name + "/conv")
    if use_bn:
        y = batchnorm(
            x=y,
            epsilon=bn_epsilon,
            name=name + "/bn")
    if activation is None:
        return y
    return get_activation_layer(
        x=y,
        activation=activation,
        name=name + "/activ")
def conv1x1_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  groups=1,
                  use_bias=False,
                  bn_epsilon=1e-5,
                  activation="relu",
                  name="conv1x1_block"):
    """
    1x1 version of the standard convolution block (pointwise, no padding).
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'conv1x1_block'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        dilation=1,
        groups=groups,
        use_bias=use_bias,
        use_bn=True,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name)
def conv3x3_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_epsilon=1e-5,
                  activation="relu",
                  name="conv3x3_block"):
    """
    Standard convolution block specialized for a 3x3 kernel.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'conv3x3_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Padding defaults to 1 so that stride-1 calls preserve spatial size.
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name)
def conv5x5_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  bn_epsilon=1e-5,
                  activation="relu",
                  name="conv5x5_block"):
    """
    5x5 version of the standard convolution block.

    Note: the default ``name`` previously read 'conv3x3_block' (copy-paste from
    the 3x3 variant), which produced misleading layer names when the default
    was used; it is corrected to 'conv5x5_block' here.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'conv5x5_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name)
def conv7x7_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  activation="relu",
                  name="conv7x7_block"):
    """
    7x7 version of the standard convolution block (typical ResNet-style stem).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'conv7x7_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Padding of 3 keeps the spatial size for stride-1 7x7 convolutions.
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        activation=activation,
        name=name)
def dwconv3x3_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    bn_epsilon=1e-5,
                    activation="relu",
                    name="dwconv3x3_block"):
    """
    Depthwise 3x3 version of the standard convolution block.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'dwconv3x3_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Depthwise behaviour is expressed as a grouped convolution with one group
    # per output channel.  NOTE(review): this presumes in_channels ==
    # out_channels -- confirm with callers.
    return conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name)
def dwconv5x5_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=2,
                    dilation=1,
                    use_bias=False,
                    bn_epsilon=1e-5,
                    activation="relu",
                    name="dwconv5x5_block"):
    """
    Depthwise 5x5 version of the standard convolution block.

    Note: the default ``name`` previously read 'dwconv3x3_block' (copy-paste
    from the 3x3 variant), which produced misleading layer names when the
    default was used; it is corrected to 'dwconv5x5_block' here.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    name : str, default 'dwconv5x5_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Depthwise = grouped convolution with one group per output channel.
    return conv5x5_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name)
def pre_conv_block(x,
                   in_channels,
                   out_channels,
                   kernel_size,
                   strides,
                   padding,
                   return_preact=False,
                   name="pre_conv_block"):
    """
    Convolution block with Batch normalization and ReLU pre-activation.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    name : str, default 'pre_conv_block'
        Block name.

    Returns:
    -------
    tuple of two keras.backend tensor/variable/symbol
        Resulted tensor and preactivated input tensor.
    """
    # Pre-activation: BN + ReLU come *before* the convolution.
    x = batchnorm(
        x=x,
        name=name + "/bn")
    x = nn.Activation("relu", name=name + "/activ")(x)
    # Keep a handle on the pre-activated tensor for PreResNet-style shortcuts.
    x_pre_activ = x
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        name=name + "/conv")
    if return_preact:
        return x, x_pre_activ
    return x
def pre_conv1x1_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      name="preres_conv1x1"):
    """
    Pre-activated convolution block specialized for a pointwise (1x1) kernel.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether return pre-activation.
    name : str, default 'preres_conv1x1'
        Block name.

    Returns:
    -------
    tuple of two keras.backend tensor/variable/symbol
        Resulted tensor and preactivated input tensor.
    """
    # 1x1 kernels need no spatial padding.
    return pre_conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        return_preact=return_preact,
        name=name)
def pre_conv3x3_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      name="pre_conv3x3_block"):
    """
    Pre-activated convolution block specialized for a 3x3 kernel.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether return pre-activation.
    name : str, default 'pre_conv3x3_block'
        Block name.

    Returns:
    -------
    tuple of two keras.backend tensor/variable/symbol
        Resulted tensor and preactivated input tensor.
    """
    # Padding of 1 keeps the spatial size for stride-1 3x3 convolutions.
    return pre_conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        return_preact=return_preact,
        name=name)
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Implemented as reshape (split channels into groups) -> transpose the two
    group axes -> reshape back to the original layout.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    groups : int
        Number of groups.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    shape = x._keras_shape
    if is_channels_first():
        channels, height, width = shape[1], shape[2], shape[3]
        x = K.reshape(x, shape=(-1, groups, channels // groups, height, width))
        x = K.permute_dimensions(x, pattern=(0, 2, 1, 3, 4))
        x = K.reshape(x, shape=(-1, channels, height, width))
    else:
        height, width, channels = shape[1], shape[2], shape[3]
        x = K.reshape(x, shape=(-1, height, width, groups, channels // groups))
        x = K.permute_dimensions(x, pattern=(0, 1, 2, 4, 3))
        x = K.reshape(x, shape=(-1, height, width, channels))
    # Restore the static shape metadata lost by the backend reshapes.
    update_keras_shape(x)
    return x
def channel_shuffle_lambda(channels,
                           groups,
                           **kwargs):
    """
    Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.

    Returns:
    -------
    Layer
        Channel shuffle layer.
    """
    # The shuffle only makes sense when channels split evenly into groups.
    assert (channels % groups == 0)
    # Bind `groups` into the Lambda so the wrapped op needs only the tensor.
    return nn.Lambda(channel_shuffle, arguments={"groups": groups}, **kwargs)
def se_block(x,
             channels,
             reduction=16,
             approx_sigmoid=False,
             round_mid=False,
             activation="relu",
             name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    approx_sigmoid : bool, default False
        Whether to use approximated sigmoid function.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    activation : function or str, default 'relu'
        Activation function or name of activation function.
    name : str, default 'se_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # SE expects a 4D (batch, spatial, channel) tensor.
    assert (len(x._keras_shape) == 4)
    if round_mid:
        mid_channels = round_channels(float(channels) / reduction)
    else:
        mid_channels = channels // reduction
    if is_channels_first():
        pool_size = x._keras_shape[2:4]
    else:
        pool_size = x._keras_shape[1:3]
    # Squeeze: global average pooling over the whole spatial extent.
    w = nn.AvgPool2D(
        pool_size=pool_size,
        name=name + "/pool")(x)
    # Excitation: bottleneck MLP implemented with 1x1 convolutions.
    w = conv1x1(
        x=w,
        in_channels=channels,
        out_channels=mid_channels,
        use_bias=True,
        name=name + "/conv1")
    w = get_activation_layer(
        x=w,
        activation=activation,
        name=name + "/activ")
    w = conv1x1(
        x=w,
        in_channels=mid_channels,
        out_channels=channels,
        use_bias=True,
        name=name + "/conv2")
    if approx_sigmoid:
        w = HSigmoid(name=name + "/hsigmoid")(w)
    else:
        w = nn.Activation("sigmoid", name=name + "/sigmoid")(w)
    # Recalibrate: scale each input channel by its learned gate.
    return nn.multiply([x, w], name=name + "/mul")
class GluonBatchNormalization(BatchNormalization):
    """
    Batch normalization layer wrapper for implementation of the Gluon type of BatchNorm default parameters.

    On the MXNet backend it dispatches to the native `mx.sym.BatchNorm` operator
    (to match Gluon numerics, including `fix_gamma`); on every other backend it
    falls back to the stock Keras BatchNormalization behaviour.

    Parameters:
    ----------
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    center : bool, default True
        If True, add offset of `beta` to normalized tensor.
        If False, `beta` is ignored.
    scale : bool, default True
        If True, multiply by `gamma`. If False, `gamma` is not used.
        When the next layer is linear (also e.g. `nn.activate`),
        this can be disabled since the scaling
        will be done by the next layer.
    beta_initializer : str, default 'zeros'
        Initializer for the beta weight.
    gamma_initializer : str, default 'ones'
        Initializer for the gamma weight.
    moving_mean_initializer : str, default 'zeros'
        Initializer for the moving mean.
    moving_variance_initializer : str, default 'ones'
        Initializer for the moving variance.
    beta_regularizer : str or None, default None
        Optional regularizer for the beta weight.
    gamma_regularizer : str or None, default None
        Optional regularizer for the gamma weight.
    beta_constraint : str or None, default None
        Optional constraint for the beta weight.
    gamma_constraint : str or None, default None
        Optional constraint for the gamma weight.
    fix_gamma : bool, default False
        Fix gamma while training.
    """
    def __init__(self,
                 momentum=0.9,
                 epsilon=1e-5,
                 center=True,
                 scale=True,
                 beta_initializer="zeros",
                 gamma_initializer="ones",
                 moving_mean_initializer="zeros",
                 moving_variance_initializer="ones",
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 fix_gamma=False,
                 **kwargs):
        super(GluonBatchNormalization, self).__init__(
            axis=get_channel_axis(),
            momentum=momentum,
            epsilon=epsilon,
            center=center,
            scale=scale,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
            moving_mean_initializer=moving_mean_initializer,
            moving_variance_initializer=moving_variance_initializer,
            beta_regularizer=beta_regularizer,
            gamma_regularizer=gamma_regularizer,
            beta_constraint=beta_constraint,
            gamma_constraint=gamma_constraint,
            **kwargs)
        self.fix_gamma = fix_gamma

    def call(self, inputs, training=None):
        if K.backend() == "mxnet":
            from keras.backend.mxnet_backend import keras_mxnet_symbol, KerasSymbol
            import mxnet as mx

            @keras_mxnet_symbol
            def gluon_batchnorm(x,
                                gamma,
                                beta,
                                moving_mean,
                                moving_var,
                                momentum=0.9,
                                axis=1,
                                epsilon=1e-5,
                                fix_gamma=False):
                """
                Apply native MXNet/Gluon batch normalization on x with given moving_mean, moving_var, beta and gamma.

                Parameters:
                ----------
                x : keras.backend tensor/variable/symbol
                    Input tensor/variable/symbol.
                gamma : keras.backend tensor/variable/symbol
                    Tensor by which to scale the input.
                beta : keras.backend tensor/variable/symbol
                    Tensor by which to center the input.
                moving_mean : keras.backend tensor/variable/symbol
                    Moving mean.
                moving_var : keras.backend tensor/variable/symbol
                    Moving variance.
                momentum : float, default 0.9
                    Momentum for the moving average.
                axis : int, default 1
                    Axis along which BatchNorm is applied. Axis usually represent axis of 'channels'. MXNet follows
                    'channels_first'.
                epsilon : float, default 1e-5
                    Small float added to variance to avoid dividing by zero.
                fix_gamma : bool, default False
                    Fix gamma while training.

                Returns:
                -------
                keras.backend tensor/variable/symbol
                    Resulted tensor/variable/symbol.
                """
                # Unwrap Keras symbol wrappers down to raw MXNet symbols.
                if isinstance(x, KerasSymbol):
                    x = x.symbol
                if isinstance(moving_mean, KerasSymbol):
                    moving_mean = moving_mean.symbol
                if isinstance(moving_var, KerasSymbol):
                    moving_var = moving_var.symbol
                if isinstance(beta, KerasSymbol):
                    beta = beta.symbol
                if isinstance(gamma, KerasSymbol):
                    gamma = gamma.symbol
                return KerasSymbol(mx.sym.BatchNorm(
                    data=x,
                    gamma=gamma,
                    beta=beta,
                    moving_mean=moving_mean,
                    moving_var=moving_var,
                    momentum=momentum,
                    axis=axis,
                    eps=epsilon,
                    fix_gamma=fix_gamma))

            return gluon_batchnorm(
                x=inputs,
                gamma=self.gamma,
                beta=self.beta,
                moving_mean=self.moving_mean,
                moving_var=self.moving_variance,
                momentum=self.momentum,
                axis=self.axis,
                epsilon=self.epsilon,
                fix_gamma=self.fix_gamma)
        else:
            # Bug fix: the parent result was previously computed but not
            # returned, so the layer silently emitted `None` on non-MXNet
            # backends (TensorFlow/Theano/CNTK).
            return super(GluonBatchNormalization, self).call(inputs, training)
# ---- imgclsmob-master/keras_/kerascv/models/model_store.py ----
"""
Model store which provides pretrained models.
"""
__all__ = ['get_model_file', 'load_model', 'download_model']
import os
import zipfile
import logging
import hashlib
import warnings
import numpy as np
import h5py
from keras import backend as K
from keras.engine.saving import load_attributes_from_hdf5_group
# Registry of released pretrained weights.  Maps model name to a triple
# (error, checksum, repo_release_tag):
#   * error: metric string embedded in the weight-file name (see get_model_file).
#   * checksum: SHA1 of the unpacked .h5 weight file, verified after download.
#   * repo_release_tag: GitHub release of imgclsmob hosting the zipped weights.
_model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [
    ('alexnet', '1788', 'b00ce627a6c74fe471eb7aebf906fcfa79387861', 'v0.0.394'),
    ('alexnetb', '1853', '045e80b5a055c0006215b1f416c0e4b03455b3e5', 'v0.0.384'),
    ('zfnet', '1715', '3226638b9270c0f3a2ad5302d56b3f9d47706b88', 'v0.0.395'),
    ('zfnetb', '1483', '6ff6768e463fdd333cef5ecb821a456cff6debb8', 'v0.0.400'),
    ('vgg11', '1016', 'c6bc31d0f1f1575081107f4ea8e2ecec3132bfb4', 'v0.0.381'),
    ('vgg13', '0950', 'f0e5bed7cb64111b0fdf73875a33500e8f78a365', 'v0.0.388'),
    ('vgg16', '0832', 'baf4278d9d75dbb76b459e7f30f2d1e18c44ae1b', 'v0.0.401'),
    ('vgg19', '0767', '315c0bc8ddcdfd90b503a6fa197a596c1b23b897', 'v0.0.420'),
    ('bn_vgg11', '0934', '96a967baaf97ebf8c5802f60aabbc30fc59d9027', 'v0.0.339'),
    ('bn_vgg13', '0887', 'd4a3da4039babf9ec89348ed3a9f11bb2d899c25', 'v0.0.353'),
    ('bn_vgg16', '0757', '2960ba135dda2678514e0ed6d1118adc9ff4e9dc', 'v0.0.359'),
    ('bn_vgg19', '0689', 'aaee8cb7d2c70a4db5dc3c665158b835e00c3fbc', 'v0.0.360'),
    ('bn_vgg11b', '0975', '8a35fd728d1e35570ffaedffd4d8fc8f968f4bfe', 'v0.0.407'),
    ('bn_vgg13b', '1016', 'b26cafd39447f039a8124dda8a177b2dc72d98f3', 'v0.0.123'),
    ('bn_vgg16b', '0865', '2272fdd110106e830920bfdd5999fa58737f20e4', 'v0.0.123'),
    ('bn_vgg19b', '0814', '852e2ca228821f3ea1d32a12ce47a9a001236f5e', 'v0.0.123'),
    ('resnet10', '1385', '0a7d3ca6c6616d0a55ebfb0faabe56af980509f5', 'v0.0.248'),
    ('resnet12', '1303', '3ba378deed1b148fff66a83c2ef195a0acaca563', 'v0.0.253'),
    ('resnet14', '1220', 'b7cfec5936dad4f7c56620c331d5546be57a3ab3', 'v0.0.256'),
    ('resnetbc14b', '1116', 'defe7c1982bf0b60b6043792210631ff36448f30', 'v0.0.309'),
    ('resnet16', '1088', 'cc0968d30689e278ba68cd80c8b76a430c8a24a3', 'v0.0.259'),
    ('resnet18_wd4', '1741', '6d84323b46771d914075db6665ca1f164a0936f7', 'v0.0.262'),
    ('resnet18_wd2', '1283', '8e70ce72e5e9ab6925aac0b4342156d3ba462523', 'v0.0.263'),
    ('resnet18_w3d4', '1066', 'afa3a2391bfb24bc46a19ad0332fc9824418a8a7', 'v0.0.266'),
    ('resnet18', '0952', '0817d05847105afe00b42c99ab4dc8031f196fbe', 'v0.0.153'),
    ('resnet26', '0837', 'b3c764c0a35c1e2bca5d0526a72fc2b007f72f97', 'v0.0.305'),
    ('resnetbc26b', '0759', 'a1916fd0f4ffa2d116c20be61d7f78c52aaea58b', 'v0.0.313'),
    ('resnet34', '0744', 'd366daf86c928d3aa2efd63328fa37e918d6fa32', 'v0.0.291'),
    ('resnetbc38b', '0672', '703a75434656b7892a121993353ac3cefbeb91e4', 'v0.0.328'),
    ('resnet50', '0604', '8e1e86d39b65517592152eccf6e6d2eca7cf2a9b', 'v0.0.329'),
    ('resnet50b', '0610', '8a54fb83791e86cdc190fc9a35cfab30c70d394b', 'v0.0.308'),
    ('resnet101', '0599', 'ab4289478d017d3b929011c26fbcf8b54dd8ce07', 'v0.0.49'),
    ('resnet101b', '0511', '84e8ef696cfec4990365c9b0f6afa641a6330357', 'v0.0.357'),
    ('resnet152', '0535', '43ecb2b0cc2dccd771aea77b674c64a69d449164', 'v0.0.144'),
    ('resnet152b', '0479', 'a0dd484cb7afdb2a813858df487c484f17543683', 'v0.0.378'),
    ('preresnet10', '1401', '2349a7c822eac8821120aff6417de0bba99d7966', 'v0.0.249'),
    ('preresnet12', '1322', '32f2f50c15e6320202fdaf40790e03f4f469a281', 'v0.0.257'),
    ('preresnet14', '1219', 'b123205e636c19708eb4a311808c6894c28794cb', 'v0.0.260'),
    ('preresnetbc14b', '1151', '8989bc9fea0e2acb6b31e1b891078fd9e55559d5', 'v0.0.315'),
    ('preresnet16', '1081', 'ec02b7995fedb58cda1c20c236fa44be0d7b434d', 'v0.0.261'),
    ('preresnet18_wd4', '1778', '13ecb34c8031d30d01c19542b17b96c83fba1f32', 'v0.0.272'),
    ('preresnet18_wd2', '1319', '694dbc5bb6f20657478d56f280cac67673103c23', 'v0.0.273'),
    ('preresnet18_w3d4', '1068', '13000951d0737fb3c180dbf4f8c6c116de1c5086', 'v0.0.274'),
    ('preresnet18', '0952', 'b88bf7670642b313929649a20b2a07e4cbe3b35a', 'v0.0.140'),
    ('preresnet26', '0834', 'be46d91ce5f85b2fadcd77e0a126600221dbd826', 'v0.0.316'),
    ('preresnetbc26b', '0786', 'f6ab507bce438a1cfb033558ec85ec78ff248d99', 'v0.0.325'),
    ('preresnet34', '0751', 'fcccbc33435c60f9257f50c5bb8b2ea0fb626535', 'v0.0.300'),
    ('preresnetbc38b', '0633', 'b6793dec9fa0893cad19ec346ed3651a01d75a87', 'v0.0.348'),
    ('preresnet50', '0620', '91bd3a6071d230d061eda0e6af22eff0a782b47c', 'v0.0.330'),
    ('preresnet50b', '0632', 'd3f20f4ea7dc030bb6be59898a79522525263d05', 'v0.0.307'),
    ('preresnet101', '0575', '5dff088de44ce782ac72b4c5fbc03de83b379d1c', 'v0.0.50'),
    ('preresnet101b', '0540', 'e70bed8e7abd2a50fb7a74464fc1da06e83b8ab1', 'v0.0.351'),
    ('preresnet152', '0531', 'a5ac128d79e3e6eb01a4a5eeb571e252482edbc7', 'v0.0.50'),
    ('preresnet152b', '0500', '360cd64056ab0d0d00de059cc748ad7e54ebf258', 'v0.0.386'),
    ('preresnet200b', '0564', '9172d4c02aef8c6ff1504dcf3c299518325afae0', 'v0.0.50'),
    ('preresnet269b', '0556', 'bdd89388474c482c432d3af5d5c4231b33e68588', 'v0.0.239'),
    ('resnext14_16x4d', '1224', '146ff5dae72156ed07a7a0a679ae419d1ece78b5', 'v0.0.370'),
    ('resnext14_32x2d', '1246', '3af87217c5a811d5f303986c7f9a27955daed304', 'v0.0.371'),
    ('resnext14_32x4d', '1110', '86af26f7ab4c0c7f30dea890cbd66e79444b16cb', 'v0.0.327'),
    ('resnext26_32x2d', '0850', '0e54facd6ad17180075862cf032fed1a30e6e034', 'v0.0.373'),
    ('resnext26_32x4d', '0720', 'a5e34838cc78ff16c4b3e1bf1c00acaa4f205d53', 'v0.0.332'),
    ('resnext50_32x4d', '0546', '1c9906b02b3194c568ccf13a478fc7e81a8edb29', 'v0.0.417'),
    ('resnext101_32x4d', '0492', '24e9dbdb2350ad74c2e054c3260b2db1c860ea05', 'v0.0.417'),
    ('resnext101_64x4d', '0483', 'a6b4bdefff3bac5c435d4b3a2cd46eae298be209', 'v0.0.417'),
    ('seresnet10', '1329', 'f70cf6c73471f3878641414d95dc979a7acbb221', 'v0.0.354'),
    ('seresnet18', '0920', 'bb27e27345afaee959e42eada2e79f91ffa2fb22', 'v0.0.355'),
    ('seresnet26', '0803', 'd689714732e408238ee85de95e06aeb56aed4002', 'v0.0.363'),
    ('seresnetbc26b', '0682', 'ba3e51706b787ba4034bbb74d2b60afeb16cc2e8', 'v0.0.366'),
    ('seresnetbc38b', '0575', '536881363a6bbc41cda686dff4881c9a2acd086c', 'v0.0.374'),
    ('seresnet50', '0643', 'fabfa4062a7724ea31752434a687e1837eb30932', 'v0.0.52'),
    ('seresnet50b', '0533', 'bc9d11ec3038951cac6e03c33c3abd61eb61e9a4', 'v0.0.387'),
    ('seresnet101', '0588', '933d34159345f5cf9a663504f03cd423b527aeac', 'v0.0.52'),
    ('seresnet152', '0577', 'd25ced7d6369f3d14ed2cfe54fb70bc4be9c68e0', 'v0.0.52'),
    ('sepreresnet10', '1306', '6096e4d9873949faf31ffbed58a321d8b2396ba7', 'v0.0.377'),
    ('sepreresnet18', '0938', 'd0bf29b9a7d489a5be3a3a802be7c9bb87a5df5f', 'v0.0.380'),
    ('sepreresnetbc26b', '0636', 'cc11e087d240944f6e5e8952460ba2f417d91950', 'v0.0.399'),
    ('sepreresnetbc38b', '0563', 'f4b96ed792b0f92473c8f43763cf6b6340d19960', 'v0.0.409'),
    ('seresnext50_32x4d', '0505', '077f048f2b4e4fd1946c6c3f85a07b9566dc6271', 'v0.0.418'),
    ('seresnext101_32x4d', '0460', '08ea8055b2b3d8c5c2eafcb200355968649c8f52', 'v0.0.418'),
    ('seresnext101_64x4d', '0466', '28ff2d1f7f77569101515fdfb93298feb936d33a', 'v0.0.418'),
    ('senet16', '0806', '8a634c501ee89777cfd0af9ec8b953e7ebc1a5de', 'v0.0.341'),
    ('senet28', '0591', '33c65063c8889f065cd92cee5abe4fee3a129eec', 'v0.0.356'),
    ('senet154', '0465', '962aeede627d5196eaf0cf8c25b6f7281f62e9ea', 'v0.0.54'),
    ('densenet121', '0684', '7c6d506aa37ffdbab6fbe8ee45f8ef8d9b505fa2', 'v0.0.314'),
    ('densenet161', '0618', '070fcb455db45c45aeb67fa4fb0fda4a89b7ef45', 'v0.0.55'),
    ('densenet169', '0605', '7b3b7888c19a672d914800bdebc701ce6bb9f360', 'v0.0.406'),
    ('densenet201', '0635', 'cf3afbb259163bb76eee519f9d43ddbdf0a583b9', 'v0.0.55'),
    ('darknet_tiny', '1746', '147e949b779914331f740badc82339a2fb5bcb11', 'v0.0.69'),
    ('darknet_ref', '1668', '2ef080bb6f470e5ffb0c625ff3047de97cfeb6e2', 'v0.0.64'),
    ('darknet53', '0556', 'd6c6e7dcb96bd6d6789f35c41ac9abb4474b4bf1', 'v0.0.150'),
    ('squeezenet_v1_0', '1756', 'a489092344c0214c402655210e031a1441bd70d1', 'v0.0.128'),
    ('squeezenet_v1_1', '1739', 'b9a8f9eae7a48d053895fe4a362d1d8eb592e994', 'v0.0.88'),
    ('squeezeresnet_v1_0', '1780', 'fb9a54aac20d59f73111fee0745e144b183a66d9', 'v0.0.178'),
    ('squeezeresnet_v1_1', '1784', '43ee9cbbb91046f5316ee14e227f8323b1801b51', 'v0.0.70'),
    ('sqnxt23_w1', '1862', 'cab60636597912e7861d7b6618ecb390b90545ec', 'v0.0.171'),
    ('sqnxt23v5_w1', '1757', '96b94e1dfa1872f96f9b1ce99546a0613bfb1775', 'v0.0.172'),
    ('sqnxt23_w3d2', '1330', 'e52625a000e7a0b02fdf01c64b18a8a21c10b7cd', 'v0.0.210'),
    ('sqnxt23v5_w3d2', '1284', 'fd150fcca3fb73242650ba5e705cc2947def075e', 'v0.0.212'),
    ('sqnxt23_w2', '1066', 'a34e73b9645874532b42bf4a12765080d4c53fb1', 'v0.0.240'),
    ('sqnxt23v5_w2', '1028', '13c5a59866483b958bb116a60001b31f783022a4', 'v0.0.216'),
    ('shufflenet_g1_wd4', '3676', 'cb39b77366909eb13b736497c6eb239efb69e4ac', 'v0.0.134'),
    ('shufflenet_g3_wd4', '3615', '21150468a44c548845b2304700445485407670c7', 'v0.0.135'),
    ('shufflenet_g1_wd2', '2238', '76709a36a9fb8feb2c9ac50fecfcbccdc2bf77ec', 'v0.0.174'),
    ('shufflenet_g3_wd2', '2060', '173a725c1a8b66be6f5b044f0994634113cff8b0', 'v0.0.167'),
    ('shufflenet_g1_w3d4', '1675', '56aa41794ba19d865c06dba56fd73f676dec1f48', 'v0.0.218'),
    ('shufflenet_g3_w3d4', '1609', '34e28781782082e73a06c7230b6c87caacf58945', 'v0.0.219'),
    ('shufflenet_g1_w1', '1350', 'f44c8a1823606c81f3524038333356fc8f022cd6', 'v0.0.223'),
    ('shufflenet_g2_w1', '1332', '8784a32bb15e2bb49496ee6d151539d4eb085bbb', 'v0.0.241'),
    ('shufflenet_g3_w1', '1329', '0e213e7696a5ae086648152b9e28819798259081', 'v0.0.244'),
    ('shufflenet_g4_w1', '1310', 'ef2ff63e8fad961d1b38ba711e2b2ecadd078508', 'v0.0.245'),
    ('shufflenet_g8_w1', '1320', '796314f132292c36d09baa8486b5e40d974ecc4d', 'v0.0.250'),
    ('shufflenetv2_wd2', '1840', '9b4b0964301ba3f2e393c3d3b9a43de3bb480b05', 'v0.0.90'),
    ('shufflenetv2_w1', '1133', 'bcba973eb9f0c333564ed9761ecfd77d28326e5b', 'v0.0.133'),
    ('shufflenetv2_w3d2', '0927', '17a260398afbc6b27b9ab917d538e36993c12fb9', 'v0.0.288'),
    ('shufflenetv2_w2', '0822', 'a0209f14172e8c7c7c4a8e54307641ed69838beb', 'v0.0.301'),
    ('shufflenetv2b_wd2', '1783', 'ca8409ae44489695b468ceb7104e1cc63cb09873', 'v0.0.211'),
    ('shufflenetv2b_w1', '1101', '1caf1b22107357e3ed7409545eff6e815044bcb7', 'v0.0.211'),
    ('shufflenetv2b_w3d2', '0880', '265c3c7c077dd66f435bcc5f239010fd975f7006', 'v0.0.211'),
    ('shufflenetv2b_w2', '0810', '2149df381bcb370856cb4c7a27130d50a96b61f9', 'v0.0.242'),
    ('menet108_8x1_g3', '2031', 'a4d43433e2d9f770c406b3f780a8960609c0e9b8', 'v0.0.89'),
    ('menet128_8x1_g4', '1914', '5bb8f2287930abb3e921842f053d6592f7034ea7', 'v0.0.103'),
    ('menet160_8x1_g8', '2028', '09664de97e30e93189cf6d535c3a297b9c8c190e', 'v0.0.154'),
    ('menet228_12x1_g3', '1288', 'c2eeac242640ba862e04d9f7b67bcfe608b1c269', 'v0.0.131'),
    ('menet256_12x1_g4', '1217', 'b020cc33586896c2c8501c84e72a38818778c796', 'v0.0.152'),
    ('menet348_12x1_g3', '0936', '6795f0079484c1c4b4f65df1df5e68302861340a', 'v0.0.173'),
    ('menet352_12x1_g8', '1167', 'a9d9412dcebfaf7682c4c7cb8c7a1232f04bcce6', 'v0.0.198'),
    ('menet456_24x1_g3', '0780', '6645f5946ddda7039ac5fb4cfcac8a4e1338df52', 'v0.0.237'),
    ('mobilenet_wd4', '2217', 'fb7abda85e29c592f0196ff4a76b9ee2951c6e3c', 'v0.0.62'),
    ('mobilenet_wd2', '1330', 'aa86f3554b83e1a818b197e07cbc16585e1d15a3', 'v0.0.156'),
    ('mobilenet_w3d4', '1051', 'd200ad45590faa190c194ae9ca6853c19af97b63', 'v0.0.130'),
    ('mobilenet_w1', '0866', '9661b555d739c4bb2c519c598a96a1b3d288b006', 'v0.0.155'),
    ('fdmobilenet_wd4', '3052', '6c219205677d97f8c07479c7fdfe51990d608f84', 'v0.0.177'),
    ('fdmobilenet_wd2', '1969', '5678a212ba44317306e2960ddeed6a5c0489122f', 'v0.0.83'),
    ('fdmobilenet_w3d4', '1601', '2ea5eba9e1b8caf9235b71835971f868a9b0d1de', 'v0.0.159'),
    ('fdmobilenet_w1', '1312', 'e11d0dce083322e06e5ca296d2dfa5dff742d74a', 'v0.0.162'),
    ('mobilenetv2_wd4', '2412', '622733723bdd6b9df10723f16a465586be1c3d4b', 'v0.0.137'),
    ('mobilenetv2_wd2', '1443', 'c7086bcc628b74e2ed942631e1ed2d3fa8b2657b', 'v0.0.170'),
    ('mobilenetv2_w3d4', '1044', '29e9923c74c059abac6c5194c04570837510974a', 'v0.0.230'),
    ('mobilenetv2_w1', '0864', '5e487e824d18fc8f776b3103bab677ed1a81b6ab', 'v0.0.213'),
    ('mobilenetv3_large_w1', '0769', 'fc909b4c0fa19a789806c254977c652ad782184b', 'v0.0.411'),
    ('igcv3_wd4', '2829', '00072cafe96ba57f84a689d3016b85224b234983', 'v0.0.142'),
    ('igcv3_wd2', '1704', 'b8961ca335abd1d66eb2cf180eb14381ebdcc3ee', 'v0.0.132'),
    ('igcv3_w3d4', '1097', 'fb365b725beaf38429a98a52b88a36d3e423329b', 'v0.0.207'),
    ('igcv3_w1', '0899', '968237cbd0a55b43f8847b919fa3ba02a27bb595', 'v0.0.243'),
    ('mnasnet_b1', '0800', '9ce379b36ee4738719c82e44ef1917e0a846fbb8', 'v0.0.419'),
    ('mnasnet_a1', '0755', '8bf70a05b4d97ed149324c74fd400a3273a9478d', 'v0.0.419'),
    ('efficientnet_b0', '0722', '2bea741f87b9e0d85570bb3753597a11654f2f78', 'v0.0.364'),
    ('efficientnet_b1', '0626', 'd7a4bf8be529396c2375c93f50f355ee7968ab3f', 'v0.0.376'),
    ('efficientnet_b0b', '0669', '436cc024344cb2f4160bfbc4b5fb6c23d9f96987', 'v0.0.403'),
    ('efficientnet_b1b', '0564', 'f2eb3cd8d915f9eacc90cccf10ee02c5be7475b4', 'v0.0.403'),
    ('efficientnet_b2b', '0516', '9c08b8392236ca0654c195bb0020b412d23340b7', 'v0.0.403'),
    ('efficientnet_b3b', '0431', 'd1545ea07602b3e661c10b3ee246819ea80cf0b7', 'v0.0.403'),
    ('efficientnet_b4b', '0376', 'c7e29f57ea62639cebe936bff55335caca23bf42', 'v0.0.403'),
    ('efficientnet_b5b', '0334', '4365cf122b0a5b514347220898118b43e4d0e271', 'v0.0.403'),
    ('efficientnet_b6b', '0312', '7f3f3465e9c2538c36c7d97997a5fea5f1883719', 'v0.0.403'),
    ('efficientnet_b7b', '0311', 'b4aac2ceee6e22d67b5e61c46be3e34a9aedd06f', 'v0.0.403')]}
# Base GitHub URL from which release assets (zipped weight files) are fetched.
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
    """
    Look up the registry entry for a pretrained model.

    Parameters:
    ----------
    model_name : str
        Name of the model.

    Returns:
    -------
    tuple of 3 str
        Error string, SHA1 hash, and repo release tag for the model.

    Raises:
    ------
    ValueError
        If no pretrained weights are registered for `model_name`.
    """
    if model_name in _model_sha1:
        error, sha1_hash, repo_release_tag = _model_sha1[model_name]
        return error, sha1_hash, repo_release_tag
    raise ValueError("Pretrained model for {name} is not available.".format(name=model_name))
def get_model_file(model_name,
                   local_model_store_dir_path=os.path.join("~", ".keras", "models")):
    """
    Return location for the pretrained on local file system. This function will download from online model zoo when
    model cannot be found or has mismatch. The root directory will be created if it doesn't exist.

    Parameters:
    ----------
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns:
    -------
    file_path
        Path to the requested pretrained model file.

    Raises:
    ------
    ValueError
        If the downloaded file fails its SHA1 check.
    """
    error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
    file_name = "{name}-{error}-{short_sha1}.h5".format(
        name=model_name,
        error=error,
        short_sha1=sha1_hash[:8])
    root_dir_path = os.path.expanduser(local_model_store_dir_path)
    file_path = os.path.join(root_dir_path, file_name)
    # Fast path: a cached file with a matching checksum is reused as-is.
    if os.path.exists(file_path):
        if _check_sha1(file_path, sha1_hash):
            return file_path
        logging.warning("Mismatch in the content of model file detected. Downloading again.")
    else:
        logging.info("Model file not found. Downloading to {}.".format(file_path))
    if not os.path.exists(root_dir_path):
        os.makedirs(root_dir_path)
    # Weights are published as zipped release assets; fetch, unpack, clean up.
    zip_file_path = file_path + ".zip"
    url = "{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format(
        repo_url=imgclsmob_repo_url,
        repo_release_tag=repo_release_tag,
        file_name=file_name)
    _download(
        url=url,
        path=zip_file_path,
        overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(root_dir_path)
    os.remove(zip_file_path)
    if not _check_sha1(file_path, sha1_hash):
        raise ValueError("Downloaded file has different hash. Please try again.")
    return file_path
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
    """Download a given URL.
    Parameters:
    ----------
    url : str
        URL to download.
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    retries : integer, default 5
        The number of times to attempt the download in case of failure or non 200 return codes.
    verify_ssl : bool, default True
        Verify SSL certificates.
    Returns:
    -------
    str
        The file path of the downloaded file.
    """
    import warnings
    try:
        import requests
    except ImportError:
        # Defer the failure: the placeholder class only blows up if a download is attempted.
        class requests_failed_to_import(object):
            pass
        requests = requests_failed_to_import
    if path is None:
        # No destination given: derive the file name from the last URL segment.
        fname = url.split("/")[-1]
        # Empty filenames are invalid
        assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            # A directory was given: keep the file name from the URL.
            fname = os.path.join(path, url.split('/')[-1])
        else:
            fname = path
    assert retries >= 0, "Number of retries should be at least 0"
    if not verify_ssl:
        warnings.warn("Unverified HTTPS request is being made (verify_ssl=False). Adding certificate verification"
                      " is strongly advised.")
    # Skip the download entirely when a valid file already exists (unless overwrite is forced).
    if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        while retries + 1 > 0:
            # Disable pylint too broad Exception
            # pylint: disable=W0703
            try:
                print("Downloading {} from {}...".format(fname, url))
                r = requests.get(url, stream=True, verify=verify_ssl)
                if r.status_code != 200:
                    raise RuntimeError("Failed downloading url {}".format(url))
                with open(fname, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
                if sha1_hash and not _check_sha1(fname, sha1_hash):
                    raise UserWarning("File {} is downloaded but the content hash does not match."
                                      " The repo may be outdated or download may be incomplete. "
                                      "If the `repo_url` is overridden, consider switching to "
                                      "the default repo.".format(fname))
                break
            except Exception as e:
                # Any failure (HTTP error, hash mismatch, I/O) consumes one retry.
                retries -= 1
                if retries <= 0:
                    raise e
                else:
                    print("download failed, retrying, {} attempt{} left"
                          .format(retries, "s" if retries > 1 else ""))
    return fname
def _check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters:
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns:
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, "rb") as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
def _preprocess_weights_for_loading(layer,
                                    weights):
    """
    Converts layer weight arrays (convolution kernel layouts) for the current backend.
    Parameters:
    ----------
    layer : Layer
        Layer instance.
    weights : list of np.array
        List of weights values.
    Returns:
    -------
    list of np.array
        A list of weights values (modified in place for convolution kernels).
    """
    is_channels_first = (K.image_data_format() == "channels_first")
    if ((K.backend() == "mxnet") and (not is_channels_first)) or (K.backend() == "tensorflow"):
        # Transpose convolution kernels to a channels-last layout for these backends.
        # Presumably the stored kernels are in MXNet's channels-first layout
        # (out, in, kh, kw) -- TODO confirm against the zoo checkpoint format.
        if layer.__class__.__name__ == "Conv2D":
            weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
        elif layer.__class__.__name__ == "DepthwiseConv2D":
            weights[0] = np.transpose(weights[0], (2, 3, 0, 1))
    # Sanity check: every (possibly converted) array must match the target variable's shape.
    for i in range(len(weights)):
        assert (K.int_shape(layer.weights[i]) == weights[i].shape)
    return weights
def _load_weights_from_hdf5_group(f,
                                  layers):
    """
    Implements topological (order-based) weight loading: the k-th saved layer with weights
    is assigned to the k-th model layer with weights.
    Parameters:
    ----------
    f : File
        A pointer to a HDF5 group.
    layers : list of Layer
        List of target layers.
    """
    # Keep only the model layers that actually own weights.
    filtered_layers = []
    for layer in layers:
        weights = layer.weights
        if weights:
            filtered_layers.append(layer)
    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    # Likewise keep only the saved layers that carry weights.
    filtered_layer_names = []
    for name in layer_names:
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        if weight_names:
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(filtered_layers):
        raise ValueError("You are trying to load a weight file "
                         "containing " + str(len(layer_names)) +
                         " layers into a model with " +
                         str(len(filtered_layers)) + " layers.")
    # Pair saved weight arrays with model variables strictly by position.
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
        layer = filtered_layers[k]
        symbolic_weights = layer.weights
        # Convert kernel layouts for the current backend before assignment.
        weight_values = _preprocess_weights_for_loading(
            layer=layer,
            weights=weight_values)
        if len(weight_values) != len(symbolic_weights):
            raise ValueError("Layer #" + str(k) +
                             " (named `" + layer.name +
                             "` in the current model) was found to "
                             "correspond to layer " + name +
                             " in the save file. "
                             "However the new layer " + layer.name +
                             " expects " + str(len(symbolic_weights)) +
                             " weights, but the saved weights have " +
                             str(len(weight_values)) +
                             " elements.")
        weight_value_tuples += zip(symbolic_weights, weight_values)
    # Assign all values in one batched backend call.
    K.batch_set_value(weight_value_tuples)
def _load_weights_from_hdf5_group_by_name(f,
                                          layers):
    """
    Implements name-based weight loading: saved layers are matched to model layers by
    layer name, and mismatching entries are skipped with a warning instead of raising.
    Parameters:
    ----------
    f : File
        A pointer to a HDF5 group.
    layers : list of Layer
        List of target layers.
    """
    # New file format.
    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    # Reverse index of layer name to list of layers with name.
    index = {}
    for layer in layers:
        if layer.name:
            index.setdefault(layer.name, []).append(layer)
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
        for layer in index.get(name, []):
            symbolic_weights = layer.weights
            # Convert kernel layouts for the current backend before assignment.
            weight_values = _preprocess_weights_for_loading(
                layer=layer,
                weights=weight_values)
            if len(weight_values) != len(symbolic_weights):
                warnings.warn("Skipping loading of weights for layer {} due to mismatch in number of weights ({} vs"
                              " {}).".format(layer, len(symbolic_weights), len(weight_values)))
                continue
            # Set values.
            for i in range(len(weight_values)):
                symbolic_shape = K.int_shape(symbolic_weights[i])
                if symbolic_shape != weight_values[i].shape:
                    # Shape mismatch: skip just this weight, keep loading the rest.
                    warnings.warn("Skipping loading of weights for layer {} due to mismatch in shape ({} vs"
                                  " {}).".format(layer, symbolic_weights[i].shape, weight_values[i].shape))
                    continue
                else:
                    weight_value_tuples.append((symbolic_weights[i],
                                                weight_values[i]))
    # Assign all collected values in one batched backend call.
    K.batch_set_value(weight_value_tuples)
def load_model(net,
               file_path,
               skip_mismatch=False):
    """
    Load model state dictionary from a file.
    Parameters:
    ----------
    net : Model
        Network in which weights are loaded.
    file_path : str
        Path to the file.
    skip_mismatch : bool, default False
        Whether to skip loading of layers with wrong names.
    """
    # if (K.backend() == "mxnet") and (K.image_data_format() == "channels_first"):
    #     net.load_weights(filepath=file_path, by_name=skip_mismatch)
    #     return
    with h5py.File(file_path, mode='r') as f:
        # Keras model files may nest the weights under a 'model_weights' group.
        if ("layer_names" not in f.attrs) and ("model_weights" in f):
            f = f["model_weights"]
        if ("keras_version" not in f.attrs) or ("backend" not in f.attrs):
            raise ImportError("Unsupported version of Keras checkpoint file.")
        # original_keras_version = f.attrs["keras_version"].decode("utf8")
        original_backend = f.attrs["backend"].decode("utf8")
        # Only checkpoints saved with the MXNet backend are supported; kernel layouts
        # are converted on load (see _preprocess_weights_for_loading).
        assert (original_backend == "mxnet")
        if skip_mismatch:
            # Name-based loading tolerates missing or renamed layers.
            _load_weights_from_hdf5_group_by_name(
                f=f,
                layers=net.layers)
        else:
            # Order-based loading requires an exact one-to-one layer correspondence.
            _load_weights_from_hdf5_group(
                f=f,
                layers=net.layers)
def download_model(net,
                   model_name,
                   local_model_store_dir_path=os.path.join("~", ".keras", "models")):
    """
    Load model weights from a local file, downloading the file first if necessary.
    Parameters:
    ----------
    net : Model
        Network in which weights are loaded.
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    # Resolve (and if needed fetch) the weight file, then load it into the network.
    weights_file_path = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=local_model_store_dir_path)
    load_model(
        net=net,
        file_path=weights_file_path)
| 27,367 | 49.869888 | 116 | py |
imgclsmob | imgclsmob-master/keras_/kerascv/models/zfnet.py | """
ZFNet for ImageNet-1K, implemented in Keras.
Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
"""
__all__ = ['zfnet', 'zfnetb']
import os
from .common import is_channels_first
from .alexnet import alexnet_model
def get_zfnet(version="a",
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".keras", "models"),
              **kwargs):
    """
    Create ZFNet model with specific parameters.
    Parameters:
    ----------
    version : str, default 'a'
        Version of ZFNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    # The two versions share all convolution hyper-parameters and differ only in the
    # channel counts of the third stage.
    if version == "a":
        channels = [[96], [256], [384, 384, 256]]
        kernel_sizes = [[7], [5], [3, 3, 3]]
        strides = [[2], [2], [1, 1, 1]]
        paddings = [[1], [0], [1, 1, 1]]
        use_lrn = True
    elif version == "b":
        channels = [[96], [256], [512, 1024, 512]]
        kernel_sizes = [[7], [5], [3, 3, 3]]
        strides = [[2], [2], [1, 1, 1]]
        paddings = [[1], [0], [1, 1, 1]]
        use_lrn = True
    else:
        raise ValueError("Unsupported ZFNet version {}".format(version))
    # ZFNet reuses the AlexNet topology with different convolution parameters.
    net = alexnet_model(
        channels=channels,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        use_lrn=use_lrn,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def zfnet(**kwargs):
    """
    ZFNet model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_zfnet(
        model_name="zfnet",
        **kwargs)
def zfnetb(**kwargs):
    """
    ZFNet-b model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_zfnet(
        version="b",
        model_name="zfnetb",
        **kwargs)
def _test():
    """
    Smoke-test the ZFNet constructors: check trainable parameter counts and
    the output shape of one forward pass on a zero image.
    """
    import numpy as np
    import keras
    pretrained = False
    models = [
        zfnet,
        zfnetb,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.summary()
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts for each variant.
        assert (model != zfnet or weight_count == 62357608)
        assert (model != zfnetb or weight_count == 107627624)
        if is_channels_first():
            x = np.zeros((1, 3, 224, 224), np.float32)
        else:
            x = np.zeros((1, 224, 224, 3), np.float32)
        y = net.predict(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 3,608 | 27.872 | 115 | py |
imgclsmob | imgclsmob-master/keras_/kerascv/models/darknet53.py | """
DarkNet-53 for ImageNet-1K, implemented in Keras.
Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
"""
__all__ = ['darknet53_model', 'darknet53']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten
def dark_unit(x,
              in_channels,
              out_channels,
              alpha,
              name="dark_unit"):
    """
    DarkNet unit: a residual bottleneck (1x1 reduce, then 3x3 expand) with an additive
    identity shortcut. The shortcut add requires in_channels == out_channels.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    name : str, default 'dark_unit'
        Unit name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    assert (out_channels % 2 == 0)
    # The bottleneck squeezes to half the output channel count.
    mid_channels = out_channels // 2
    identity = x
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activation=nn.LeakyReLU(
            alpha=alpha,
            name=name + "/conv1/activ"),
        name=name + "/conv1")
    x = conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=nn.LeakyReLU(
            alpha=alpha,
            name=name + "/conv2/activ"),
        name=name + "/conv2")
    # Residual connection.
    x = nn.add([x, identity], name=name + "/add")
    return x
def darknet53_model(channels,
                    init_block_channels,
                    alpha=0.1,
                    in_channels=3,
                    in_size=(224, 224),
                    classes=1000):
    """
    DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    Returns:
    -------
    Model
        The constructed Keras model.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)
    x = conv3x3_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        activation=nn.LeakyReLU(
            alpha=alpha,
            name="features/init_block/activ"),
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            if j == 0:
                # The first unit of each stage downsamples with a strided 3x3 convolution.
                # NOTE(review): the activation layer name uses "active", unlike "activ"
                # elsewhere in this file; kept as-is because layer names must match the
                # pretrained checkpoints.
                x = conv3x3_block(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=2,
                    activation=nn.LeakyReLU(
                        alpha=alpha,
                        name="features/stage{}/unit{}/active".format(i + 1, j + 1)),
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
            else:
                # The remaining units are residual bottleneck blocks.
                x = dark_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    alpha=alpha,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)
    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output")(x)
    model = Model(inputs=input, outputs=x)
    # Attach metadata used by downstream tooling.
    model.in_size = in_size
    model.classes = classes
    return model
def get_darknet53(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".keras", "models"),
                  **kwargs):
    """
    Create DarkNet-53 model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    init_block_channels = 32
    # Units per stage (each stage = 1 strided conv + residual units).
    layers = [2, 3, 9, 9, 5]
    channels_per_layers = [64, 128, 256, 512, 1024]
    # Expand to a per-unit channel list, constant within each stage.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = darknet53_model(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def darknet53(**kwargs):
    """
    DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_darknet53(
        model_name="darknet53",
        **kwargs)
def _test():
    """
    Smoke-test the DarkNet-53 constructor: check the trainable parameter count and
    the output shape of one forward pass on a zero image.
    """
    import numpy as np
    import keras
    pretrained = False
    models = [
        darknet53,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.summary()
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter count for the variant.
        assert (model != darknet53 or weight_count == 41609928)
        if is_channels_first():
            x = np.zeros((1, 3, 224, 224), np.float32)
        else:
            x = np.zeros((1, 224, 224, 3), np.float32)
        y = net.predict(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 6,500 | 28.684932 | 115 | py |
imgclsmob | imgclsmob-master/keras_/kerascv/models/mobilenet.py | """
MobileNet & FD-MobileNet for ImageNet-1K, implemented in Keras.
Original papers:
- 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
- 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
"""
__all__ = ['mobilenet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'fdmobilenet_w1',
'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, is_channels_first, flatten
def dws_conv_block(x,
                   in_channels,
                   out_channels,
                   strides,
                   name="dws_conv_block"):
    """
    Depthwise separable convolution block with BatchNorms and activations at each convolution layers. It is used as
    a MobileNet unit.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    name : str, default 'dws_conv_block'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Spatial filtering: per-channel 3x3 depthwise convolution (stride applied here).
    x = dwconv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=in_channels,
        strides=strides,
        name=name + "/dw_conv")
    # Channel mixing: 1x1 pointwise convolution changes the channel count.
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        name=name + "/pw_conv")
    return x
def mobilenet(channels,
              first_stage_stride,
              in_channels=3,
              in_size=(224, 224),
              classes=1000):
    """
    MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
    https://arxiv.org/abs/1704.04861. Also this class implements FD-MobileNet from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    first_stage_stride : bool
        Whether stride is used at the first stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    Returns:
    -------
    Model
        The constructed Keras model.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)
    # channels[0] holds the initial-block width; the remaining entries are per-stage units.
    init_block_channels = channels[0][0]
    x = conv3x3_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        strides=2,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels[1:]):
        for j, out_channels in enumerate(channels_per_stage):
            # Downsample at the first unit of each stage (the very first stage only
            # when first_stage_stride is set, i.e. the FD-MobileNet configuration).
            strides = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1
            x = dws_conv_block(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)
    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output")(x)
    model = Model(inputs=input, outputs=x)
    # Attach metadata used by downstream tooling.
    model.in_size = in_size
    model.classes = classes
    return model
def get_mobilenet(version,
                  width_scale,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".keras", "models"),
                  **kwargs):
    """
    Create MobileNet or FD-MobileNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MobileNet ('orig' for MobileNet or 'fd' for FD-MobileNet).
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    if version == 'orig':
        channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]]
        first_stage_stride = False
    elif version == 'fd':
        # FD-MobileNet: one fewer stage; downsampling starts at the first stage.
        channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]]
        first_stage_stride = True
    else:
        raise ValueError("Unsupported MobileNet version {}".format(version))
    if width_scale != 1.0:
        # Uniformly scale every channel count (truncated to int).
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
    net = mobilenet(
        channels=channels,
        first_stage_stride=first_stage_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def mobilenet_w1(**kwargs):
    """
    1.0 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="orig",
        width_scale=1.0,
        model_name="mobilenet_w1",
        **kwargs)
def mobilenet_w3d4(**kwargs):
    """
    0.75 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="orig",
        width_scale=0.75,
        model_name="mobilenet_w3d4",
        **kwargs)
def mobilenet_wd2(**kwargs):
    """
    0.5 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="orig",
        width_scale=0.5,
        model_name="mobilenet_wd2",
        **kwargs)
def mobilenet_wd4(**kwargs):
    """
    0.25 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="orig",
        width_scale=0.25,
        model_name="mobilenet_wd4",
        **kwargs)
def fdmobilenet_w1(**kwargs):
    """
    FD-MobileNet 1.0x from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="fd",
        width_scale=1.0,
        model_name="fdmobilenet_w1",
        **kwargs)
def fdmobilenet_w3d4(**kwargs):
    """
    FD-MobileNet 0.75x from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="fd",
        width_scale=0.75,
        model_name="fdmobilenet_w3d4",
        **kwargs)
def fdmobilenet_wd2(**kwargs):
    """
    FD-MobileNet 0.5x from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="fd",
        width_scale=0.5,
        model_name="fdmobilenet_wd2",
        **kwargs)
def fdmobilenet_wd4(**kwargs):
    """
    FD-MobileNet 0.25x from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_mobilenet(
        version="fd",
        width_scale=0.25,
        model_name="fdmobilenet_wd4",
        **kwargs)
def _test():
    """
    Smoke-test all MobileNet / FD-MobileNet constructors: check trainable parameter
    counts and the output shape of one forward pass on a zero image.
    """
    import numpy as np
    import keras
    pretrained = False
    models = [
        mobilenet_w1,
        mobilenet_w3d4,
        mobilenet_wd2,
        mobilenet_wd4,
        fdmobilenet_w1,
        fdmobilenet_w3d4,
        fdmobilenet_wd2,
        fdmobilenet_wd4,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.summary()
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts for each variant.
        assert (model != mobilenet_w1 or weight_count == 4231976)
        assert (model != mobilenet_w3d4 or weight_count == 2585560)
        assert (model != mobilenet_wd2 or weight_count == 1331592)
        assert (model != mobilenet_wd4 or weight_count == 470072)
        assert (model != fdmobilenet_w1 or weight_count == 2901288)
        assert (model != fdmobilenet_w3d4 or weight_count == 1833304)
        assert (model != fdmobilenet_wd2 or weight_count == 993928)
        assert (model != fdmobilenet_wd4 or weight_count == 383160)
        if is_channels_first():
            x = np.zeros((1, 3, 224, 224), np.float32)
        else:
            x = np.zeros((1, 224, 224, 3), np.float32)
        y = net.predict(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 11,133 | 32.136905 | 119 | py |
imgclsmob | imgclsmob-master/keras_/kerascv/models/darknet.py | """
DarkNet for ImageNet-1K, implemented in Keras.
Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
"""
__all__ = ['darknet', 'darknet_ref', 'darknet_tiny', 'darknet19']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten
def dark_convYxY(x,
                 in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 name="dark_convYxY"):
    """
    DarkNet unit: a single convolution block (1x1 or 3x3) with Leaky ReLU activation.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    name : str, default 'dark_convYxY'
        Block name.
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if pointwise:
        return conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha, name=name + "/activ"),
            name=name)
    else:
        return conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha, name=name + "/activ"),
            name=name)
def darknet(channels,
            odd_pointwise,
            avg_pool_size,
            cls_activ,
            alpha=0.1,
            in_channels=3,
            in_size=(224, 224),
            classes=1000):
    """
    DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    odd_pointwise : bool
        Whether pointwise convolution layer is used for each odd unit.
    avg_pool_size : int
        Window size of the final average pooling.
    cls_activ : bool
        Whether classification convolution layer uses an activation.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    Returns:
    -------
    Model
        The constructed Keras model.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)
    x = input
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            # Within multi-unit stages, 1x1 (pointwise) and 3x3 convolutions alternate;
            # `odd_pointwise` selects which parity of unit gets the 1x1 convolutions.
            # Single-unit stages always use 3x3.
            x = dark_convYxY(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                alpha=alpha,
                pointwise=(len(channels_per_stage) > 1) and not(((j + 1) % 2 == 1) ^ odd_pointwise),
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
        if i != len(channels) - 1:
            # Downsample between stages; no pooling after the last stage.
            x = nn.MaxPool2D(
                pool_size=2,
                strides=2,
                name="features/pool{}".format(i + 1))(x)
    # Classification head: 1x1 convolution to `classes` channels, then average pooling.
    x = nn.Conv2D(
        filters=classes,
        kernel_size=1,
        name="output/final_conv")(x)
    if cls_activ:
        x = nn.LeakyReLU(alpha=alpha, name="output/final_activ")(x)
    x = nn.AvgPool2D(
        pool_size=avg_pool_size,
        strides=1,
        name="output/final_pool")(x)
    # x = nn.Flatten()(x)
    x = flatten(x)
    model = Model(inputs=input, outputs=x)
    # Attach metadata used by downstream tooling.
    model.in_size = in_size
    model.classes = classes
    return model
def get_darknet(version,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".keras", "models"),
                **kwargs):
    """
    Create DarkNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of DarkNet ('ref', 'tiny' or '19').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    # Each version fixes the per-stage channel layout and head configuration.
    if version == "ref":
        channels = [[16], [32], [64], [128], [256], [512], [1024]]
        odd_pointwise = False
        avg_pool_size = 3
        cls_activ = True
    elif version == "tiny":
        channels = [[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]]
        odd_pointwise = True
        avg_pool_size = 14
        cls_activ = False
    elif version == "19":
        channels = [[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512],
                    [1024, 512, 1024, 512, 1024]]
        odd_pointwise = False
        avg_pool_size = 7
        cls_activ = False
    else:
        raise ValueError("Unsupported DarkNet version {}".format(version))
    net = darknet(
        channels=channels,
        odd_pointwise=odd_pointwise,
        avg_pool_size=avg_pool_size,
        cls_activ=cls_activ,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def darknet_ref(**kwargs):
    """
    DarkNet 'Reference' model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_darknet(
        version="ref",
        model_name="darknet_ref",
        **kwargs)
def darknet_tiny(**kwargs):
    """
    DarkNet Tiny model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_darknet(
        version="tiny",
        model_name="darknet_tiny",
        **kwargs)
def darknet19(**kwargs):
    """
    DarkNet-19 model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    Model
        The requested network.
    """
    return get_darknet(
        version="19",
        model_name="darknet19",
        **kwargs)
def _test():
    """
    Smoke-test the DarkNet constructors: check trainable parameter counts and
    the output shape of one forward pass on a zero image.
    """
    import numpy as np
    import keras
    pretrained = False
    # Run in inference mode (affects layers such as BatchNorm).
    keras.backend.set_learning_phase(0)
    models = [
        darknet_ref,
        darknet_tiny,
        darknet19,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.summary()
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts for each variant.
        assert (model != darknet_ref or weight_count == 7319416)
        assert (model != darknet_tiny or weight_count == 1042104)
        assert (model != darknet19 or weight_count == 20842376)
        if is_channels_first():
            x = np.zeros((1, 3, 224, 224), np.float32)
        else:
            x = np.zeros((1, 224, 224, 3), np.float32)
        y = net.predict(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 8,104 | 29.935115 | 116 | py |
imgclsmob | imgclsmob-master/keras_/kerascv/models/alexnet.py | """
AlexNet for ImageNet-1K, implemented in Keras.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['alexnet_model', 'alexnet', 'alexnetb']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv_block, maxpool2d, is_channels_first, flatten, lrn
def alex_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              strides,
              padding,
              use_lrn,
              name="alex_conv"):
    """
    AlexNet specific convolution block: a biased convolution without batch
    normalization, optionally followed by local response normalization.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_lrn : bool
        Whether to use LRN layer.
    name : str, default 'alex_conv'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=True,
        use_bn=False,
        name=name + "/conv")
    # LRN is only used in the early stages of the classic AlexNet variant.
    return lrn(out) if use_lrn else out
def alex_dense(x,
               in_channels,
               out_channels,
               name="alex_dense"):
    """
    AlexNet specific dense block: Dense -> ReLU -> Dropout(0.5).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'alex_dense'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = nn.Dense(
        units=out_channels,
        input_dim=in_channels,
        name=name + "/fc")(x)
    out = nn.Activation("relu", name=name + "/activ")(out)
    # Dropout rate of 0.5 as in the original AlexNet classifier head.
    out = nn.Dropout(
        rate=0.5,
        name=name + "/dropout")(out)
    return out
def alex_output_block(x,
                      in_channels,
                      classes,
                      name="alex_output_block"):
    """
    AlexNet specific output (classifier) block: two 4096-unit dense blocks
    followed by the final logits layer.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    name : str, default 'alex_output_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    hidden_units = 4096

    out = alex_dense(
        x=x,
        in_channels=in_channels,
        out_channels=hidden_units,
        name=name + "/fc1")
    out = alex_dense(
        x=out,
        in_channels=hidden_units,
        out_channels=hidden_units,
        name=name + "/fc2")
    # Final layer produces raw class logits (no activation here).
    out = nn.Dense(
        units=classes,
        input_dim=hidden_units,
        name=name + "/fc3")(out)
    return out
def alexnet_model(channels,
                  kernel_sizes,
                  strides,
                  paddings,
                  use_lrn,
                  in_channels=3,
                  in_size=(224, 224),
                  classes=1000):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    kernel_sizes : list of list of int
        Convolution window sizes for each unit.
    strides : list of list of int or tuple/list of 2 int
        Strides of the convolution for each unit.
    paddings : list of list of int or tuple/list of 2 int
        Padding value for convolution layer for each unit.
    use_lrn : bool
        Whether to use LRN layer.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.

    Returns:
    -------
    keras.models.Model
        Functional model; `in_size` and `classes` are attached as attributes.
    """
    # Input layout (NCHW vs NHWC) depends on the backend image data format.
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = input
    for i, channels_per_stage in enumerate(channels):
        # LRN (if enabled at all) is applied only in the first two stages.
        use_lrn_i = use_lrn and (i in [0, 1])
        for j, out_channels in enumerate(channels_per_stage):
            x = alex_conv(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_sizes[i][j],
                strides=strides[i][j],
                padding=paddings[i][j],
                use_lrn=use_lrn_i,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
        # Every stage ends with an overlapping 3x3/stride-2 max-pool.
        x = maxpool2d(
            x=x,
            pool_size=3,
            strides=2,
            padding=0,
            ceil_mode=True,
            name="features/stage{}/pool".format(i + 1))
    # The classifier head assumes a 6x6 final feature map (224x224 input).
    x = flatten(x, reshape=True)
    x = alex_output_block(
        x=x,
        in_channels=(in_channels * 6 * 6),
        classes=classes,
        name="output")

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model
def get_alexnet(version="a",
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".keras", "models"),
                **kwargs):
    """
    Create AlexNet model with specific parameters.

    Parameters:
    ----------
    version : str, default 'a'
        Version of AlexNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    # Per-version architecture hyper-parameters.
    configs = {
        "a": dict(
            channels=[[96], [256], [384, 384, 256]],
            kernel_sizes=[[11], [5], [3, 3, 3]],
            strides=[[4], [1], [1, 1, 1]],
            paddings=[[0], [2], [1, 1, 1]],
            use_lrn=True),
        "b": dict(
            channels=[[64], [192], [384, 256, 256]],
            kernel_sizes=[[11], [5], [3, 3, 3]],
            strides=[[4], [1], [1, 1, 1]],
            paddings=[[2], [2], [1, 1, 1]],
            use_lrn=False),
    }
    if version not in configs:
        raise ValueError("Unsupported AlexNet version {}".format(version))

    net = alexnet_model(**configs[version], **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def alexnet(**kwargs):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_alexnet(model_name="alexnet", **kwargs)
    return net
def alexnetb(**kwargs):
    """
    AlexNet-b model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997. Non-standard version.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    net = get_alexnet(version="b", model_name="alexnetb", **kwargs)
    return net
def _test():
    """Smoke test: build both AlexNet variants, check parameter counts and
    output shape on a zero input batch."""
    import numpy as np
    import keras

    pretrained = False

    # Expected trainable-parameter count per model factory.
    expected_counts = {
        alexnet: 62378344,
        alexnetb: 61100840,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        # net.summary()
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        # Forward a dummy batch to verify the classifier output shape.
        if is_channels_first():
            batch = np.zeros((1, 3, 224, 224), np.float32)
        else:
            batch = np.zeros((1, 224, 224, 3), np.float32)
        y = net.predict(batch)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 9,197 | 27.301538 | 115 | py |
ZOC | ZOC-main/cifarplus_eval.py | import argparse
import torch
from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig
import os
from dataloaders.ZO_Clip_loaders import cifarplus_loader
from clip.simple_tokenizer import SimpleTokenizer as clip_tokenizer
from tqdm import tqdm
import copy
import numpy as np
from sklearn.metrics import roc_auc_score
def tokenize_for_clip(batch_sentences, tokenizer):
    """Encode a batch of sentences as fixed-length CLIP token-id tensors.

    Each sentence is wrapped with CLIP's start/end-of-text tokens and
    right-padded with zeros to the CLIP default context length of 77.

    Parameters:
    ----------
    batch_sentences : list of str
    tokenizer : CLIP tokenizer exposing `encoder` (vocab dict) and `encode`.

    Returns:
    -------
    torch.LongTensor of shape (len(batch_sentences), 77).
    """
    context_length = 77  # CLIP default
    sot = tokenizer.encoder['<|startoftext|>']
    eot = tokenizer.encoder['<|endoftext|>']
    rows = []
    for sentence in batch_sentences:
        token_ids = [sot] + tokenizer.encode(sentence) + [eot]
        row = torch.zeros(context_length, dtype=torch.long)
        row[:len(token_ids)] = torch.tensor(token_ids)
        rows.append(row)
    return torch.stack(rows)
def greedysearch_generation_topk(clip_embed):
    """Greedily decode a token sequence from a CLIP image embedding.

    At each step the (module-level) BERT decoder is fed the tokens decoded so
    far, cross-attending to `clip_embed`; the argmax token is appended and the
    per-step top-35 candidate token ids are collected. Decoding stops after at
    most 9 generated tokens, since the entity word is expected early.

    Parameters:
    ----------
    clip_embed : torch.FloatTensor
        Extended CLIP image embedding, shape (1, embed_dim); used as the
        encoder hidden state for cross-attention.

    Returns:
    -------
    target_list : list of torch.Tensor
        BOS id followed by the greedily decoded token ids.
    top_k_list : torch.Tensor
        Concatenated top-35 candidate token ids from every decoding step.
    """
    max_len = 77
    N = 1  # batch has single sample
    target_list = [torch.tensor(berttokenizer.bos_token_id)]
    top_k_list = []
    bert_model.eval()
    for i in range(max_len):
        target = torch.LongTensor(target_list).unsqueeze(0)  # (1, seq_len)
        seq_len = target.shape[1]
        # BUG FIX: the original used len(target), which is the batch dimension
        # (always 1), so every token received position id 0 and a length-1
        # attention mask. Training (see train_decoder) builds position ids over
        # the full sequence length, so decoding must do the same.
        position_ids = torch.arange(0, seq_len).expand(N, seq_len).to(device)
        with torch.no_grad():
            out = bert_model(input_ids=target.to(device),
                             position_ids=position_ids,
                             attention_mask=torch.ones(N, seq_len).to(device),
                             encoder_hidden_states=clip_embed.unsqueeze(1).to(device),
                             )
        pred_idx = out.logits.argmax(2)[:, -1]
        _, top_k = torch.topk(out.logits, dim=2, k=35)
        top_k_list.append(top_k[:, -1].flatten())
        target_list.append(pred_idx)
        # the entity word is expected within at most the first 10 tokens
        if len(target_list) == 10:
            break
    top_k_list = torch.cat(top_k_list)
    return target_list, top_k_list
def image_decoder(clip_model, berttokenizer, device, in_loader, out_loaders):
    """Zero-shot OOD scoring for CIFAR+ splits; prints AUROC per OOD loader.

    For every image, candidate labels are generated by the (module-level) BERT
    decoder from the CLIP embedding; the OOD score is the zero-shot probability
    mass assigned to those generated labels versus the four seen-label prompts.

    Parameters:
    ----------
    clip_model : CLIP model exposing encode_image / encode_text.
    berttokenizer : tokenizer used to decode generated token ids to words.
    device : torch device used for CLIP image encoding.
    in_loader : DataLoader over in-distribution (seen-class) images.
    out_loaders : dict mapping OOD dataset name -> DataLoader (6 entries
        expected, matching `ood_probs_sum_list` below).

    NOTE(review): relies on module-level globals `cliptokenizer` and
    `greedysearch_generation_topk` (hence `bert_model`), and calls `.cuda()`
    directly, so a CUDA device is required.
    """
    seen_labels = ['airplane', 'automobile', 'ship', 'truck']
    seen_descriptions = [f"This is a photo of a {label}" for label in seen_labels]

    # Pass 1: score all in-distribution images.
    in_probs_sum = []
    max_num_entities = 0
    for idx, (image, label_idx) in enumerate(tqdm(in_loader)):
        #if idx==10:break
        with torch.no_grad():
            clip_out = clip_model.encode_image(image.to(device)).float()
            # Decoder was trained on a doubled CLIP embedding; replicate here.
            clip_extended_embed = clip_out.repeat(1, 2).type(torch.FloatTensor)

            #greedy generation of candidate (unseen) label words
            target_list, topk_list = greedysearch_generation_topk(clip_extended_embed)

            target_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in target_list]
            topk_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in topk_list]

            # NOTE(review): unlike the cifar10 script, seen labels are NOT
            # removed from the generated entity set here — confirm intended.
            unique_entities = list(set(topk_tokens))
            if len(unique_entities) > max_num_entities:
                max_num_entities = len(unique_entities)
            all_desc = seen_descriptions + [f"This is a photo of a {label}" for label in unique_entities]
            all_desc_ids = tokenize_for_clip(all_desc, cliptokenizer)

            image_feature = clip_model.encode_image(image.cuda()).float()
            image_feature /= image_feature.norm(dim=-1, keepdim=True)

            text_features = clip_model.encode_text(all_desc_ids.cuda()).float()
            text_features /= text_features.norm(dim=-1, keepdim=True)

            # print(image_features.size(), text_features.size())
            zeroshot_probs = (100.0 * image_feature @ text_features.T).softmax(dim=-1).squeeze()

            #detection score is accumulative sum of probs of generated entities
            ood_prob_sum = np.sum(zeroshot_probs[len(seen_labels):].detach().cpu().numpy())
            in_probs_sum.append(ood_prob_sum)
    print('maximum number of predicted entities', max_num_entities)

    # Pass 2: score every OOD loader with the same procedure.
    ood_probs_sum_list = [[],[],[],[],[],[]]
    for i, out_loader_name in enumerate(list(out_loaders.keys())):
        print(out_loader_name)
        out_loader = out_loaders[out_loader_name]
        for idx, (image, label_idx) in enumerate(tqdm(out_loader)):
            #if idx==10:break
            with torch.no_grad():
                clip_out = clip_model.encode_image(image.to(device)).float()
                clip_extended_embed = clip_out.repeat(1, 2).type(torch.FloatTensor)

                #greedy generation
                target_list, topk_list = greedysearch_generation_topk(clip_extended_embed)

                target_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in target_list]
                topk_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in topk_list]

                unique_entities = list(set(topk_tokens))
                if len(unique_entities) > max_num_entities:
                    max_num_entities = len(unique_entities)
                all_desc = seen_descriptions + [f"This is a photo of a {label}" for label in unique_entities]
                #print(all_desc)
                all_desc_ids = tokenize_for_clip(all_desc, cliptokenizer)

                image_feature = clip_model.encode_image(image.cuda()).float()
                image_feature /= image_feature.norm(dim=-1, keepdim=True)

                text_features = clip_model.encode_text(all_desc_ids.cuda()).float()
                text_features /= text_features.norm(dim=-1, keepdim=True)

                # print(image_features.size(), text_features.size())
                zeroshot_probs = (100.0 * image_feature @ text_features.T).softmax(dim=-1).squeeze()

                #detection score is accumulative sum of probs of generated entities
                ood_prob_sum = np.sum(zeroshot_probs[len(seen_labels):].detach().cpu().numpy())
                ood_probs_sum_list[i].append(ood_prob_sum)
    print('maximum number of predicted entities', max_num_entities)

    # AUROC per OOD dataset: label 0 = in-distribution, 1 = OOD.
    for i, out_loader_name in enumerate(out_loaders.keys()):
        out_loader = out_loaders[out_loader_name]
        targets = torch.tensor(len(in_loader.dataset)*[0] + len(out_loader.dataset)*[1])
        #targets = torch.tensor(10*[0] + 10*[1])
        probs_sum = copy.deepcopy(in_probs_sum)
        probs_sum.extend(ood_probs_sum_list[i])
        auc_sum = roc_auc_score(np.array(targets), np.squeeze(probs_sum))
        print(' OOD dataset : {}, sum_ood AUROC={}'.format(out_loader_name, auc_sum))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--trained_path', type=str, default='./trained_models/COCO/')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Checkpoint directory for the trained BERT decoder.
    args.saved_model_path = args.trained_path + '/ViT-B32/'
    if not os.path.exists(args.saved_model_path):
        os.makedirs(args.saved_model_path)

    # initialize tokenizers for clip and bert, these two use different tokenizers
    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')

    # Load the TorchScript CLIP image/text encoder.
    # NOTE(review): filename here is 'ViT-B-32' while the cifar10 script and
    # train_decoder use 'ViT-B32' — confirm which checkpoint name is on disk.
    clip_model = torch.jit.load(os.path.join('./trained_models', "{}.pt".format('ViT-B-32'))).to(device).eval()
    cliptokenizer = clip_tokenizer()

    # Build the BERT decoder with cross-attention and restore trained weights.
    bert_config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    bert_config.is_decoder=True
    bert_config.add_cross_attention=True
    bert_model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder',
                                                       config=bert_config).to(device).train()
    bert_model.load_state_dict(torch.load(args.saved_model_path + 'model.pt')['net'])

    # One in-distribution loader plus a dict of OOD loaders.
    in_loader, out_loaders = cifarplus_loader()
image_decoder(clip_model, berttokenizer, device, in_loader=in_loader, out_loaders=out_loaders) | 7,866 | 46.969512 | 122 | py |
ZOC | ZOC-main/cifar10_eval.py | import argparse
import torch
import os
from tqdm import tqdm
import numpy as np
from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig
from dataloaders.ZO_Clip_loaders import cifar10_single_isolated_class_loader
from clip.simple_tokenizer import SimpleTokenizer as clip_tokenizer
from sklearn.metrics import roc_auc_score
def tokenize_for_clip(batch_sentences, tokenizer):
    """Tokenize sentences into fixed-length (77) CLIP token-id rows.

    Each sentence becomes `[SOT] + encode(sentence) + [EOT]`, zero-padded on
    the right to CLIP's default context length.
    """
    context_length = 77  # CLIP default
    start_id = tokenizer.encoder['<|startoftext|>']
    end_id = tokenizer.encoder['<|endoftext|>']
    encoded_rows = []
    for text in batch_sentences:
        ids = [start_id] + tokenizer.encode(text) + [end_id]
        padded = torch.zeros(context_length, dtype=torch.long)
        padded[:len(ids)] = torch.tensor(ids)
        encoded_rows.append(padded)
    return torch.stack(encoded_rows)
def greedysearch_generation_topk(clip_embed):
    """Greedily decode a token sequence from a CLIP image embedding.

    At each step the (module-level) BERT decoder is fed the tokens decoded so
    far, cross-attending to `clip_embed`; the argmax token is appended and the
    per-step top-35 candidate token ids are collected. Decoding stops after at
    most 9 generated tokens, since the entity word is expected early.

    Parameters:
    ----------
    clip_embed : torch.FloatTensor
        Extended CLIP image embedding, shape (1, embed_dim); used as the
        encoder hidden state for cross-attention.

    Returns:
    -------
    target_list : list of torch.Tensor
        BOS id followed by the greedily decoded token ids.
    top_k_list : torch.Tensor
        Concatenated top-35 candidate token ids from every decoding step.
    """
    N = 1  # batch has single sample
    max_len = 77
    target_list = [torch.tensor(berttokenizer.bos_token_id)]
    top_k_list = []
    bert_model.eval()
    for i in range(max_len):
        target = torch.LongTensor(target_list).unsqueeze(0)  # (1, seq_len)
        seq_len = target.shape[1]
        # BUG FIX: the original used len(target), which is the batch dimension
        # (always 1), so every token received position id 0 and a length-1
        # attention mask. Training (see train_decoder) builds position ids over
        # the full sequence length, so decoding must do the same.
        position_ids = torch.arange(0, seq_len).expand(N, seq_len).to(device)
        with torch.no_grad():
            out = bert_model(input_ids=target.to(device),
                             position_ids=position_ids,
                             attention_mask=torch.ones(N, seq_len).to(device),
                             encoder_hidden_states=clip_embed.unsqueeze(1).to(device),
                             )
        pred_idx = out.logits.argmax(2)[:, -1]
        _, top_k = torch.topk(out.logits, dim=2, k=35)
        top_k_list.append(top_k[:, -1].flatten())
        target_list.append(pred_idx)
        # the entity word is expected within at most the first 10 tokens
        if len(target_list) == 10:
            break
    top_k_list = torch.cat(top_k_list)
    return target_list, top_k_list
def image_decoder(clip_model, berttokenizer, device, image_loaders=None):
    """Zero-shot OOD detection on CIFAR-10 ablation splits; prints AUROCs.

    For each split the first 6 classes are treated as seen and the remaining 4
    as OOD. Per image, candidate labels are generated by the (module-level)
    BERT decoder; the OOD score is the zero-shot probability mass on those
    generated (non-seen) labels.

    Parameters:
    ----------
    clip_model : CLIP model exposing encode_image / encode_text.
    berttokenizer : tokenizer used to decode generated token ids to words.
    device : torch device used for CLIP image encoding.
    image_loaders : dict mapping class name -> DataLoader over that class only.

    NOTE(review): `splits` below is defined but unused — only
    `ablation_splits` is evaluated. Relies on module-level globals
    `cliptokenizer` and `greedysearch_generation_topk` (hence `bert_model`),
    and calls `.cuda()` directly, so a CUDA device is required.
    """
    splits = [['airplane', 'automobile', 'truck', 'horse', 'cat', 'bird', 'ship', 'deer', 'dog', 'frog'],
              ['airplane', 'automobile', 'truck', 'horse', 'cat', 'bird', 'ship', 'deer', 'dog', 'frog'],
              ['airplane', 'bird', 'deer', 'cat', 'horse', 'dog', 'ship', 'automobile', 'frog', 'truck'],
              ['dog', 'automobile', 'truck', 'ship', 'horse', 'airplane', 'bird', 'cat', 'deer', 'frog'],
              ['dog', 'horse', 'automobile', 'ship', 'deer', 'frog', 'airplane', 'truck', 'bird', 'cat'],
              ['ship', 'automobile', 'dog', 'cat', 'deer', 'frog', 'airplane', 'truck', 'bird', 'horse']]
    ablation_splits = [['airplane', 'automobile', 'truck', 'horse', 'cat', 'bird', 'ship', 'dog', 'deer', 'frog'],
                       ['airplane', 'automobile', 'truck', 'bird', 'ship', 'frog', 'deer', 'dog', 'horse', 'cat']]

    auc_list_sum = []
    for split in ablation_splits:
        # First 6 classes are "seen"; the remaining 4 are OOD.
        seen_labels = split[:6]
        seen_descriptions = [f"This is a photo of a {label}" for label in seen_labels]
        # 6 seen classes x 1000 images -> label 0; 4 OOD classes x 1000 -> label 1.
        targets = torch.tensor(6000*[0] + 4000*[1])
        #targets = torch.tensor(8000*[0] + 2000*[1])
        #targets = torch.tensor(20 * [0] + 20 * [1])
        max_num_entities=0
        ood_probs_sum = []
        for i, semantic_label in enumerate(split):
            loader = image_loaders[semantic_label]
            for idx, image in enumerate(tqdm(loader)):
                #if idx==10:break
                with torch.no_grad():
                    clip_out = clip_model.encode_image(image.to(device)).float()
                    # Decoder was trained on a doubled CLIP embedding.
                    clip_extended_embed = clip_out.repeat(1, 2).type(torch.FloatTensor)

                    #greedy generation of candidate (unseen) label words
                    target_list, topk_list = greedysearch_generation_topk(clip_extended_embed)

                    target_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in target_list]
                    topk_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in topk_list]

                    # Seen labels are excluded from the generated entity set.
                    unique_entities = list(set(topk_tokens) - set(seen_labels))
                    if len(unique_entities) > max_num_entities:
                        max_num_entities = len(unique_entities)
                    all_desc = seen_descriptions + [f"This is a photo of a {label}" for label in unique_entities]
                    all_desc_ids = tokenize_for_clip(all_desc, cliptokenizer)

                    image_feature = clip_model.encode_image(image.cuda()).float()
                    image_feature /= image_feature.norm(dim=-1, keepdim=True)

                    text_features = clip_model.encode_text(all_desc_ids.cuda()).float()
                    text_features /= text_features.norm(dim=-1, keepdim=True)

                    zeroshot_probs = (100.0 * image_feature @ text_features.T).softmax(dim=-1).squeeze()

                    #detection score is accumulative sum of probs of generated entities
                    ood_prob_sum = np.sum(zeroshot_probs[6:].detach().cpu().numpy())
                    ood_probs_sum.append(ood_prob_sum)

        auc_sum = roc_auc_score(np.array(targets), np.squeeze(ood_probs_sum))
        print('sum_ood AUROC={}'.format(auc_sum))
        auc_list_sum.append(auc_sum)
    print('all auc scores:', auc_list_sum)
    print('auc sum', np.mean(auc_list_sum), np.std(auc_list_sum))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--trained_path', type=str, default='./trained_models/COCO/')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Checkpoint directory for the trained BERT decoder.
    args.saved_model_path = args.trained_path + '/ViT-B32/'
    if not os.path.exists(args.saved_model_path):
        os.makedirs(args.saved_model_path)

    # initialize tokenizers for clip and bert, these two use different tokenizers
    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')

    # Load the TorchScript CLIP image/text encoder.
    clip_model = torch.jit.load(os.path.join('./trained_models', "{}.pt".format('ViT-B32'))).to(device).eval()
    cliptokenizer = clip_tokenizer()

    # Build the BERT decoder with cross-attention and restore trained weights.
    bert_config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    bert_config.is_decoder=True
    bert_config.add_cross_attention=True
    bert_model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder',
                                                       config=bert_config).to(device).train()
    bert_model.load_state_dict(torch.load(args.saved_model_path + 'model.pt')['net'])

    # Per-class CIFAR-10 loaders (one DataLoader per semantic label).
    cifar10_loaders = cifar10_single_isolated_class_loader()
    image_decoder(clip_model, berttokenizer, device, image_loaders=cifar10_loaders)
| 6,916 | 51.007519 | 122 | py |
ZOC | ZOC-main/tinyimagenet_eval.py | import argparse
import torch
from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig
import os
from dataloaders.ZO_Clip_loaders import tinyimage_single_isolated_class_loader
from clip.simple_tokenizer import SimpleTokenizer as clip_tokenizer
from tqdm import tqdm
import numpy as np
from sklearn.metrics import roc_auc_score
def tokenize_for_clip(batch_sentences, tokenizer):
    """Convert sentences to zero-padded CLIP token-id tensors of length 77.

    Each row is `[SOT] + encode(sentence) + [EOT]` followed by zero padding.
    Returns a LongTensor of shape (batch, 77).
    """
    clip_context = 77  # CLIP default
    sot = tokenizer.encoder['<|startoftext|>']
    eot = tokenizer.encoder['<|endoftext|>']
    batch_rows = []
    for sent in batch_sentences:
        ids = [sot, *tokenizer.encode(sent), eot]
        row = torch.zeros(clip_context, dtype=torch.long)
        row[:len(ids)] = torch.tensor(ids)
        batch_rows.append(row)
    return torch.stack(batch_rows)
def greedysearch_generation_topk(clip_embed):
    """Greedily decode a token sequence from a CLIP image embedding.

    At each step the (module-level) BERT decoder is fed the tokens decoded so
    far, cross-attending to `clip_embed`; the argmax token is appended and the
    per-step top-35 candidate token ids are collected. Decoding stops after at
    most 9 generated tokens, since the entity word is expected early.

    Parameters:
    ----------
    clip_embed : torch.FloatTensor
        Extended CLIP image embedding, shape (1, embed_dim); used as the
        encoder hidden state for cross-attention.

    Returns:
    -------
    target_list : list of torch.Tensor
        BOS id followed by the greedily decoded token ids.
    top_k_list : torch.Tensor
        Concatenated top-35 candidate token ids from every decoding step.
    """
    max_len = 77
    N = 1  # batch has single sample
    target_list = [torch.tensor(berttokenizer.bos_token_id)]
    top_k_list = []
    bert_model.eval()
    for i in range(max_len):
        target = torch.LongTensor(target_list).unsqueeze(0)  # (1, seq_len)
        seq_len = target.shape[1]
        # BUG FIX: the original used len(target), which is the batch dimension
        # (always 1), so every token received position id 0 and a length-1
        # attention mask. Training (see train_decoder) builds position ids over
        # the full sequence length, so decoding must do the same.
        position_ids = torch.arange(0, seq_len).expand(N, seq_len).to(device)
        with torch.no_grad():
            out = bert_model(input_ids=target.to(device),
                             position_ids=position_ids,
                             attention_mask=torch.ones(N, seq_len).to(device),
                             encoder_hidden_states=clip_embed.unsqueeze(1).to(device),
                             )
        pred_idx = out.logits.argmax(2)[:, -1]
        _, top_k = torch.topk(out.logits, dim=2, k=35)
        top_k_list.append(top_k[:, -1].flatten())
        target_list.append(pred_idx)
        # the entity word is expected within at most the first 10 tokens
        if len(target_list) == 10:
            break
    top_k_list = torch.cat(top_k_list)
    return target_list, top_k_list
def image_decoder(clip_model, berttokenizer, device, split, image_loaders=None):
    """Zero-shot OOD detection on one TinyImageNet split; returns the AUROC.

    The first 20 classes of `split` are seen (label 0), the remaining classes
    are OOD (label 1). Per image, candidate labels are generated by the
    (module-level) BERT decoder; the OOD score is the zero-shot probability
    mass on the generated labels versus the 20 seen-label prompts.

    Parameters:
    ----------
    clip_model : CLIP model exposing encode_image / encode_text.
    berttokenizer : tokenizer used to decode generated token ids to words.
    device : torch device used for CLIP image encoding.
    split : list of class names; split[:20] are treated as seen.
    image_loaders : dict mapping class name -> DataLoader over that class only.

    NOTE(review): relies on module-level globals `cliptokenizer` and
    `greedysearch_generation_topk` (hence `bert_model`), and calls `.cuda()`
    directly, so a CUDA device is required.
    """
    seen_labels = split[:20]
    seen_descriptions = [f"This is a photo of a {label}" for label in seen_labels]
    # 20 seen classes x 50 images -> 1000 label-0; 180 OOD classes -> 9000 label-1.
    targets = torch.tensor(1000*[0] + 9000*[1])
    #targets = torch.tensor(100*[0] + 900*[1])
    ood_probs_sum = []
    max_num_entities = 0
    for i, semantic_label in enumerate(split):
        loader = image_loaders[semantic_label]
        for idx, image in enumerate(tqdm(loader)):
            #if idx==5:break
            with torch.no_grad():
                clip_out = clip_model.encode_image(image.to(device)).float()
                # Decoder was trained on a doubled CLIP embedding.
                clip_extended_embed = clip_out.repeat(1, 2).type(torch.FloatTensor)

                #greedy generation of candidate (unseen) label words
                target_list, topk_list = greedysearch_generation_topk(clip_extended_embed)

                target_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in target_list]
                topk_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in topk_list]

                unique_entities = list(set(topk_tokens))
                if len(unique_entities) > max_num_entities:
                    max_num_entities = len(unique_entities)
                all_desc = seen_descriptions + [f"This is a photo of a {label}" for label in unique_entities]
                all_desc_ids = tokenize_for_clip(all_desc, cliptokenizer)

                image_feature = clip_model.encode_image(image.cuda()).float()
                image_feature /= image_feature.norm(dim=-1, keepdim=True)

                text_features = clip_model.encode_text(all_desc_ids.cuda()).float()
                text_features /= text_features.norm(dim=-1, keepdim=True)

                zeroshot_probs = (100.0 * image_feature @ text_features.T).softmax(dim=-1).squeeze()

                #detection score is accumulative sum of probs of generated entities
                ood_prob_sum = np.sum(zeroshot_probs[20:].detach().cpu().numpy())
                ood_probs_sum.append(ood_prob_sum)

    auc_sum = roc_auc_score(np.array(targets), np.squeeze(ood_probs_sum))
    print(' sum_ood AUROC={}'.format(auc_sum))
    return auc_sum
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--trained_path', type=str, default='./trained_models/COCO/')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Checkpoint directory for the trained BERT decoder.
    args.saved_model_path = args.trained_path + '/ViT-B32/'
    if not os.path.exists(args.saved_model_path):
        os.makedirs(args.saved_model_path)

    # initialize tokenizers for clip and bert, these two use different tokenizers
    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')

    # Load the TorchScript CLIP image/text encoder.
    # NOTE(review): filename is 'ViT-B-32' here but 'ViT-B32' in the cifar10
    # and training scripts — confirm which checkpoint name is on disk.
    clip_model = torch.jit.load(os.path.join('./trained_models', "{}.pt".format('ViT-B-32'))).to(device).eval()
    cliptokenizer = clip_tokenizer()

    # Build the BERT decoder with cross-attention and restore trained weights.
    bert_config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    bert_config.is_decoder=True
    bert_config.add_cross_attention=True
    bert_model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder',
                                                       config=bert_config).to(device).train()
    bert_model.load_state_dict(torch.load(args.saved_model_path + 'model.pt')['net'])

    # Evaluate every TinyImageNet split and aggregate AUROCs.
    splits, tinyimg_loaders = tinyimage_single_isolated_class_loader()
    max_scores = []
    sum_scores = []
    mean_scores = []
    for split in splits:
        sum_score = image_decoder(clip_model, berttokenizer, device, split=split, image_loaders=tinyimg_loaders)
        sum_scores.append(sum_score)
    print('sum auc on 5 splits:', sum_scores)
    print('average on 5 splits with sum score:', np.mean(sum_scores), 'stdv:', np.std(sum_scores))
ZOC | ZOC-main/train_decoder.py | import argparse
import torch
from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig
import os
from dataloaders.coco_full_loader import get_loader
from clip.simple_tokenizer import SimpleTokenizer as clip_tokenizer
from transformers import AdamW
from tqdm import tqdm
def train_decoder(bert_model, train_loader, eval_loader, optimizer):
    """Train the BERT decoder to reconstruct captions from CLIP embeddings.

    For each epoch: one pass over `train_loader`, then validation via
    `eval_decoder`. The best model (lowest validation loss so far) is saved to
    `<saved_model_path>/model.pt`; the epoch-0 state is additionally dumped to
    `model_dump.pt`. Returns the accumulated training loss of the last epoch.

    NOTE: relies on module-level globals `args` (num_epochs, saved_model_path)
    and `device`.
    """
    num_batch = len(iter(train_loader))
    best_val_loss = float('inf')
    for epoch in range(args.num_epochs):
        acc_loss = 0
        print('Training : epoch {}'.format(epoch))
        for i, batch in enumerate(tqdm(train_loader)):
            #if i==1:break
            input_ids, attention_mask, label_ids, clip_embeds = batch
            # Decoder consumes a doubled CLIP embedding as encoder state.
            clip_extended_embed = clip_embeds.repeat(1, 2).type(torch.FloatTensor)

            N, seq_length = input_ids.shape
            position_ids = torch.arange(0, seq_length).expand(N, seq_length)

            bert_model.train()
            out = bert_model(input_ids=input_ids.to(device),
                             position_ids=position_ids.to(device),
                             attention_mask=attention_mask.to(device),
                             encoder_hidden_states=clip_extended_embed.unsqueeze(1).to(device),
                             labels=label_ids.to(device))

            out.loss.backward(retain_graph=False)
            optimizer.step()
            optimizer.zero_grad()
            acc_loss += out.loss.detach().item()

        validation_loss = eval_decoder(bert_model, eval_loader)
        print('validation loss in this epoch: ', validation_loss)
        state = {'net': bert_model.state_dict(),
                 'epoch': epoch,
                 'validation loss': validation_loss}
        if epoch == 0:
            # keep an epoch-0 snapshot around, as before
            torch.save(state, args.saved_model_path+'model_dump.pt')
        # BUG FIX: the original only wrote model.pt when a later epoch improved
        # on epoch 0, so a single-epoch run (the default num_epochs=1) never
        # produced the model.pt file that the evaluation scripts load.
        if validation_loss < best_val_loss:
            best_val_loss = validation_loss
            torch.save(state, args.saved_model_path+'model.pt')
        print('Average loss on {} training batches in this epoch:{}\n'.format(num_batch, acc_loss/num_batch))
    return acc_loss
def eval_decoder(bert_model, eval_loader):
    """Compute the caption-reconstruction loss over the validation set.

    Runs the decoder in eval mode under no_grad; prints the per-batch average
    and returns the total accumulated loss. Uses the module-level `device`.
    """
    batch_count = len(iter(eval_loader))
    print('evaluating loss on validation data ...')
    total_loss = 0
    bert_model.eval()
    with torch.no_grad():
        for step, batch in enumerate(tqdm(eval_loader)):
            input_ids, attention_mask, label_ids, clip_embeds = batch
            # Decoder consumes a doubled CLIP embedding as encoder state.
            clip_extended_embed = clip_embeds.repeat(1, 2).type(torch.FloatTensor)

            N, seq_length = input_ids.shape
            position_ids = torch.arange(0, seq_length).expand(N, seq_length)

            out = bert_model(input_ids=input_ids.to(device),
                             position_ids=position_ids.to(device),
                             attention_mask=attention_mask.to(device),
                             encoder_hidden_states=clip_extended_embed.unsqueeze(1).to(device),
                             labels=label_ids.to(device))
            total_loss += out.loss.detach().item()
    print('Average loss on {} validation batches={}\n'.format(batch_count, total_loss/batch_count))
    return total_loss
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', type=float, default=1e-5, help="Learning rate")
    parser.add_argument('--gamma', type=float, default=0.5)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--num_epochs', type=int, default=1, help="End epoch")  # trained with 25 epochs
    parser.add_argument('--trained_path', type=str, default='./trained_models/COCO/')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Checkpoint directory for the trained BERT decoder.
    args.saved_model_path = args.trained_path + '/ViT-B32/'
    if not os.path.exists(args.saved_model_path):
        os.makedirs(args.saved_model_path)

    # initialize tokenizers for clip and bert, these two use different tokenizers
    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
    cliptokenizer = clip_tokenizer()

    # loader to get preprocessed and encoded (image, caption) from COCO dataset
    train_loader = get_loader(train=True, clip_backbone='ViT-B32')
    eval_loader = get_loader(train=False, clip_backbone='ViT-B32')

    # load clip pretrained image encoder (TorchScript archive)
    clip_model = torch.jit.load(os.path.join('./trained_models', "{}.pt".format('ViT-B32'))).to(device).eval()

    # Build the BERT decoder with cross-attention over the CLIP embedding.
    bert_config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    bert_config.is_decoder=True
    bert_config.add_cross_attention=True
    bert_model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder',
                                                       config=bert_config).to(device).train()

    optimizer = AdamW(bert_model.parameters(), lr=args.lr)

    loss = train_decoder(bert_model, train_loader, eval_loader, optimizer)
print('final training loss={}'.format(loss)) | 5,162 | 45.513514 | 110 | py |
ZOC | ZOC-main/cifar100_eval.py | import argparse
import torch
from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig
import os
from dataloaders.ZO_Clip_loaders import cifar100_single_isolated_class_loader
from clip.simple_tokenizer import SimpleTokenizer as clip_tokenizer
from tqdm import tqdm
import numpy as np
from sklearn.metrics import roc_auc_score
import torchvision
import time
def tokenize_for_clip(batch_sentences, tokenizer):
    """Encode sentences as fixed-length (77) CLIP token-id tensors.

    Wraps each sentence with CLIP's start/end-of-text ids and zero-pads to
    the default context length; returns a (batch, 77) LongTensor.
    """
    default_len = 77  # CLIP default
    start_token = tokenizer.encoder['<|startoftext|>']
    end_token = tokenizer.encoder['<|endoftext|>']
    out_rows = []
    for sentence in batch_sentences:
        token_ids = [start_token] + tokenizer.encode(sentence) + [end_token]
        buf = torch.zeros(default_len, dtype=torch.long)
        buf[:len(token_ids)] = torch.tensor(token_ids)
        out_rows.append(buf)
    return torch.stack(out_rows)
def greedysearch_generation_topk(clip_embed):
    """Greedily decode a token sequence from a CLIP image embedding.

    At each step the (module-level) BERT decoder is fed the tokens decoded so
    far, cross-attending to `clip_embed`; the argmax token is appended and the
    per-step top-35 candidate token ids are collected. Decoding stops after at
    most 9 generated tokens, since the entity word is expected early.

    Parameters:
    ----------
    clip_embed : torch.FloatTensor
        Extended CLIP image embedding, shape (1, embed_dim); used as the
        encoder hidden state for cross-attention.

    Returns:
    -------
    target_list : list of torch.Tensor
        BOS id followed by the greedily decoded token ids.
    top_k_list : torch.Tensor
        Concatenated top-35 candidate token ids from every decoding step.
    """
    max_len = 77  # (original assigned this twice; once is enough)
    N = 1  # batch has single sample
    target_list = [torch.tensor(berttokenizer.bos_token_id)]
    top_k_list = []
    bert_model.eval()
    for i in range(max_len):
        target = torch.LongTensor(target_list).unsqueeze(0)  # (1, seq_len)
        seq_len = target.shape[1]
        # BUG FIX: the original used len(target), which is the batch dimension
        # (always 1), so every token received position id 0 and a length-1
        # attention mask. Training (see train_decoder) builds position ids over
        # the full sequence length, so decoding must do the same.
        position_ids = torch.arange(0, seq_len).expand(N, seq_len).to(device)
        with torch.no_grad():
            out = bert_model(input_ids=target.to(device),
                             position_ids=position_ids,
                             attention_mask=torch.ones(N, seq_len).to(device),
                             encoder_hidden_states=clip_embed.unsqueeze(1).to(device),
                             )
        pred_idx = out.logits.argmax(2)[:, -1]
        _, top_k = torch.topk(out.logits, dim=2, k=35)
        top_k_list.append(top_k[:, -1].flatten())
        target_list.append(pred_idx)
        # the entity word is expected within at most the first 10 tokens
        if len(target_list) == 10:
            break
    top_k_list = torch.cat(top_k_list)
    return target_list, top_k_list
def image_decoder(clip_model, berttokenizer, device, split, image_loaders=None):
    """ZOC OOD evaluation over one CIFAR-100 split.

    The first 20 labels of ``split`` are the seen (in-distribution) classes;
    the remaining 80 are unseen. For every test image, candidate entity words
    are generated by the BERT decoder, turned into extra CLIP text prompts,
    and the OOD score is the summed zero-shot probability mass on those
    generated (non-seen) entities. Returns the AUROC of that score.

    NOTE(review): uses the module-level ``cliptokenizer`` and the global
    ``greedysearch_generation_topk``; assumes 100 test images per class
    (hard-coded 2000/8000 target vector) — confirm against the loaders.
    """
    seen_labels = split[:20]
    seen_descriptions = [f"This is a photo of a {label}" for label in seen_labels]
    # 20 seen classes x 100 images -> label 0 (ID); 80 unseen x 100 -> 1 (OOD).
    targets = torch.tensor(2000*[0] + 8000*[1])
    ood_probs_sum = []
    max_num_entities = 0
    for i, semantic_label in enumerate(split):
        loader = image_loaders[semantic_label]
        start_time = time.time()  # NOTE(review): timers are never reported
        for idx, image in enumerate(tqdm(loader)):
            with torch.no_grad():
                clip_out = clip_model.encode_image(image.to(device)).float()
                # Duplicate the CLIP embedding to match the decoder's expected width.
                clip_extended_embed = clip_out.repeat(1, 2).type(torch.FloatTensor)
                #greedy generation
                target_list, topk_list = greedysearch_generation_topk(clip_extended_embed)
                target_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in target_list]
                topk_tokens = [berttokenizer.decode(int(pred_idx.cpu().numpy())) for pred_idx in topk_list]
                # Candidate entities = generated words that are not seen labels.
                unique_entities = list(set(topk_tokens) - set(seen_labels))
                if len(unique_entities) > max_num_entities:
                    max_num_entities = len(unique_entities)
                all_desc = seen_descriptions + [f"This is a photo of a {label}" for label in unique_entities]
                all_desc_ids = tokenize_for_clip(all_desc, cliptokenizer)
                image_feature = clip_model.encode_image(image.cuda()).float()
                image_feature /= image_feature.norm(dim=-1, keepdim=True)
                text_features = clip_model.encode_text(all_desc_ids.cuda()).float()
                text_features /= text_features.norm(dim=-1, keepdim=True)
                # Zero-shot class distribution over seen prompts + entity prompts.
                zeroshot_probs = (100.0 * image_feature @ text_features.T).softmax(dim=-1).squeeze()
                #detection score is accumulative sum of probs of generated entities
                ood_prob_sum = np.sum(zeroshot_probs[20:].detach().cpu().numpy())
                ood_probs_sum.append(ood_prob_sum)
        end_time=time.time()
    auc_sum = roc_auc_score(np.array(targets), np.squeeze(ood_probs_sum))
    print('sum_ood AUROC={}'.format(auc_sum))
    return auc_sum
if __name__ == '__main__':
    # Evaluate ZOC OOD detection on CIFAR-100 over five 20/80 seen/unseen splits.
    parser = argparse.ArgumentParser()
    parser.add_argument('--trained_path', type=str, default='./trained_models/COCO/')
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    args.saved_model_path = args.trained_path + '/ViT-B32/'
    if not os.path.exists(args.saved_model_path):
        os.makedirs(args.saved_model_path)
    # initialize tokenizers for clip and bert, these two use different tokenizers
    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
    # TorchScript CLIP image/text encoder (ViT-B/32).
    clip_model = torch.jit.load(os.path.join('./trained_models', "{}.pt".format('ViT-B-32'))).to(device).eval()
    cliptokenizer = clip_tokenizer()
    # BERT decoder with cross-attention over CLIP embeddings, restored from
    # the COCO-trained checkpoint.
    bert_config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    bert_config.is_decoder=True
    bert_config.add_cross_attention=True
    bert_model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', config=bert_config).to(device).train()
    bert_model.load_state_dict(torch.load(args.saved_model_path + 'model.pt')['net'])
    cifar100_loaders = cifar100_single_isolated_class_loader()
    dset = torchvision.datasets.CIFAR100(root='./data/', train=False, download=True)
    idx2class = {v:k for k,v in dset.class_to_idx.items()}
    # Five disjoint 20-class "seen" splits covering all 100 classes.
    splits = [list(range(20)), list(range(20, 40)), list(range(40, 60)), list(range(60, 80)), list(range(80, 100))]
    auc_list = []
    idx_splits = []
    for seen_split in splits:
        # Seen classes first, then the remaining 80 unseen classes.
        split=seen_split+list(set(list(range(100)))-set(seen_split))
        label_split = [idx2class[idx] for idx in split]
        print(label_split)
        auc = image_decoder(clip_model, berttokenizer, device, label_split, image_loaders=cifar100_loaders)
        auc_list.append(auc)
        print(auc_list)
    print('mean AUC={}'.format(np.mean(auc_list)), ' std={}'.format(np.std(auc_list)))
# --- end of file ---
# === ZOC-main/dataloaders/ZO_Clip_loaders.py ===
from torch.utils.data import DataLoader, Dataset
import numpy as np
import os
from torchvision.datasets import CIFAR10, CIFAR100
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, ToPILImage
from PIL import Image
from torchvision.datasets import ImageFolder
import glob
class cifar10_isolated_class(Dataset):
    """Test-set images of a single CIFAR-10 class, CLIP-preprocessed.

    ``__getitem__`` returns only the transformed image tensor (no label),
    since every sample belongs to the same class.
    """
    def __init__(self, class_label=None):
        assert class_label, 'a semantic label should be specified'
        super(cifar10_isolated_class, self).__init__()
        # CLIP-style preprocessing: 224x224 center crop + CIFAR normalization.
        self.transform = Compose([
            ToPILImage(),
            Resize(224, interpolation=Image.BICUBIC),
            CenterCrop(224),
            ToTensor(),
            Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615))
        ])
        cifar10 = CIFAR10(root='./data', train=False, download=True)
        # Boolean mask selecting only images of the requested class.
        class_mask = np.array(cifar10.targets) == cifar10.class_to_idx[class_label]
        self.data = cifar10.data[class_mask]
        self.targets = np.array(cifar10.targets)[class_mask]
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        return self.transform(self.data[index])
def cifar10_single_isolated_class_loader():
    """Return {class name: DataLoader over that class's CIFAR-10 test images}.

    Each loader yields single preprocessed images (batch_size=1, no labels).
    The unused ``splits`` constant and commented-out label list present in the
    original were dead code and have been removed.
    """
    cifar10_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    loaders_dict = {}
    for label in cifar10_labels:
        # One isolated-class dataset (and loader) per label.
        dataset = cifar10_isolated_class(label)
        loaders_dict[label] = DataLoader(dataset=dataset, batch_size=1, num_workers=4)
    return loaders_dict
class cifar100_isolated_class(Dataset):
    """Test-set images of a single CIFAR-100 class, CLIP-preprocessed.

    Exposes ``fine_label`` (the requested class) and ``coarse_label`` (its
    CIFAR-100 superclass). ``__getitem__`` returns only the transformed image.
    The original assigned ``self.coarse_label`` twice; the duplicate has been
    removed.
    """
    def __init__(self, class_label=None):
        assert class_label, 'a semantic label should be specified'
        super(cifar100_isolated_class, self).__init__()
        # Each entry: [superclass name, followed by its five fine labels].
        superclass_list = [['aquatic mammals', 'beaver', 'dolphin', 'otter', 'seal', 'whale'],
                           ['fish', 'aquarium_fish', 'flatfish', 'ray', 'shark', 'trout'],
                           ['flowers', 'orchid','poppy', 'rose', 'sunflower', 'tulip'],
                           ['food container', 'bottle', 'bowl', 'can', 'cup', 'plate'],
                           ['fruit and vegetables', 'apple', 'mushroom', 'orange', 'pear', 'sweet_pepper'],
                           ['household electrical devices', 'clock', 'keyboard', 'lamp', 'telephone', 'television'],
                           ['household furniture', 'bed', 'chair', 'couch', 'table', 'wardrobe'],
                           ['insects', 'bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach'],
                           ['large carnivores', 'bear', 'leopard', 'lion', 'tiger', 'wolf'],
                           ['large man-made outdoor things', 'bridge', 'castle', 'house', 'road', 'skyscraper'],
                           ['large natural outdoor scenes', 'cloud', 'forest', 'mountain', 'plain', 'sea'],
                           ['large omnivores and herbivores', 'camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo'],
                           ['medium-sized mammals', 'fox', 'porcupine', 'possum', 'raccoon', 'skunk'],
                           ['non-insect invertebrates', 'crab', 'lobster', 'snail', 'spider', 'worm'],
                           ['people', 'baby', 'boy', 'girl', 'man', 'woman'],
                           ['reptiles', 'crocodile', 'dinosaur', 'lizard', 'snake', 'turtle'],
                           ['small mammals', 'hamster', 'mouse', 'rabbit', 'shrew', 'squirrel'],
                           ['trees', 'maple_tree', 'oak_tree', 'palm_tree', 'pine_tree', 'willow_tree'],
                           ['vehicles', 'bicycle', 'bus', 'motorcycle', 'pickup_truck', 'train'],
                           ['large vehicles', 'lawn_mower', 'rocket', 'streetcar', 'tank', 'tractor']]
        # Map every fine label to its superclass name.
        fine_to_coarse_dict = {}
        for superclass in superclass_list:
            fine_to_coarse_dict.update(dict.fromkeys(superclass[1:], superclass[0]))
        self.fine_label = class_label
        self.coarse_label = fine_to_coarse_dict[class_label]
        # CLIP-style preprocessing: 224x224 center crop + CIFAR normalization.
        self.transform = Compose([
            ToPILImage(),
            Resize(224, interpolation=Image.BICUBIC),
            CenterCrop(224),
            ToTensor(),
            Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615))
        ])
        cifar100 = CIFAR100(root='./data', train=False, download=True)
        # Boolean mask selecting only images of the requested class.
        class_mask = np.array(cifar100.targets) == cifar100.class_to_idx[class_label]
        self.data = cifar100.data[class_mask]
        self.targets = np.array(cifar100.targets)[class_mask]
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        return self.transform(self.data[index])
def cifar100_single_isolated_class_loader():
    """Return {fine label: DataLoader over that class's CIFAR-100 test images}.

    Covers all 100 fine labels; each loader yields single preprocessed images
    (batch_size=1, no labels).
    """
    loaders_dict = {}
    # All 100 CIFAR-100 fine labels, grouped five-per-line by superclass.
    cifar100_labels = ['beaver', 'dolphin', 'otter', 'seal', 'whale',
                       'aquarium_fish', 'flatfish', 'ray', 'shark', 'trout',
                       'orchid','poppy', 'rose', 'sunflower', 'tulip',
                       'bottle', 'bowl', 'can', 'cup', 'plate',
                       'apple', 'mushroom', 'orange', 'pear', 'sweet_pepper',
                       'clock', 'keyboard', 'lamp', 'telephone', 'television',
                       'bed', 'chair', 'couch', 'table', 'wardrobe',
                       'bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach',
                       'bear', 'leopard', 'lion', 'tiger', 'wolf',
                       'bridge', 'castle', 'house', 'road', 'skyscraper',
                       'cloud', 'forest', 'mountain', 'plain', 'sea',
                       'camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo',
                       'fox', 'porcupine', 'possum', 'raccoon', 'skunk',
                       'crab', 'lobster', 'snail', 'spider', 'worm',
                       'baby', 'boy', 'girl', 'man', 'woman',
                       'crocodile', 'dinosaur', 'lizard', 'snake', 'turtle',
                       'hamster', 'mouse', 'rabbit', 'shrew', 'squirrel',
                       'maple_tree', 'oak_tree', 'palm_tree', 'pine_tree', 'willow_tree',
                       'bicycle', 'bus', 'motorcycle', 'pickup_truck', 'train',
                       'lawn_mower', 'rocket', 'streetcar', 'tank', 'tractor']
    for label in cifar100_labels:
        dataset = cifar100_isolated_class(label)
        loader = DataLoader(dataset=dataset, batch_size=1, num_workers=4)
        loaders_dict[label] = loader
    return loaders_dict
class cifarplus():
    """CIFAR+ test set restricted to ``class_list``.

    A 4-element ``class_list`` selects CIFAR-10 classes (used as
    in-distribution); any other length selects CIFAR-100 classes (OOD).
    ``__getitem__`` returns (transformed image, target).
    """
    def __init__(self, class_list):
        self.class_list = class_list
        self.transform = Compose([
            Resize(224, interpolation=Image.BICUBIC), # 224 for vit, 288 for res50x4
            CenterCrop(224), # 224 for vit, 288 for res50x4
            ToTensor(),
            Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615))
        ])
        # Select the raw images/targets of the requested classes.
        if len(self.class_list) == 4:
            cifar10 = CIFAR10(root='./data', train=False, download=True, transform=self.transform)
            inds = [i for i in range(len(cifar10.targets)) if cifar10.targets[i] in self.class_list]
            self.data = cifar10.data[inds]
            self.targets = np.array(cifar10.targets)[inds].tolist()
        else:
            cifar100 = CIFAR100(root='./data', train=False, download=True, transform=self.transform)
            inds = [i for i in range(len(cifar100.targets)) if cifar100.targets[i] in self.class_list]
            self.data = cifar100.data[inds]
            self.targets = np.array(cifar100.targets)[inds].tolist()
    def __len__(self):
        return len(self.targets)
    def __getitem__(self, index):
        # The transform is applied manually here on the raw array, so the
        # transform passed to CIFAR10/CIFAR100 above is effectively unused.
        img = self.transform(Image.fromarray(self.data[index]).convert('RGB'))
        return img, self.targets[index]
def cifarplus_loader():
    """Build CIFAR+ loaders: one in-distribution loader (4 CIFAR-10 classes)
    plus one OOD loader per CIFAR-100 split.

    Returns (in_loader, {split name: out_loader}).
    """
    # In-distribution CIFAR-10 class indices.
    in_list = [0, 1, 8, 9]
    # OOD CIFAR-100 class-index splits ("plus50" and five "plus10" variants).
    out_dict = {'plus50': [4, 30, 55, 72, 95, 1, 32, 67, 73, 91, 6, 7, 14, 18, 24, 3, 42, 43, 88, 97, 15, 19, 21, 31, 38,
                          34, 63, 64, 66, 75, 26, 45, 77, 79, 99, 2, 11, 35, 46, 98, 27, 29, 44, 78, 93, 36, 50, 65, 74, 80],
               'plus10-1':[43, 36, 24, 18, 80, 98, 30, 93, 78, 3],
               'plus10-2':[74, 91, 98, 79, 50, 66, 24, 26, 6, 42],
               'plus10-3':[79, 63, 36, 4, 29, 55, 75, 46, 72, 38],
               'plus10-4':[95, 93, 26, 43, 36, 27, 18, 30, 64, 32],
               'plus10-5':[88, 18, 19, 24, 65, 50, 4, 93, 35, 46]}
    in_dataset = cifarplus(in_list)
    in_loader = DataLoader(dataset=in_dataset, batch_size=1, num_workers=4, shuffle=False)
    out_loaders = {}
    for key in out_dict.keys():
        out_dataset = cifarplus(out_dict[key])
        out_loaders[key] = DataLoader(dataset=out_dataset, batch_size=1, num_workers=4, shuffle=False)
    return in_loader, out_loaders
def tinyimage_semantic_spit_generator():
    """Build the five TinyImageNet seen/unseen semantic-label splits.

    For each CAC split, the 20 "seen" class indices come first, followed by
    the remaining 180 unseen classes; every class index is mapped
    WNID -> human-readable label via ``imagenet_id_to_label.txt``.

    Returns a list of five lists of semantic labels.

    Fixes vs. original: the label file is read once with a context manager
    (it was reopened on every split iteration and never closed), and the
    builtin-shadowing names ``all``/``id`` were renamed.
    """
    # 20 "seen" class indices per split (CAC splits).
    tinyimage_splits = [
        [192, 112, 145, 107, 91, 180, 144, 193, 10, 125, 186, 28, 72, 124, 54, 77, 157, 169, 104, 166],
        [156, 157, 167, 175, 153, 11, 147, 0, 199, 171, 132, 60, 87, 190, 101, 111, 193, 71, 131, 192],
        [28, 15, 103, 33, 90, 167, 61, 13, 124, 159, 49, 12, 54, 78, 82, 107, 80, 25, 140, 46],
        [128, 132, 123, 72, 154, 35, 86, 10, 188, 28, 85, 89, 91, 82, 116, 65, 96, 41, 134, 25],
        [102, 79, 47, 106, 59, 93, 145, 10, 62, 175, 76, 183, 48, 130, 38, 186, 44, 8, 29, 26]]  # CAC splits
    dataset = ImageFolder(root='./data/tiny-imagenet-200/val')
    idx_to_wnid = {v: k for k, v in dataset.class_to_idx.items()}
    # Read the WNID -> semantic-label table once.
    with open('./dataloaders/imagenet_id_to_label.txt', 'r') as f:
        imagenet_id_idx_semantic = f.readlines()
    semantic_splits = [[], [], [], [], []]
    for i, split in enumerate(tinyimage_splits):
        # Seen WNIDs first, then the remaining (unseen) WNIDs.
        seen = [idx_to_wnid[idx] for idx in split]
        unseen = list(set(dataset.class_to_idx.keys()) - set(seen))
        seen.extend(unseen)
        for wnid in seen:
            for line in imagenet_id_idx_semantic:
                if wnid == line[:-1].split(' ')[0]:
                    semantic_splits[i].append(line[:-1].split(' ')[2])
                    break
    return semantic_splits
class tinyimage_isolated_class(Dataset):
    """Validation images of a single TinyImageNet class, CLIP-preprocessed.

    ``mappings`` maps a semantic label to its WNID directory name;
    ``__getitem__`` returns only the transformed image tensor.
    """
    def __init__(self, label, mappings):
        assert label, 'a semantic label should be specified'
        super(tinyimage_isolated_class, self).__init__()
        path = './data/tiny-imagenet-200/val/'
        #path = '/Users/Sepid/data/tiny-imagenet-200/val/'
        self.image_paths = glob.glob(os.path.join(path, mappings[label], '*.JPEG'))
        # CLIP-style preprocessing: 224x224 center crop + CIFAR normalization.
        self.transform = Compose([
            Resize(224, interpolation=Image.BICUBIC),
            CenterCrop(224),
            ToTensor(),
            Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615))
        ])
    def __len__(self):
        return len(self.image_paths)
    def __getitem__(self, index):
        x = Image.open(self.image_paths[index]).convert('RGB')
        if self.transform:
            x = self.transform(x)
        return x
def tinyimage_single_isolated_class_loader():
    """Return (semantic_splits, {semantic label: DataLoader}) for TinyImageNet val.

    Fix vs. original: the label->WNID file is opened with a context manager
    (the original leaked the file handle).
    """
    semantic_splits = tinyimage_semantic_spit_generator()
    # semantic label -> WNID directory name.
    mappings_dict = {}
    with open('./dataloaders/tinyimagenet_labels_to_ids.txt', 'r') as f:
        for line in f.readlines():
            label, class_id = line[:-1].split(' ')[0], line[:-1].split(' ')[1]
            mappings_dict[label] = class_id
    loaders_dict = {}
    for semantic_label in mappings_dict.keys():
        dataset = tinyimage_isolated_class(semantic_label, mappings_dict)
        loader = DataLoader(dataset=dataset, batch_size=1, num_workers=4)
        loaders_dict[semantic_label] = loader
    return semantic_splits, loaders_dict
if __name__ == '__main__':
    # Manual smoke test of the split definitions: for each split, print the
    # class names NOT among the selected indices. (Dataset-extraction junk
    # fused onto the last line has been removed.)
    splits = [[0, 1, 9, 7, 3, 2],
              [0, 2, 4, 3, 7, 5],
              [5, 1, 9, 8, 7, 0],
              [5, 7, 1, 8, 4, 6],
              [8, 1, 5, 3, 4, 6]]
    dset = CIFAR100(root='/Users/Sepid/data')
    idx2cls = {v: k for k, v in dset.class_to_idx.items()}
    for i, split in enumerate(splits):
        print('split{}'.format(i))
        ls = []
        for idx in split:
            ls.append(idx2cls[idx])
        print(set(dset.class_to_idx.keys()) - set(ls))
# === ZOC-main/dataloaders/coco_full_loader.py ===
from torch.utils.data import DataLoader, TensorDataset
import torch
import numpy as np
import os
from torchvision.datasets import CocoDetection
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from PIL import Image
from tqdm import tqdm
from transformers import BertGenerationTokenizer
import copy
class my_coco_detetction():
    """Wrapper around torchvision CocoDetection yielding (image, <=5 captions).

    Images get CLIP-style preprocessing; captions beyond the fifth are
    dropped so every image pairs with at most five caption strings.
    (Class name typo is kept: it is part of the public interface.)
    """
    def __init__(self, train=True):
        if train:
            filename = 'train2017'
        else:
            filename = 'val2017'
            print('file is val')
        super(my_coco_detetction, self).__init__()
        self.transform = Compose([
            Resize(224, interpolation=Image.BICUBIC), # 224 for vit, 288 for res50x4
            CenterCrop(224), # 224 for vit, 288 for res50x4
            ToTensor(),
            Normalize((0.4913, 0.4821, 0.4465), (0.2470, 0.2434, 0.2615))
        ])
        self.coco_dataset = CocoDetection(root=os.path.join('./data/MS-COCO/images', filename),
                                          annFile=os.path.join('./data/MS-COCO/annotations', 'captions_{}.json'.format(filename)))
    def __len__(self):
        return len(self.coco_dataset)
    def __getitem__(self, index):
        img = self.transform(self.coco_dataset[index][0])
        captions = self.coco_dataset[index][1]
        cap_list=[]
        # Keep at most the first five captions for this image.
        for i, caption in enumerate(captions):
            if i==5:
                #print('more than 5 captions for this image', index)
                break
            cap = caption['caption']
            cap_list.append(cap)
        if len(cap_list)<5:
            print('has less than 5 captions', index)
        return img, cap_list
def get_clip_image_features(coco_dataset, split, clip_backbone, device):
    """Encode all COCO images with the CLIP image encoder, with disk caching.

    If a cached .npy exists for this backbone/split it is loaded; otherwise
    features are computed in batches of 128 and saved. Returns a numpy array
    of shape (num_images, feature_dim).
    """
    clip_model = torch.jit.load(os.path.join('./trained_models', "{}.pt".format(clip_backbone))).to(device).eval()
    if os.path.isfile('./dataloaders/processed_coco/{}/5xCaptions/full_coco_clip_features_{}.npy'.format(clip_backbone, split)):
        # Cache hit: load precomputed features.
        with open('./dataloaders/processed_coco/{}/5xCaptions/full_coco_clip_features_{}.npy'.format(clip_backbone, split), 'rb') as e:
            clip_out_all = np.load(e, allow_pickle=True)
    else:
        print('calculating all clip image encoder features')
        loader = DataLoader(dataset=coco_dataset, batch_size=128, shuffle=False, collate_fn=collate_fn)
        clip_out_all = []
        with torch.no_grad():
            for i, (images, annot) in enumerate(tqdm(loader)):
                #if i == 1: break
                images =torch.stack(images)
                clip_out = clip_model.encode_image(images.to(device))
                clip_out_all.append(clip_out.cpu().numpy())
        clip_out_all = np.concatenate(clip_out_all)
        # Cache for subsequent runs.
        with open('./dataloaders/processed_coco/{}/5xCaptions/full_coco_clip_features_{}.npy'.format(clip_backbone, split), 'wb') as e:
            np.save(e, clip_out_all, allow_pickle=True)
    return clip_out_all
def get_bos_sentence_eos(coco_dataset, berttokenizer, split, clip_backbone):
    """Return every caption wrapped as "<bos> caption <eos>", with disk caching.

    Produces one string per caption (up to 5 per image), in dataset order.
    """
    if os.path.isfile('./dataloaders/processed_coco/{}/5xCaptions/full_coco_processed_annot_{}.npy'.format(clip_backbone, split)):
        # Cache hit: load preprocessed sentences.
        with open('./dataloaders/processed_coco/{}/5xCaptions/full_coco_processed_annot_{}.npy'.format(clip_backbone, split), 'rb') as e:
            bos_sentence_eos = np.load(e, allow_pickle=True)
            bos_sentence_eos = bos_sentence_eos.tolist()
    else:
        print('preprocessing all sentences...')
        bos_sentence_eos = []
        for i, (image, captions) in enumerate(tqdm(coco_dataset)):
            #if i==128:break
            for caption in captions:
                bos_sentence_eos.append(berttokenizer.bos_token + ' ' + caption + ' ' + berttokenizer.eos_token)
        # Cache for subsequent runs.
        with open('./dataloaders/processed_coco/{}/5xCaptions/full_coco_processed_annot_{}.npy'.format(clip_backbone, split), 'wb') as e:
            np.save(e, bos_sentence_eos, allow_pickle=True)
    return bos_sentence_eos
def get_bert_training_features(coco_dataset, train, clip_backbone):
    """Tokenize all BOS+caption+EOS strings for BERT decoder training.

    Returns (input_ids, attention_mask, label_ids) as numpy arrays padded to
    at most 77 tokens. Padding positions in ``label_ids`` are set to -100 so
    the cross-entropy loss ignores them.
    """
    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
    sentences = get_bos_sentence_eos(coco_dataset, berttokenizer, train, clip_backbone)
    print('tokenizing all processed sentences...')
    tokenized = berttokenizer(sentences, padding=True,
                              truncation=True, max_length=77,
                              return_token_type_ids=False, return_tensors='np')
    # Copy before masking so input_ids keeps the pad token (0).
    label_ids = copy.deepcopy(tokenized['input_ids'])
    label_ids[label_ids == 0] = -100
    input_ids = tokenized['input_ids']
    attention_mask = tokenized['attention_mask']
    return input_ids, attention_mask, label_ids
def collate_fn(batch):
    """Transpose a batch of (image, captions) pairs into parallel tuples."""
    transposed = zip(*batch)
    return tuple(transposed)
def get_loader(train, clip_backbone):
    """Assemble the BERT-decoder training DataLoader for COCO captions.

    Each batch item: (input_ids, attention_mask, label_ids, clip_feature).
    CLIP image features are repeated 5x so each of an image's five captions
    pairs with the same image embedding.

    NOTE(review): CLIP features are cast to ``torch.long`` here, which
    truncates float features — confirm this is intended.
    """
    if train:
        split='train'
    else:
        split='val'
    coco_dataset = my_coco_detetction(train)
    clip_features = get_clip_image_features(coco_dataset, split, clip_backbone, device='cuda')
    input_ids, attention_mask, label_ids = get_bert_training_features(coco_dataset, split, clip_backbone)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    attention_mask = torch.tensor(attention_mask, dtype=torch.long)
    label_ids = torch.tensor(label_ids, dtype=torch.long)
    clip_features = torch.tensor(clip_features, dtype=torch.long)
    print(input_ids.size(), attention_mask.size(), label_ids.size(), clip_features.size())
    hidden_size = clip_features.size(1)
    print(clip_features.repeat(1,5).view(-1, hidden_size).size())
    # Repeat each image feature 5 times to align with the 5 captions/image.
    dataset = TensorDataset(input_ids, attention_mask, label_ids, clip_features.repeat(1,5).view(-1, hidden_size))
    loader = DataLoader(dataset=dataset, batch_size=128, num_workers=8, shuffle=True)
    return loader
if __name__=='__main__':
    # Manual smoke test: iterate the whole training set once (prints warnings
    # for images with fewer than 5 captions).
    #with open('./processed_coco/{}/coco_clip_features_{}.npy'.format('ViT-B32', 'train'),'rb') as e:
    #    clip_out_all = np.load(e, allow_pickle=True)
    #print(np.shape(clip_out_all))
    dset = my_coco_detetction(train=True)
    max_length=0
    for i, (image, captions) in enumerate(tqdm(dset)):
        pass
# --- end of file ---
# === cppflow-master/examples/load_frozen_graph/create_model.py ===
#!/usr/bin/env python
"""
Example for a load frozen tf graph functionality.
"""
# MIT License
#
# Copyright (c) 2021 Daisuke Kato
# Copyright (c) 2021 Paul
# Copyright (c) 2022 Sergio Izquierdo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# @file create_model.py
#
# @brief Creates and saves a simple Keras model as a frozen graph.
#
# @section Creates and saves a simple Keras model as a frozen graph.
#
# @section author_create_model Author(s)
# - Created by Daisuke Kato
# - Created by Paul
# - Modified by Sergio Izquierdo
# Imports
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2,
)
# Build a tiny 5 -> 5 -> 1 Keras MLP to export.
input_1 = tf.keras.Input(shape=(5,))
output_1 = tf.keras.layers.Dense(5, activation=tf.nn.relu)(input_1)
output_1 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(output_1)
model = tf.keras.Model(inputs=input_1, outputs=output_1)
# Create frozen graph: wrap the model in a concrete function and convert all
# variables to constants so the graph is self-contained.
x = tf.TensorSpec(model.input_shape, tf.float32, name="x")
concrete_function = tf.function(lambda x: model(x)).get_concrete_function(x)
frozen_model = convert_variables_to_constants_v2(concrete_function)
# Check input/output node name
print(f"{frozen_model.inputs=}")
print(f"{frozen_model.outputs=}")
# Save the graph as protobuf format
directory = "."
tf.io.write_graph(frozen_model.graph, directory, "model.pb", as_text=False)
# --- end of file ---
# === cppflow-master/examples/load_model/create_model.py ===
#!/usr/bin/env python
"""
Example for a load model functionality.
"""
# MIT License
#
# Copyright (c) 2019 Sergio Izquierdo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# @file create_model.py
#
# @brief Creates and saves a simple Keras model as a saved model.
#
# @section Creates and saves a simple Keras model as a saved model.
#
# @section author_create_model Author(s)
# - Created by Sergio Izquierdo
# Imports
import tensorflow as tf
# Build a tiny 5 -> 5 -> 1 Keras MLP.
input_1 = tf.keras.Input(shape=(5,))
output_1 = tf.keras.layers.Dense(5, activation=tf.nn.relu)(input_1)
output_1 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(output_1)
model = tf.keras.Model(inputs=input_1, outputs=output_1)
model.compile()
# Export the model to a SavedModel
model.save('model', save_format='tf')
# --- end of file ---
# === cppflow-master/examples/efficientnet/create_model.py ===
#!/usr/bin/env python
"""
Example for create model functionality.
"""
# MIT License
#
# Copyright (c) 2020 Sergio Izquierdo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# @file create_model.py
#
# @brief Creates and saves an EfficientNet model.
#
# @section description_create_model Creates and saves an EfficientNet model.
#
# @section author_create_model Author(s)
# - Created by Sergio Izquierdo
# Imports
import tensorflow as tf
# Pretrained EfficientNet-B0 (downloads ImageNet weights on first run).
model = tf.keras.applications.EfficientNetB0()
# Export the model to a SavedModel
model.save('model', save_format='tf')
# --- end of file ---
# === cppflow-master/examples/multi_input_output/create_model.py ===
#!/usr/bin/env python
"""
Example for a multiple inputs and outputs functionality.
"""
# MIT License
#
# Copyright (c) 2020 Sergio Izquierdo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# @file create_model.py
#
# @brief Creates and saves a simple multi input multi output Keras model.
#
# @section Creates and saves a simple multi input multi output Keras model.
#
# @section author_create_model Author(s)
# - Created by Sergio Izquierdo
# Imports
import tensorflow as tf
# Build a two-input / two-output Keras model; the explicit layer names are
# what the cppflow example refers to when feeding/fetching tensors.
input_1 = tf.keras.Input(shape=(5,), name='my_input_1')
input_2 = tf.keras.Input(shape=(5,), name='my_input_2')
x1 = tf.keras.layers.Dense(5, activation=tf.nn.relu)(input_1)
x2 = tf.keras.layers.Dense(5, activation=tf.nn.relu)(input_2)
output_1 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                                 name='my_outputs_1')(x1)
output_2 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                                 name='my_outputs_2')(x2)
model = tf.keras.Model(inputs=[input_1, input_2], outputs=[output_1, output_2])
model.compile()
# Export the model to a SavedModel
model.save('model', save_format='tf')
# --- end of file ---
# === BOExplain-main/boexplain/optuna/setup.py ===
import os
import sys
import pkg_resources
from setuptools import find_packages
from setuptools import setup
from typing import Dict
from typing import List
from typing import Optional
def get_version() -> str:
    """Read ``__version__`` out of optuna/version.py without importing optuna.

    Raises AssertionError if no ``__version__`` line is found.
    """
    version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
    with open(version_filepath) as f:
        for line in f:
            if line.startswith("__version__"):
                # Line looks like: __version__ = "1.2.3" -> strip the quotes.
                return line.strip().split()[-1][1:-1]
    # Was a bare `assert False`, which is stripped under `python -O` and would
    # silently return None; raise the same exception type explicitly instead.
    raise AssertionError("__version__ not found in {}".format(version_filepath))
def get_long_description() -> str:
    """Return the contents of the README.md that sits next to this file."""
    readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
    with open(readme_filepath) as readme_file:
        contents = readme_file.read()
    return contents
def get_install_requires() -> List[str]:
    """Return the package's core runtime dependencies."""
    requirements = (
        "alembic",
        "cliff",
        "cmaes>=0.5.0",
        "colorlog",
        "joblib",
        "numpy",
        "scipy!=1.4.0",
        "sqlalchemy>=1.1.0",
        "tqdm",
    )
    return list(requirements)
def get_tests_require() -> List[str]:
    """Testing dependencies are identical to the ``testing`` extra."""
    extras = get_extras_require()
    return extras["testing"]
def get_extras_require() -> Dict[str, List[str]]:
    """Return optional dependency groups, keyed by extra name.

    Several pins are conditional on the platform (macOS vs. Linux CPU wheels
    for torch/torchvision) and on the running Python version.
    """
    requirements = {
        "checking": ["black", "hacking", "mypy"],
        "codecov": ["codecov", "pytest-cov"],
        "doctest": [
            "cma",
            "pandas",
            "plotly>=4.0.0",
            "scikit-learn>=0.19.0,<0.23.0",
            "scikit-optimize",
            "mlflow",
        ],
        "document": ["sphinx", "sphinx_rtd_theme"],
        "example": [
            "catboost",
            "chainer",
            "lightgbm",
            "mlflow",
            "mpi4py",
            "mxnet",
            "nbval",
            "pytorch-ignite",
            "scikit-image",
            "scikit-learn",
            "thop",
            "torch==1.4.0" if sys.platform == "darwin" else "torch==1.4.0+cpu",
            "torchvision==0.5.0" if sys.platform == "darwin" else "torchvision==0.5.0+cpu",
            "xgboost",
        ]
        # Packages only installable on Python 3.6/3.7.
        + (
            ["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
            if (3, 5) < sys.version_info[:2] < (3, 8)
            else []
        )
        + (
            ["llvmlite<=0.31.0"] if (3, 5) == sys.version_info[:2] else []
        )  # Newer `llvmlite` is not distributed with wheels for Python 3.5.
        + (
            ["dask[dataframe]", "dask-ml", "keras", "tensorflow>=2.0.0", "tensorflow-datasets"]
            if sys.version_info[:2] < (3, 8)
            else []
        ),
        "experimental": ["redis"],
        "testing": [
            # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
            # https://github.com/optuna/optuna/issues/1000.
            "bokeh<2.0.0",
            "chainer>=5.0.0",
            "cma",
            "fakeredis",
            "fanova",
            "lightgbm",
            "mlflow",
            "mpi4py",
            "mxnet",
            "pandas",
            "plotly>=4.0.0",
            "pytest",
            "pytorch-ignite",
            "scikit-learn>=0.19.0,<0.23.0",
            "scikit-optimize",
            "torch==1.4.0" if sys.platform == "darwin" else "torch==1.4.0+cpu",
            "torchvision==0.5.0" if sys.platform == "darwin" else "torchvision==0.5.0+cpu",
            "xgboost",
        ]
        # Packages only installable on Python 3.6/3.7.
        + (
            ["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
            if (3, 5) < sys.version_info[:2] < (3, 8)
            else []
        )
        + (
            ["keras", "tensorflow", "tensorflow-datasets"] if sys.version_info[:2] < (3, 8) else []
        ),
    }
    return requirements
def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
    """Return the first installed distribution among ``pkgs``, or None."""
    for name in pkgs:
        try:
            return pkg_resources.get_distribution(name)
        except pkg_resources.DistributionNotFound:
            continue
    return None
# Refuse to install alongside the legacy PFNOpt package (Optuna's old name),
# since both would claim overlapping module names.
pfnopt_pkg = find_any_distribution(["pfnopt"])
if pfnopt_pkg is not None:
    msg = (
        "We detected that PFNOpt is installed in your environment.\n"
        "PFNOpt has been renamed Optuna. Please uninstall the old\n"
        "PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`)."
    )
    print(msg)
    exit(1)
# Package metadata and entry points; CLI subcommands are registered under the
# "optuna.command" entry-point group and dispatched by optuna.cli.
setup(
    name="optuna",
    version=get_version(),
    description="A hyperparameter optimization framework",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    author="Takuya Akiba",
    author_email="akiba@preferred.jp",
    url="https://optuna.org/",
    packages=find_packages(),
    package_data={
        # Ship the alembic migration scripts used by the RDB storage backend.
        "optuna": [
            "storages/rdb/alembic.ini",
            "storages/rdb/alembic/*.*",
            "storages/rdb/alembic/versions/*.*",
        ]
    },
    install_requires=get_install_requires(),
    tests_require=get_tests_require(),
    extras_require=get_extras_require(),
    entry_points={
        "console_scripts": ["optuna = optuna.cli:main"],
        "optuna.command": [
            "create-study = optuna.cli:_CreateStudy",
            "delete-study = optuna.cli:_DeleteStudy",
            "study set-user-attr = optuna.cli:_StudySetUserAttribute",
            "studies = optuna.cli:_Studies",
            "dashboard = optuna.cli:_Dashboard",
            "study optimize = optuna.cli:_StudyOptimize",
            "storage upgrade = optuna.cli:_StorageUpgrade",
        ],
    },
)
# --- end of file ---
# === SPMC_VideoSR-master/modules/utils.py === (Python 2)
import tensorflow as tf
def weight_from_caffe(caffenet):
    """Return a TF variable initializer that copies conv weights from Caffe.

    The current variable-scope's last path component must match the Caffe
    layer name. Weights are transposed from Caffe's (out, in, h, w) layout to
    TensorFlow's (h, w, in, out). (Python 2 code: print statement kept.)
    """
    def func(shape, dtype):
        sc = tf.get_variable_scope()
        name = sc.name.split('/')[-1]
        print 'init: ', name, shape, caffenet.params[name][0].data.shape
        return tf.transpose(caffenet.params[name][0].data, perm=[2 ,3 ,1 ,0])
    return func
def bias_from_caffe(caffenet):
    """Return a TF variable initializer that copies bias terms from Caffe.

    The current variable-scope's last path component must match the Caffe
    layer name; biases need no transposition.
    """
    def func(shape, dtype):
        sc = tf.get_variable_scope()
        name = sc.name.split('/')[-1]
        return caffenet.params[name][1].data
    return func
# --- end of file ---
# === CLOSURE-master/vr/utils.py ===
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import inspect
import json
import torch
from vr.models import (ModuleNet,
Seq2Seq,
Seq2SeqAtt,
LstmModel,
CnnLstmModel,
CnnLstmSaModel,
FiLMedNet,
FiLMGen,
MAC)
from vr.ns_vqa.parser import Seq2seqParser
from vr.ns_vqa.clevr_executor import ClevrExecutor
def invert_dict(d):
    """Return a new dict mapping each value of *d* back to its key."""
    return dict((value, key) for key, value in d.items())
def load_vocab(path):
    """Load a vocab JSON file and add inverse (idx -> token) lookup tables.

    Also sanity-checks that the special tokens <NULL>/<START>/<END> occupy
    indices 0/1/2 in both the question and program vocabularies.
    """
    with open(path, 'r') as f:
        vocab = json.load(f)
    # Build the idx -> token inverses for each token table.
    for src, dst in (('question_token_to_idx', 'question_idx_to_token'),
                     ('program_token_to_idx', 'program_idx_to_token'),
                     ('answer_token_to_idx', 'answer_idx_to_token')):
        vocab[dst] = {idx: tok for tok, idx in vocab[src].items()}
    # Sanity check: make sure <NULL>, <START>, and <END> are consistent.
    for key in ('question_token_to_idx', 'program_token_to_idx'):
        assert vocab[key]['<NULL>'] == 0
        assert vocab[key]['<START>'] == 1
        assert vocab[key]['<END>'] == 2
    return vocab
def load_cpu(path):
    """Load a torch checkpoint from *path*, remapping cuda:0 tensors onto CPU.

    Allows checkpoints saved on a GPU machine to be loaded where no GPU is
    present.
    """
    return torch.load(path, map_location={'cuda:0': 'cpu'})
def load_program_generator(path):
    """Rebuild the program generator stored in the checkpoint at *path*.

    Returns ``(model, kwargs)``; ``model`` is ``None`` for checkpoint types
    that carry no program generator. When a model is built, its saved state
    dict is loaded into it.
    """
    ckpt = load_cpu(path)
    kind = ckpt['args']['model_type']
    kwargs = ckpt['program_generator_kwargs']
    model = None
    if kind in ('FiLM', 'MAC', 'RelNet', 'Control-EE'):
        model = FiLMGen(**kwargs)
    elif kind in ('PG+EE', 'PG'):
        # The neuro-symbolic variant uses the seq2seq parser; otherwise the
        # attention seq2seq generator.
        if ckpt['args']['ns_vqa']:
            model = Seq2seqParser(ckpt['vocab'])
        else:
            model = Seq2SeqAtt(**kwargs)
    if model is not None:
        model.load_state_dict(ckpt['program_generator_state'])
    return model, kwargs
def load_execution_engine(path, verbose=True):
    """Rebuild the execution engine stored in the checkpoint at *path*.

    Returns ``(model, kwargs)``. For checkpoints trained with a symbolic
    engine, a ``ClevrExecutor`` built from the vocab is returned instead of
    a neural network (with empty kwargs).
    """
    checkpoint = load_cpu(path)
    if checkpoint['args'].get('symbolic_ee'):
        vocab = load_vocab(checkpoint['args']['vocab_json'])
        return ClevrExecutor(vocab), {}
    model_type = checkpoint['args']['model_type']
    kwargs = checkpoint['execution_engine_kwargs']
    state = checkpoint['execution_engine_state']
    kwargs['verbose'] = verbose
    if model_type == 'FiLM':
        model = FiLMedNet(**kwargs)
    elif model_type in ['PG+EE', 'EE', 'Control-EE']:
        # Normalize kwargs saved by older code: drop keys ModuleNet no longer
        # accepts and default keys it now requires.
        kwargs.pop('sharing_patterns', None)
        kwargs.setdefault('module_pool', 'mean')
        kwargs.setdefault('module_use_gammas', 'linear')
        model = ModuleNet(**kwargs)
    elif model_type == 'MAC':
        kwargs.setdefault('write_unit', 'original')
        kwargs.setdefault('read_connect', 'last')
        kwargs.setdefault('read_unit', 'original')
        kwargs.setdefault('noisy_controls', False)
        kwargs.pop('sharing_params_patterns', None)
        model = MAC(**kwargs)
    elif model_type == 'RelNet':
        # NOTE(review): RelationNet is not imported in this module; this
        # branch would raise NameError if ever taken — confirm intended.
        model = RelationNet(**kwargs)
    elif model_type == 'SHNMN':
        # NOTE(review): SHNMN is not imported in this module either.
        model = SHNMN(**kwargs)
    elif model_type == 'SimpleNMN':
        # NOTE(review): SimpleModuleNet is not imported in this module either.
        model = SimpleModuleNet(**kwargs)
    else:
        raise ValueError()
    # (Removed an unused `cur_state = model.state_dict()` snapshot that did
    # dead work before loading the saved state.)
    model.load_state_dict(state)
    return model, kwargs
def load_baseline(path):
    """Rebuild a baseline model (LSTM / CNN+LSTM / CNN+LSTM+SA) from *path*.

    Returns ``(model, kwargs)`` with the checkpoint's state dict loaded.
    """
    constructors = {
        'LSTM': LstmModel,
        'CNN+LSTM': CnnLstmModel,
        'CNN+LSTM+SA': CnnLstmSaModel,
    }
    ckpt = load_cpu(path)
    kwargs = ckpt['baseline_kwargs']
    model = constructors[ckpt['baseline_type']](**kwargs)
    model.load_state_dict(ckpt['baseline_state'])
    return model, kwargs
def get_updated_args(kwargs, object_class):
    """
    Returns kwargs with renamed args or arg values and deleted, deprecated, unused args.
    Useful for loading older, trained models.
    If using this function is necessary, use immediately before initializing object.
    """
    # `arg_value_updates` is expected to be a module-level mapping of
    # {arg_name: {old_value: new_value}}. It was previously referenced without
    # ever being defined in this module (a guaranteed NameError); fall back to
    # "no value updates" when it is absent.
    value_updates = globals().get('arg_value_updates', {})
    for arg, updates in value_updates.items():
        if arg in kwargs and kwargs[arg] in updates:
            kwargs[arg] = updates[kwargs[arg]]
    # Keep only the arguments accepted by object_class.__init__.
    # inspect.getargspec was deprecated and removed in Python 3.11; use
    # getfullargspec, whose first element is the same positional-arg list.
    valid_args = inspect.getfullargspec(object_class.__init__)[0]
    return {arg: kwargs[arg] for arg in valid_args if arg in kwargs}
class EMA():
    """Exponential moving average of tensors, keyed by (category, name)."""

    def __init__(self, mu):
        # mu: weight of the *new* observation in the running average.
        self.mu = mu
        self.shadow = {}

    def register(self, cat, name, val):
        """Store the initial value for this (cat, name) pair."""
        self.shadow['{}-{}'.format(cat, name)] = val.clone()

    def __call__(self, cat, name, x):
        """Fold *x* into the running average and return the new average."""
        key = '{}-{}'.format(cat, name)
        assert key in self.shadow
        updated = self.mu * x + (1.0 - self.mu) * self.shadow[key]
        self.shadow[key] = updated.clone()
        return updated
def load_tbd_net(checkpoint, vocab):
    """ Convenience function to load a TbD-Net model from a checkpoint file.
    Parameters
    ----------
    checkpoint : Union[pathlib.Path, str]
        The path to the checkpoint.
    vocab : Dict[str, Dict[any, any]]
        The vocabulary file associated with the TbD-Net. For an extended description, see above.
    Returns
    -------
    torch.nn.Module
        The TbD-Net model.
    Notes
    -----
    This pushes the TbD-Net model to the GPU if a GPU is available.

    NOTE(review): ``TbDNet`` is not imported anywhere in this module, so
    calling this function raises NameError as-is — confirm the intended
    import (presumably from the TbD-Net project).
    """
    tbd_net = TbDNet(vocab)
    # Checkpoints saved on GPU are remapped onto CPU before loading.
    tbd_net.load_state_dict(torch.load(str(checkpoint), map_location={'cuda:0': 'cpu'}))
    if torch.cuda.is_available():
        tbd_net.cuda()
    return tbd_net
| 6,077 | 32.58011 | 138 | py |
CLOSURE | CLOSURE-master/vr/data.py | #!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import numpy as np
import PIL.Image
import h5py
import io
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import random, math
import vr.programs
import json
from vr.programs import ProgramConverter
def load_scenes(scenes_json):
    """Parse a CLEVR scenes JSON file into a list of per-image object tables.

    Each table is a list of dicts with keys: id, position, color, material,
    shape, size. When a scene stores raw '3d_coords', they are projected onto
    the scene's right/front direction vectors to obtain the position.
    """
    with open(scenes_json) as f:
        raw_scenes = json.load(f)['scenes']
    scenes = []
    for scene in raw_scenes:
        objects = []
        for obj_idx, obj in enumerate(scene['objects']):
            if '3d_coords' in obj:
                position = [np.dot(obj['3d_coords'], scene['directions']['right']),
                            np.dot(obj['3d_coords'], scene['directions']['front']),
                            obj['3d_coords'][2]]
            else:
                position = obj['position']
            objects.append({
                'id': '%d-%d' % (scene['image_index'], obj_idx),
                'position': position,
                'color': obj['color'],
                'material': obj['material'],
                'shape': obj['shape'],
                'size': obj['size'],
            })
        scenes.append(objects)
    return scenes
def _dataset_to_tensor(dset, mask=None, dtype=None):
arr = np.asarray(dset, dtype=np.int64 if dtype is None else dtype)
if mask is not None:
arr = arr[mask]
tensor = torch.LongTensor(arr)
return tensor
def _gen_subsample_mask(num, percent=1.0):
chosen_num = math.floor(num * percent)
mask = np.full((num,), False)
selected_ids = np.asarray(random.sample(range(num), chosen_num), dtype='int32')
mask[selected_ids] = True
return mask
class ClevrDataset(Dataset):
    """Dataset over CLEVR questions stored in HDF5, with optional image
    features (HDF5) and scene graphs (JSON).

    Each item is a (question, index, feats, scene, answer, program_seq)
    tuple (question becomes [question, q_type] when a 'types' table exists).
    At most one subsetting option may be used: `percent_of_data`,
    `question_families`, or `oversample`/`oversample_shift`.
    """

    def __init__(self, question_h5, feature_h5_path, scene_path, vocab,
                 mode='prefix', load_features=False,
                 max_samples=None, question_families=None, percent_of_data=1.0,
                 oversample=None, oversample_shift=None):
        print('CLEVR DATASET')
        mode_choices = ['prefix', 'postfix']
        if mode not in mode_choices:
            raise ValueError('Invalid mode "%s"' % mode)
        self.vocab = vocab
        self.program_converter = ProgramConverter(vocab)
        # The feature file is opened lazily in __getitem__ (worker-safe).
        self.feature_h5_path = feature_h5_path
        self.feature_h5 = None
        self.all_features = None
        self.load_features = load_features
        self.mode = mode
        self.max_samples = max_samples
        # Compute the mask: the three subsetting options are mutually exclusive.
        num_mask_options_chosen = (
            int(percent_of_data < 1.0) + int(question_families is not None)
            + int(oversample is not None))
        if num_mask_options_chosen > 1:
            raise ValueError()
        mask = None
        if oversample is not None:
            # Repeat questions whose family index >= oversample_shift
            # `oversample` times; keep the rest once.
            all_families = question_h5['question_families'][()]
            regular_indices = (all_families < oversample_shift).nonzero()[0]
            oversampled_indices = (all_families >= oversample_shift).nonzero()[0]
            mask = np.hstack([regular_indices] + [oversampled_indices] * oversample)
        if question_families is not None:
            # Use only the specified families
            all_families = np.asarray(question_h5['question_families'])
            N = all_families.shape[0]
            print(question_families)
            target_families = np.asarray(question_families)[:, None]
            mask = (all_families == target_families).any(axis=0)
        if percent_of_data < 1.0:
            # Random subsample of the question set.
            num_example = np.asarray(question_h5['image_idxs']).shape[0]
            mask = _gen_subsample_mask(num_example, percent_of_data)
        self.mask = mask
        # Data from the question file is small, so read it all into memory
        print('Reading question data into memory')
        self.all_types = None
        if 'types' in question_h5:
            self.all_types = _dataset_to_tensor(question_h5['types'], mask)
        self.all_question_families = None
        if 'question_families' in question_h5:
            self.all_question_families = _dataset_to_tensor(question_h5['question_families'], mask)
        self.all_questions = _dataset_to_tensor(question_h5['questions'], mask)
        self.all_image_idxs = _dataset_to_tensor(question_h5['image_idxs'], mask)
        self.all_programs = None
        if 'programs' in question_h5:
            self.all_programs = _dataset_to_tensor(question_h5['programs'], mask)
        self.all_answers = None
        if 'answers' in question_h5:
            self.all_answers = _dataset_to_tensor(question_h5['answers'], mask)
        if scene_path:
            self.all_scenes = load_scenes(scene_path)
        else:
            self.all_scenes = None

    def __getitem__(self, index):
        # Open the feature or load them if requested
        if self.feature_h5_path and not self.feature_h5:
            self.feature_h5 = h5py.File(self.feature_h5_path, 'r')
            if self.load_features:
                # Pull the whole feature table into memory once.
                self.features = self.feature_h5['features'][()]
        if self.all_question_families is not None:
            question_family = self.all_question_families[index]
        q_type = None if self.all_types is None else self.all_types[index]
        question = self.all_questions[index]
        image_idx = self.all_image_idxs[index]
        answer = None
        if self.all_answers is not None:
            answer = self.all_answers[index]
        program_seq = None
        if self.all_programs is not None:
            program_seq = self.all_programs[index]
        if self.all_scenes:
            scene = self.all_scenes[image_idx]
        else:
            scene = None
        if self.feature_h5_path:
            if self.load_features:
                feats = self.features[image_idx]
            else:
                feats = self.feature_h5['features'][image_idx]
            # A 1-D entry is an encoded image blob: decode it and convert
            # HWC uint8 -> CHW float in [0, 1].
            if feats.ndim == 1:
                feats = np.array(PIL.Image.open(io.BytesIO(feats))).transpose(2, 0, 1) / 255.0
        else:
            # Placeholder so collation still receives a tensor.
            feats = [0]
        feats = torch.FloatTensor(np.asarray(feats, dtype=np.float32))
        if q_type is None:
            return (question, index, feats, scene, answer, program_seq)
        return ([question, q_type], index, feats, scene, answer, program_seq)

    def __len__(self):
        # `max_samples` caps the dataset length without touching the data.
        if self.max_samples is None:
            return self.all_questions.size(0)
        else:
            return min(self.max_samples, self.all_questions.size(0))
class ClevrDataLoader(DataLoader):
    """DataLoader wrapper that builds a ClevrDataset from keyword arguments.

    Dataset-specific kwargs (question_h5, feature_h5, scene_path, vocab, ...)
    are popped off before the remainder is forwarded to
    ``torch.utils.data.DataLoader``; collation is fixed to ``clevr_collate``.
    """

    def __init__(self, **kwargs):
        if 'question_h5' not in kwargs:
            raise ValueError('Must give question_h5')
        if 'feature_h5' not in kwargs:
            raise ValueError('Must give feature_h5')
        if 'vocab' not in kwargs:
            raise ValueError('Must give vocab')
        scene_path = kwargs.pop('scene_path')
        print('Reading scenes from ', scene_path)
        feature_h5_path = kwargs.pop('feature_h5')
        print('Reading features from ', feature_h5_path)
        question_h5_path = kwargs.pop('question_h5')
        print('Reading questions from ', question_h5_path)
        vocab = kwargs.pop('vocab')
        mode = kwargs.pop('mode', 'prefix')
        load_features = kwargs.pop('load_features', False)
        percent_of_data = kwargs.pop('percent_of_data', 1.)
        oversample = kwargs.pop('oversample', None)
        oversample_shift = kwargs.pop('oversample_shift', None)
        question_families = kwargs.pop('question_families', None)
        max_samples = kwargs.pop('max_samples', None)
        # The question file is only needed while the dataset reads it into
        # memory, so it is closed immediately afterwards.
        with h5py.File(question_h5_path, 'r') as question_h5:
            self.dataset = ClevrDataset(
                question_h5, feature_h5_path, scene_path, vocab, mode,
                load_features=load_features,
                max_samples=max_samples,
                question_families=question_families,
                percent_of_data=percent_of_data,
                oversample=oversample,
                oversample_shift=oversample_shift)
        kwargs['collate_fn'] = clevr_collate
        super(ClevrDataLoader, self).__init__(self.dataset, **kwargs)

    def close(self):
        # Nothing to release; kept so the context-manager protocol works.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
def clevr_collate(batch):
    """Collate (question, index, feats, scene, answer, program) samples.

    Questions and indices are always collated; features only when present for
    every sample; answers/programs only when the first sample has them;
    scenes are passed through untouched.
    """
    questions, indices, feats, scenes, answers, programs = zip(*batch)
    collated_feats = default_collate(feats) if all(f is not None for f in feats) else feats
    collated_answers = default_collate(answers) if answers[0] is not None else answers
    collated_programs = default_collate(programs) if programs[0] is not None else programs
    return [default_collate(questions), default_collate(indices),
            collated_feats, scenes, collated_answers, collated_programs]
| 9,208 | 37.85654 | 138 | py |
CLOSURE | CLOSURE-master/vr/models/seq2seq_att.py | #!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import math
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import (pack_padded_sequence,
pad_packed_sequence)
class Attn(nn.Module):
    """Additive (concat) attention over bidirectional encoder states.

    Scores each encoder position against the decoder state, pushes padded
    positions toward zero weight with a large negative penalty, and returns
    softmax attention weights of shape (B, 1, T).
    """

    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size
        # Input is decoder state (H) concatenated with a bidirectional
        # encoder state (2H).
        self.attn = nn.Linear(self.hidden_size * 3, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        self.v.data.normal_(mean=0, std=1. / math.sqrt(self.v.size(0)))

    def forward(self, output, encoder_outputs, encoder_mask):
        num_steps = encoder_outputs.size(1)
        queries = output.repeat(num_steps, 1, 1).transpose(0, 1)  # B*T*H
        energies = self.score(queries, encoder_outputs)           # B*1*T
        # Masked (padding) positions get a large negative energy.
        energies = energies - 1000 * (encoder_mask[:, None, :] == 0).float()
        return F.softmax(energies, dim=2)

    def score(self, hidden, encoder_outputs):
        combined = torch.cat([hidden, encoder_outputs], 2)        # [B*T*3H]
        energy = torch.tanh(self.attn(combined)).transpose(2, 1)  # [B*H*T]
        v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)  # [B*1*H]
        return torch.bmm(v, energy)                               # [B*1*T]
class Seq2SeqAtt(nn.Module):
    """Sequence-to-sequence model with a bidirectional LSTM encoder and an
    attention LSTM decoder.

    Used as a program generator: `log_likelihood` scores target programs,
    `forward` samples (or argmax-decodes) output sequences token by token.
    When `autoregressive` is False, the decoder's word embedding input is
    zeroed so the decoder conditions only on attention state.
    """

    def __init__(self,
                 null_token=0,
                 start_token=1,
                 end_token=2,
                 encoder_vocab_size=100,
                 decoder_vocab_size=100,
                 wordvec_dim=300,
                 hidden_dim=256,
                 rnn_num_layers=2,
                 rnn_dropout=0,
                 autoregressive=True,
                 ):
        super().__init__()
        self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
        # Bidirectional encoder -> outputs have size 2 * hidden_dim.
        self.encoder_rnn = nn.LSTM(wordvec_dim, hidden_dim, rnn_num_layers,
                                   dropout=rnn_dropout, bidirectional=True, batch_first=True)
        self.decoder_embed = nn.Embedding(decoder_vocab_size, wordvec_dim)
        # Decoder consumes word embedding + previous attention context (2H).
        self.decoder_rnn = nn.LSTM(wordvec_dim + 2 * hidden_dim, hidden_dim, rnn_num_layers,
                                   dropout=rnn_dropout, batch_first=True)
        # Logits come from decoder output (H) + attention context (2H).
        self.decoder_linear = nn.Linear(3 * hidden_dim, decoder_vocab_size)
        self.decoder_attn = Attn(hidden_dim)
        self.rnn_num_layers = rnn_num_layers
        self.NULL = null_token
        self.START = start_token
        self.END = end_token
        self.multinomial_outputs = None
        self.autoregressive = autoregressive
        # When True, forward() records per-step attention maps in
        # self._attn_weights.
        self.save_activations = False

    def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
        # NOTE(review): expand_embedding_vocab is not imported in this module;
        # calling this would raise NameError — confirm its intended source.
        expand_embedding_vocab(self.encoder_embed, token_to_idx,
                               word2vec=word2vec, std=std)

    def get_dims(self, x=None, y=None):
        # Convenience accessor for vocab sizes / dims / batch & time sizes.
        V_in = self.encoder_embed.num_embeddings
        V_out = self.decoder_embed.num_embeddings
        D = self.encoder_embed.embedding_dim
        H = self.encoder_rnn.hidden_size
        L = self.encoder_rnn.num_layers
        N = x.size(0) if x is not None else None
        N = y.size(0) if N is None and y is not None else N
        T_in = x.size(1) if x is not None else None
        T_out = y.size(1) if y is not None else None
        return V_in, V_out, D, H, L, N, T_in, T_out

    def encoder(self, x):
        # Sort by length for pack_padded_sequence, then restore original order.
        x, x_lengths, inverse_index = sort_for_rnn(x, null=self.NULL)
        embed = self.encoder_embed(x)
        packed = pack_padded_sequence(embed, x_lengths, batch_first=True)
        out_packed, hidden = self.encoder_rnn(packed)
        out, _ = pad_packed_sequence(out_packed, batch_first=True)
        out = out[inverse_index]
        hidden = [h[:,inverse_index] for h in hidden]
        return out, hidden

    def decoder(self, word_inputs, prev_hidden, encoder_outputs, encoder_mask):
        """Run one decoder step; prev_hidden is (h, c, attention_context)."""
        hn, cn, an = prev_hidden
        # 1 - rnn transition
        word_embedded = self.decoder_embed(word_inputs)
        if not self.autoregressive:
            word_embedded = torch.zeros_like(word_embedded)
        rnn_input = torch.cat((word_embedded, an), 1)[:, None, :]
        output, (hnext, cnext) = self.decoder_rnn(rnn_input, (hn, cn))
        output = output[:, 0, :]
        # 2 - perform attention
        attn_weights = self.decoder_attn(output, encoder_outputs, encoder_mask)
        anext = attn_weights.bmm(encoder_outputs)[:, 0, :]
        if self.save_activations:
            self._attn_weights.append(attn_weights)
        # 3 - compute output logits
        logits = self.decoder_linear(torch.cat([output, anext], 1))
        return logits, (hnext, cnext, anext)

    def compute_loss(self, output_logprobs, y):
        """
        Compute loss. We assume that the first element of the output sequence y is
        a start token, and that each element of y is left-aligned and right-padded
        with self.NULL out to T_out. We want the output_logprobs to predict the
        sequence y, shifted by one timestep so that y[0] is fed to the network and
        then y[1] is predicted. We also don't want to compute loss for padded
        timesteps.
        Inputs:
        - output_logprobs: Variable of shape (N, T_out, V_out)
        - y: LongTensor Variable of shape (N, T_out)
        """
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
        output_logprobs = output_logprobs[:, :-1].contiguous()
        y = y[:, 1:].contiguous()
        losses = F.cross_entropy(output_logprobs.view(-1, V_out), y.view(-1), reduction='none')
        losses = losses.view(N, T_out - 1)
        # Zero out loss at padded (NULL) target positions.
        losses *= (y != self.NULL).float()
        return losses.sum(1)

    def log_likelihood(self, x, y):
        """Teacher-forced per-sequence loss of target y given input x."""
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
        encoder_outputs, _ = self.encoder(x)
        encoder_mask = x != self.NULL
        decoder_inputs = y
        # Decoder starts from zero hidden/cell/attention states.
        decoder_hidden = (torch.zeros(L, N, H).to(x.device),
                          torch.zeros(L, N, H).to(x.device),
                          torch.zeros(N, 2 * H).to(x.device)) # attention state
        decoder_outputs = []
        for t in range(T_out):
            decoder_out, decoder_hidden = self.decoder(
                decoder_inputs[:,t], decoder_hidden,
                encoder_outputs, encoder_mask)
            decoder_outputs.append(decoder_out)
        decoder_outputs = torch.stack(decoder_outputs, dim=1)
        loss = self.compute_loss(decoder_outputs, y)
        return loss

    def forward(self, x, max_length=30, temperature=1.0, argmax=False):
        """Decode output sequences for input x.

        Samples from the (temperature-scaled) output distribution, or takes
        the argmax when `argmax` is True. Returns (tokens, per-step logprobs),
        both of shape (N, <= max_length); finished rows are padded with NULL.
        """
        self._attn_weights = []
        V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
        T_out = max_length
        encoded, _ = self.encoder(x)
        encoder_mask = x != self.NULL
        h, c, a = (torch.zeros(L, N, H).to(x.device), # hidden state
                   torch.zeros(L, N, H).to(x.device), # cell state
                   torch.zeros(N, 2 * H).to(x.device)) # attention state
        # buffers (on CPU currently)
        cur_input = Variable(x.data.new(N).fill_(self.START))
        y = torch.LongTensor(N, T_out).fill_(self.NULL)
        y[:, 0] = cur_input
        y_logprobs = torch.zeros((N, T_out))
        done = torch.ByteTensor(N).fill_(0)
        for t in range(1, T_out):
            # generate output
            logprobs, (h, c, a) = self.decoder(cur_input, (h, c, a), encoded, encoder_mask)
            logprobs = logprobs / temperature
            logprobs = F.log_softmax(logprobs, dim=1)
            if argmax:
                _, cur_output = logprobs.max(1)
            else:
                cur_output = torch.exp(logprobs).multinomial(1)[:, 0]
            # save output (only for rows that have not emitted END yet)
            cur_output_data = cur_output.data.cpu()
            not_done = logical_not(done)
            y[not_done, t] = cur_output_data[not_done]
            y_logprobs[:, t] = logprobs[torch.arange(N), cur_output]
            done = logical_or(done, (cur_output_data == self.END).byte())
            cur_input = cur_output
            # stop if fully done
            if done.sum() == N:
                break
        return y.to(x.device), y_logprobs.to(x.device)
def logical_or(x, y):
    """Element-wise OR for 0/1 tensors: sums above 1 are clamped back to 1."""
    total = x + y
    return total.clamp_(0, 1)
def logical_not(x):
    """Element-wise NOT for 0/1 tensors: True exactly where x is zero."""
    return x == 0
def sort_for_rnn(x, null=0):
    """Sort batch rows by descending non-*null* length for pack_padded_sequence.

    Returns (sorted_x, lengths_list, inverse_index), where inverse_index
    restores the original row order: sorted_x[inverse_index] == x.
    """
    lengths = (x != null).sum(dim=1).long()
    sorted_lengths, order = lengths.sort(dim=0, descending=True)
    # Map each original row back to its position in the sorted batch.
    inverse_index = torch.zeros_like(order).to(x.device)
    for position, original_row in enumerate(order):
        inverse_index[original_row] = position
    return x[order], sorted_lengths.tolist(), inverse_index
| 8,941 | 38.566372 | 138 | py |
CLOSURE | CLOSURE-master/vr/models/baselines.py | #!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from vr.models.layers import init_modules, ResidualBlock
class StackedAttention(nn.Module):
    """One stacked-attention hop (SAN): attends over image features `v`
    using the question encoding `u` and returns `u` plus the attended
    visual summary. The last attention map is kept in `attention_maps`."""

    def __init__(self, input_dim, hidden_dim):
        super(StackedAttention, self).__init__()
        self.Wv = nn.Conv2d(input_dim, hidden_dim, kernel_size=1, padding=0)
        self.Wu = nn.Linear(input_dim, hidden_dim)
        self.Wp = nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0)
        self.hidden_dim = hidden_dim
        # (N, 1, H, W) attention weights from the most recent forward pass.
        self.attention_maps = None
        init_modules(self.modules(), init='normal')

    def forward(self, v, u):
        """
        Input:
        - v: N x D x H x W
        - u: N x D
        Returns:
        - next_u: N x D
        """
        N, K = v.size(0), self.hidden_dim
        D, H, W = v.size(1), v.size(2), v.size(3)
        v_proj = self.Wv(v)  # N x K x H x W
        u_proj = self.Wu(u)  # N x K
        u_proj_expand = u_proj.view(N, K, 1, 1).expand(N, K, H, W)
        # F.tanh was deprecated and removed in recent PyTorch; torch.tanh is
        # the identical replacement.
        h = torch.tanh(v_proj + u_proj_expand)
        # Explicit dim=1 matches the old implicit behavior for a 2-D input
        # and avoids the "implicit softmax dimension" deprecation.
        p = F.softmax(self.Wp(h).view(N, H * W), dim=1).view(N, 1, H, W)
        self.attention_maps = p.data.clone()
        v_tilde = (p.expand_as(v) * v).sum(3).sum(2).view(N, D)
        next_u = u + v_tilde
        return next_u
class LstmEncoder(nn.Module):
    """Embed a token sequence and return the LSTM hidden state at the last
    non-<NULL> position of each sequence."""

    def __init__(self, token_to_idx, wordvec_dim=300,
                 rnn_dim=256, rnn_num_layers=2, rnn_dropout=0):
        super(LstmEncoder, self).__init__()
        self.token_to_idx = token_to_idx
        self.NULL = token_to_idx['<NULL>']
        self.START = token_to_idx['<START>']
        self.END = token_to_idx['<END>']
        self.embed = nn.Embedding(len(token_to_idx), wordvec_dim)
        self.rnn = nn.LSTM(wordvec_dim, rnn_dim, rnn_num_layers,
                           dropout=rnn_dropout, batch_first=True)

    def expand_vocab(self, token_to_idx, word2vec=None, std=0.01):
        # NOTE(review): expand_embedding_vocab is not imported in this
        # module; calling this raises NameError — confirm its source.
        expand_embedding_vocab(self.embed, token_to_idx,
                               word2vec=word2vec, std=std)

    def forward(self, x):
        N, T = x.size()
        # Index of the last non-NULL token per row; defaults to the final
        # timestep when no NULL padding is found.
        idx = torch.LongTensor(N).fill_(T - 1)
        x_cpu = x.data.cpu()
        for row in range(N):
            for t in range(T - 1):
                if x_cpu[row, t] != self.NULL and x_cpu[row, t + 1] == self.NULL:
                    idx[row] = t
                    break
        idx = Variable(idx.type_as(x.data).long(), requires_grad=False)
        hs, _ = self.rnn(self.embed(x))
        H = hs.size(2)
        gather_idx = idx.view(N, 1, 1).expand(N, 1, H)
        return hs.gather(1, gather_idx).view(N, H)
def build_cnn(feat_dim=(1024, 14, 14),
              res_block_dim=128,
              num_res_blocks=0,
              proj_dim=512,
              pooling='maxpool2'):
    """Assemble a small CNN head over pre-extracted image features.

    Returns (nn.Sequential, (C, H, W)) where the tuple is the output shape
    for a single input of shape *feat_dim*.
    """
    C, H, W = feat_dim
    layers = []
    if num_res_blocks > 0:
        # Project into the residual-block width before stacking blocks.
        layers += [nn.Conv2d(C, res_block_dim, kernel_size=3, padding=1),
                   nn.ReLU(inplace=True)]
        C = res_block_dim
        layers += [ResidualBlock(C) for _ in range(num_res_blocks)]
    if proj_dim > 0:
        layers += [nn.Conv2d(C, proj_dim, kernel_size=1, padding=0),
                   nn.ReLU(inplace=True)]
        C = proj_dim
    if pooling == 'maxpool2':
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        H, W = H // 2, W // 2
    return nn.Sequential(*layers), (C, H, W)
def build_mlp(input_dim, hidden_dims, output_dim,
              use_batchnorm=False, dropout=0):
    """Build an MLP: optional input dropout/batchnorm, then for each hidden
    width a Linear (+BatchNorm)(+Dropout)+ReLU stack, and a final Linear to
    *output_dim*."""
    layers = []
    if dropout > 0:
        layers.append(nn.Dropout(p=dropout))
    if use_batchnorm:
        layers.append(nn.BatchNorm1d(input_dim))
    prev_dim = input_dim
    for width in hidden_dims:
        layers.append(nn.Linear(prev_dim, width))
        if use_batchnorm:
            layers.append(nn.BatchNorm1d(width))
        if dropout > 0:
            layers.append(nn.Dropout(p=dropout))
        layers.append(nn.ReLU(inplace=True))
        prev_dim = width
    layers.append(nn.Linear(prev_dim, output_dim))
    return nn.Sequential(*layers)
class LstmModel(nn.Module):
    """Question-only baseline: LSTM question encoding -> MLP over answers."""

    def __init__(self, vocab,
                 rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
                 fc_use_batchnorm=False, fc_dropout=0, fc_dims=(1024,)):
        super(LstmModel, self).__init__()
        self.rnn = LstmEncoder(
            token_to_idx=vocab['question_token_to_idx'],
            wordvec_dim=rnn_wordvec_dim,
            rnn_dim=rnn_dim,
            rnn_num_layers=rnn_num_layers,
            rnn_dropout=rnn_dropout)
        self.classifier = build_mlp(
            input_dim=rnn_dim,
            hidden_dims=fc_dims,
            output_dim=len(vocab['answer_token_to_idx']),
            use_batchnorm=fc_use_batchnorm,
            dropout=fc_dropout)

    def forward(self, questions, feats):
        # `feats` is accepted for interface parity with the other baselines
        # but is intentionally unused.
        return self.classifier(self.rnn(questions))
class CnnLstmModel(nn.Module):
    """CNN+LSTM baseline: concatenate flattened CNN image features with the
    LSTM question encoding, then classify with an MLP."""

    def __init__(self, vocab,
                 rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
                 cnn_feat_dim=(1024,14,14),
                 cnn_res_block_dim=128, cnn_num_res_blocks=0,
                 cnn_proj_dim=512, cnn_pooling='maxpool2',
                 fc_dims=(1024,), fc_use_batchnorm=False, fc_dropout=0):
        super(CnnLstmModel, self).__init__()
        self.rnn = LstmEncoder(
            token_to_idx=vocab['question_token_to_idx'],
            wordvec_dim=rnn_wordvec_dim,
            rnn_dim=rnn_dim,
            rnn_num_layers=rnn_num_layers,
            rnn_dropout=rnn_dropout)
        self.cnn, (C, H, W) = build_cnn(
            feat_dim=cnn_feat_dim,
            res_block_dim=cnn_res_block_dim,
            num_res_blocks=cnn_num_res_blocks,
            proj_dim=cnn_proj_dim,
            pooling=cnn_pooling)
        self.classifier = build_mlp(
            input_dim=C * H * W + rnn_dim,
            hidden_dims=fc_dims,
            output_dim=len(vocab['answer_token_to_idx']),
            use_batchnorm=fc_use_batchnorm,
            dropout=fc_dropout)

    def forward(self, questions, feats):
        N = questions.size(0)
        assert N == feats.size(0)
        q_enc = self.rnn(questions)
        img_enc = self.cnn(feats).view(N, -1)
        return self.classifier(torch.cat([q_enc, img_enc], 1))
class CnnLstmSaModel(nn.Module):
    """CNN+LSTM+SA baseline: project image features, refine the question
    encoding through stacked-attention hops, then classify with an MLP."""

    def __init__(self, vocab,
                 rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
                 cnn_feat_dim=(1024,14,14),
                 stacked_attn_dim=512, num_stacked_attn=2,
                 fc_use_batchnorm=False, fc_dropout=0, fc_dims=(1024,)):
        super(CnnLstmSaModel, self).__init__()
        self.rnn = LstmEncoder(
            token_to_idx=vocab['question_token_to_idx'],
            wordvec_dim=rnn_wordvec_dim,
            rnn_dim=rnn_dim,
            rnn_num_layers=rnn_num_layers,
            rnn_dropout=rnn_dropout)
        C, H, W = cnn_feat_dim
        self.image_proj = nn.Conv2d(C, rnn_dim, kernel_size=1, padding=0)
        # Kept as a plain list plus explicit add_module('stacked-attn-%d')
        # to preserve the original parameter names.
        self.stacked_attns = []
        for hop in range(num_stacked_attn):
            attn = StackedAttention(rnn_dim, stacked_attn_dim)
            self.stacked_attns.append(attn)
            self.add_module('stacked-attn-%d' % hop, attn)
        self.classifier = build_mlp(
            input_dim=rnn_dim,
            hidden_dims=fc_dims,
            output_dim=len(vocab['answer_token_to_idx']),
            use_batchnorm=fc_use_batchnorm,
            dropout=fc_dropout)
        init_modules(self.modules(), init='normal')

    def forward(self, questions, feats):
        u = self.rnn(questions)
        v = self.image_proj(feats)
        for attn in self.stacked_attns:
            u = attn(v, u)
        return self.classifier(u)
| 8,657 | 34.052632 | 138 | py |
CLOSURE | CLOSURE-master/vr/models/filmed_net.py | #!/usr/bin/env python3
import math
import pprint
from termcolor import colored
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
from vr.models.layers import init_modules, GlobalAveragePool, Flatten
from vr.models.layers import build_classifier, build_stem
import vr.programs
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FiLM(nn.Module):
    """
    A Feature-wise Linear Modulation Layer from
    'FiLM: Visual Reasoning with a General Conditioning Layer'
    """

    def __init__(self, use_gammas='identity'):
        super().__init__()
        # How raw gamma predictions are transformed before scaling:
        # 'identity' (as-is), 'sigmoid', 'tanh' (1 + 2*tanh), or 'nope'
        # (gammas ignored, scale fixed to 1).
        self.use_gammas = use_gammas

    def forward(self, x, gammas, betas):
        g = gammas.unsqueeze(2).unsqueeze(3).expand_as(x)
        if self.use_gammas == 'identity':
            scale = g
        elif self.use_gammas == 'sigmoid':
            scale = torch.sigmoid(g)
        elif self.use_gammas == 'tanh':
            scale = 1 + 2 * torch.tanh(g)
        elif self.use_gammas == 'nope':
            scale = 1
        else:
            raise ValueError()
        shift = betas.unsqueeze(2).unsqueeze(3).expand_as(x)
        return (scale * x) + shift
class FiLMedNet(nn.Module):
def __init__(self, vocab, feature_dim=(1024, 14, 14),
stem_num_layers=2,
stem_batchnorm=False,
stem_kernel_size=3,
stem_subsample_layers=None,
stem_stride=1,
stem_padding=None,
stem_dim=64,
num_modules=4,
module_num_layers=1,
module_dim=128,
module_residual=True,
module_intermediate_batchnorm=False,
module_batchnorm=False,
module_batchnorm_affine=False,
module_dropout=0,
module_input_proj=1,
module_kernel_size=3,
classifier_proj_dim=512,
classifier_downsample='maxpool2',
classifier_fc_layers=(1024,),
classifier_batchnorm=False,
classifier_dropout=0,
condition_method='bn-film',
condition_pattern=[],
use_gamma=True,
use_beta=True,
use_coords=1,
debug_every=float('inf'),
print_verbose_every=float('inf'),
verbose=True,
):
super(FiLMedNet, self).__init__()
num_answers = len(vocab['answer_idx_to_token'])
self.stem_times = []
self.module_times = []
self.classifier_times = []
self.timing = False
self.num_modules = num_modules
self.module_num_layers = module_num_layers
self.module_batchnorm = module_batchnorm
self.module_dim = module_dim
self.condition_method = condition_method
self.use_gamma = use_gamma
self.use_beta = use_beta
self.use_coords_freq = use_coords
self.debug_every = debug_every
self.print_verbose_every = print_verbose_every
# Initialize helper variables
self.stem_use_coords = (stem_stride == 1) and (self.use_coords_freq > 0)
self.condition_pattern = condition_pattern
if len(condition_pattern) == 0:
self.condition_pattern = []
for i in range(self.module_num_layers * self.num_modules):
self.condition_pattern.append(self.condition_method != 'concat')
else:
self.condition_pattern = [i > 0 for i in self.condition_pattern]
self.extra_channel_freq = self.use_coords_freq
self.block = FiLMedResBlock
self.num_cond_maps = 2 * self.module_dim if self.condition_method == 'concat' else 0
self.fwd_count = 0
self.num_extra_channels = 2 if self.use_coords_freq > 0 else 0
if self.debug_every <= -1:
self.print_verbose_every = 1
# Initialize stem
stem_feature_dim = feature_dim[0] + self.stem_use_coords * self.num_extra_channels
self.stem = build_stem(
stem_feature_dim, stem_dim, module_dim,
num_layers=stem_num_layers, with_batchnorm=stem_batchnorm,
kernel_size=stem_kernel_size, stride=stem_stride, padding=stem_padding,
subsample_layers=stem_subsample_layers)
tmp = self.stem(Variable(torch.zeros([1, feature_dim[0], feature_dim[1], feature_dim[2]])))
module_H = tmp.size(2)
module_W = tmp.size(3)
self.stem_coords = coord_map((feature_dim[1], feature_dim[2]))
self.coords = coord_map((module_H, module_W))
self.default_weight = torch.ones(1, 1, self.module_dim).to(device)
self.default_bias = torch.zeros(1, 1, self.module_dim).to(device)
# Initialize FiLMed network body
self.function_modules = {}
self.vocab = vocab
for fn_num in range(self.num_modules):
with_cond = self.condition_pattern[self.module_num_layers * fn_num:
self.module_num_layers * (fn_num + 1)]
mod = self.block(module_dim, with_residual=module_residual,
with_intermediate_batchnorm=module_intermediate_batchnorm, with_batchnorm=module_batchnorm,
with_cond=with_cond,
dropout=module_dropout,
num_extra_channels=self.num_extra_channels,
extra_channel_freq=self.extra_channel_freq,
with_input_proj=module_input_proj,
num_cond_maps=self.num_cond_maps,
kernel_size=module_kernel_size,
batchnorm_affine=module_batchnorm_affine,
num_layers=self.module_num_layers,
condition_method=condition_method,
debug_every=self.debug_every)
self.add_module(str(fn_num), mod)
self.function_modules[fn_num] = mod
# Initialize output classifier
self.classifier = build_classifier(module_dim + self.num_extra_channels, module_H, module_W,
num_answers, classifier_fc_layers, classifier_proj_dim,
classifier_downsample, with_batchnorm=classifier_batchnorm,
dropout=classifier_dropout)
init_modules(self.modules())
def forward(self, x, film, save_activations=False):
# Initialize forward pass and externally viewable activations
self.fwd_count += 1
if save_activations:
self.feats = None
self.module_outputs = []
self.cf_input = None
if self.debug_every <= -2:
pdb.set_trace()
# Prepare FiLM layers
gammas = None
betas = None
if self.condition_method == 'concat':
# Use parameters usually used to condition via FiLM instead to condition via concatenation
cond_params = film[:,:,:2*self.module_dim]
cond_maps = cond_params.unsqueeze(3).unsqueeze(4).expand(cond_params.size() + x.size()[-2:])
else:
gammas, betas = torch.split(film[:,:,:2*self.module_dim], self.module_dim, dim=-1)
if not self.use_gamma:
gammas = self.default_weight.expand_as(gammas)
if not self.use_beta:
betas = self.default_bias.expand_as(betas)
# Propagate up image features CNN
stem_batch_coords = None
batch_coods = None
if self.use_coords_freq > 0:
stem_batch_coords = self.stem_coords.unsqueeze(0).expand(
torch.Size((x.size(0), *self.stem_coords.size())))
batch_coords = self.coords.unsqueeze(0).expand(
torch.Size((x.size(0), *self.coords.size())))
if self.stem_use_coords:
x = torch.cat([x, stem_batch_coords], 1)
feats = self.stem(x)
if save_activations:
self.feats = feats
N, _, H, W = feats.size()
# Propagate up the network from low-to-high numbered blocks
module_inputs = torch.zeros(feats.size()).unsqueeze(1).expand(
N, self.num_modules, self.module_dim, H, W).to(device)
module_inputs[:,0] = feats
for fn_num in range(self.num_modules):
if self.condition_method == 'concat':
layer_output = self.function_modules[fn_num](module_inputs[:,fn_num],
extra_channels=batch_coords, cond_maps=cond_maps[:,fn_num])
else:
layer_output = self.function_modules[fn_num](module_inputs[:,fn_num],
gammas[:,fn_num,:], betas[:,fn_num,:], batch_coords)
# Store for future computation
if save_activations:
self.module_outputs.append(layer_output)
if fn_num == (self.num_modules - 1):
final_module_output = layer_output
else:
module_inputs_updated = module_inputs.clone()
module_inputs_updated[:,fn_num+1] = module_inputs_updated[:,fn_num+1] + layer_output
module_inputs = module_inputs_updated
if self.debug_every <= -2:
pdb.set_trace()
# Run the final classifier over the resultant, post-modulated features.
if self.use_coords_freq > 0:
final_module_output = torch.cat([final_module_output, batch_coords], 1)
if save_activations:
self.cf_input = final_module_output
out = self.classifier(final_module_output)
if ((self.fwd_count % self.debug_every) == 0) or (self.debug_every <= -1):
pdb.set_trace()
return out
class FiLMedResBlock(nn.Module):
  """Residual block modulated by FiLM (feature-wise linear modulation).

  `condition_method` selects where the FiLM transform is applied:
  'block-input-film', 'conv-film', 'bn-film', 'relu-film' or
  'block-output-film'; `with_cond[0]` gates whether this block is conditioned
  at all.  Coordinate channels (`extra_channels`) and raw conditioning maps
  (`cond_maps`, used by the 'concat' scheme) may be concatenated to the
  inputs of the input projection / main convolution.
  """
  def __init__(self, in_dim, out_dim=None, with_residual=True, with_intermediate_batchnorm=False, with_batchnorm=True,
               with_cond=[False], dropout=0, num_extra_channels=0, extra_channel_freq=1,
               with_input_proj=0, num_cond_maps=0, kernel_size=3, batchnorm_affine=False,
               num_layers=1, condition_method='bn-film', debug_every=float('inf')):
    # out_dim defaults to in_dim so the residual connection is well-typed.
    if out_dim is None:
      out_dim = in_dim
    super(FiLMedResBlock, self).__init__()
    self.with_residual = with_residual
    self.with_intermediate_batchnorm = with_intermediate_batchnorm
    self.with_batchnorm = with_batchnorm
    self.with_cond = with_cond
    self.dropout = dropout
    # Extra (coordinate) channels are disabled entirely when there are none.
    self.extra_channel_freq = 0 if num_extra_channels == 0 else extra_channel_freq
    self.with_input_proj = with_input_proj  # Kernel size of input projection
    self.num_cond_maps = num_cond_maps
    self.kernel_size = kernel_size
    self.batchnorm_affine = batchnorm_affine
    self.num_layers = num_layers
    self.condition_method = condition_method
    self.debug_every = debug_every

    # Only odd kernels ('same' padding) and single-layer blocks are supported.
    if self.kernel_size % 2 == 0:
      raise(NotImplementedError)
    if self.num_layers >= 2:
      raise(NotImplementedError)

    if self.condition_method == 'block-input-film' and self.with_cond[0]:
      self.film = FiLM()
    if self.with_input_proj:
      self.input_proj = nn.Conv2d(in_dim + (num_extra_channels if self.extra_channel_freq >= 1 else 0),
                                  in_dim, kernel_size=self.with_input_proj, padding=self.with_input_proj // 2)

    self.conv1 = nn.Conv2d(in_dim + self.num_cond_maps +
                           (num_extra_channels if self.extra_channel_freq >= 2 else 0),
                           out_dim, kernel_size=self.kernel_size,
                           padding=self.kernel_size // 2)
    if self.condition_method == 'conv-film' and self.with_cond[0]:
      self.film = FiLM()
    if self.with_intermediate_batchnorm:
      # Drop the affine scale/shift when FiLM conditions this block, since
      # FiLM supplies its own scale/shift.
      self.bn0 = nn.BatchNorm2d(in_dim, affine=((not self.with_cond[0]) or self.batchnorm_affine))
    if self.with_batchnorm:
      self.bn1 = nn.BatchNorm2d(out_dim, affine=((not self.with_cond[0]) or self.batchnorm_affine))
    if self.condition_method == 'bn-film' and self.with_cond[0]:
      self.film = FiLM()
    if dropout > 0:
      self.drop = nn.Dropout2d(p=self.dropout)
    if ((self.condition_method == 'relu-film' or self.condition_method == 'block-output-film')
         and self.with_cond[0]):
      self.film = FiLM()

    init_modules(self.modules())

  def forward(self, x, gammas=None, betas=None, extra_channels=None, cond_maps=None):
    if self.debug_every <= -2:
      pdb.set_trace()

    if self.condition_method == 'block-input-film' and self.with_cond[0]:
      x = self.film(x, gammas, betas)

    # ResBlock input projection
    if self.with_input_proj:
      if extra_channels is not None and self.extra_channel_freq >= 1:
        x = torch.cat([x, extra_channels], 1)
      x = self.input_proj(x)
      if self.with_intermediate_batchnorm:
        x = self.bn0(x)
      x = F.relu(x)
    out = x

    # ResBlock body
    if cond_maps is not None:
      out = torch.cat([out, cond_maps], 1)
    if extra_channels is not None and self.extra_channel_freq >= 2:
      out = torch.cat([out, extra_channels], 1)
    out = self.conv1(out)
    if self.condition_method == 'conv-film' and self.with_cond[0]:
      out = self.film(out, gammas, betas)
    if self.with_batchnorm:
      out = self.bn1(out)
    if self.condition_method == 'bn-film' and self.with_cond[0]:
      out = self.film(out, gammas, betas)
    if self.dropout > 0:
      out = self.drop(out)
    out = F.relu(out)
    if self.condition_method == 'relu-film' and self.with_cond[0]:
      out = self.film(out, gammas, betas)

    # ResBlock remainder: residual add uses the (possibly projected) input x.
    if self.with_residual:
      out = x + out
    if self.condition_method == 'block-output-film' and self.with_cond[0]:
      out = self.film(out, gammas, betas)

    return out
class ConcatFiLMedResBlock(nn.Module):
  """Wrapper around FiLMedResBlock that first fuses a list of `num_input`
  equally-sized feature maps via channel concatenation followed by a 1x1
  projection (with ReLU) back to `in_dim` channels."""

  def __init__(self, num_input, in_dim, out_dim=None, with_residual=True, with_intermediate_batchnorm=False, with_batchnorm=True,
               with_cond=[False], dropout=0, num_extra_channels=0, extra_channel_freq=1,
               with_input_proj=0, num_cond_maps=0, kernel_size=3, batchnorm_affine=False,
               num_layers=1, condition_method='bn-film', debug_every=float('inf')):
    super(ConcatFiLMedResBlock, self).__init__()
    self.proj = nn.Conv2d(num_input * in_dim, in_dim, kernel_size=1, padding=0)
    self.tfilmedResBlock = FiLMedResBlock(
      in_dim=in_dim, out_dim=out_dim, with_residual=with_residual,
      with_intermediate_batchnorm=with_intermediate_batchnorm,
      with_batchnorm=with_batchnorm, with_cond=with_cond, dropout=dropout,
      num_extra_channels=num_extra_channels, extra_channel_freq=extra_channel_freq,
      with_input_proj=with_input_proj, num_cond_maps=num_cond_maps,
      kernel_size=kernel_size, batchnorm_affine=batchnorm_affine,
      num_layers=num_layers, condition_method=condition_method,
      debug_every=debug_every)

  def forward(self, x, gammas=None, betas=None, extra_channels=None, cond_maps=None):
    # x is a sequence of feature maps; concatenate along the channel axis.
    fused = F.relu(self.proj(torch.cat(x, 1)))
    return self.tfilmedResBlock(fused, gammas=gammas, betas=betas,
                                extra_channels=extra_channels, cond_maps=cond_maps)
class SharedFiLMedModule(nn.Module):
  """Takes 3 inputs:
  - the word
  - the left input
  - the right input

  One set of convolutional weights is shared across module instances; the
  per-layer FiLM coefficients are computed from the concatenation of the
  word embedding and the (vector-valued) left/right child outputs.
  """
  def __init__(self, dim, kernel_size=3,
               use_gammas='identity', num_layers=1, with_residual=True,
               pool='mean', post_linear=False, learn_embeddings=True):
    super().__init__()
    if kernel_size % 2 == 0 or post_linear:
      raise NotImplementedError()
    if learn_embeddings:
      self.embed = nn.Embedding(100, dim)
    else:
      # Caller passes pre-computed embeddings as `word` in forward().
      self.embed = None
    self.film = FiLM(use_gammas)
    # Two convolutions (and two FiLM-coefficient MLPs) per layer.
    for i in range(2 * num_layers):
      conv = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2)
      self.add_module('conv' + str(i), conv)
      film_computer = nn.Sequential(
        nn.Linear(3 * dim, 3 * dim),
        nn.ReLU(),
        nn.Linear(3 * dim, 2 * dim))
      self.add_module('film_computer' + str(i), film_computer)
    self.num_layers = num_layers
    self.with_residual = with_residual
    self.dim = dim
    self.pool = pool

  def forward(self, x, word, left_inp=None, right_inp=None):
    # Missing children default to zero vectors shaped like one spatial
    # position of x, i.e. (N, C).
    if left_inp is None:
      # assuming that interface dimensionality if equal to that
      # of stem
      left_inp = torch.zeros_like(x)[:, :, 0, 0]
    if right_inp is None:
      right_inp = torch.zeros_like(left_inp)
    embedding = self.embed(word) if self.embed else word
    inp = torch.cat([embedding, left_inp, right_inp], 1)
    for i in range(self.num_layers):
      film0_coeffs = getattr(self, 'film_computer{}'.format(2 * i))(inp)
      film1_coeffs = getattr(self, 'film_computer{}'.format(2 * i + 1))(inp)
      conv1 = getattr(self, 'conv{}'.format(2 * i))
      conv2 = getattr(self, 'conv{}'.format(2 * i + 1))
      # a bit ugly now cause we apply film to the input of conv1
      out0 = self.film(x, film0_coeffs[:, :self.dim], film0_coeffs[:, self.dim:])
      out1 = F.relu(conv1(out0))
      out1 = self.film(out1, film1_coeffs[:, :self.dim], film1_coeffs[:, self.dim:])
      out2 = F.relu((x if self.with_residual else 0) + conv2(out1))
      x = out2
    # Spatially pool the final feature map down to (N, C).
    if self.pool == 'mean':
      res = x.mean(3).mean(2)
    elif self.pool == 'max':
      res = x.max(3)[0].max(2)[0]
    else:
      raise ValueError()
    return res
class FiLMModule(nn.Module):
  """Binds a fixed word index to a SharedFiLMedModule so it can be invoked
  like an ordinary neural-module-network block (features + child inputs)."""

  def __init__(self, shared_film_module, word):
    super().__init__()
    self.shared_film_module = shared_film_module
    self.word = torch.LongTensor([word]).to(device)

  def forward(self, x, left_inp=None, right_inp=None):
    # Delegate to the shared module, supplying the bound word index.
    return self.shared_film_module(x, self.word, left_inp, right_inp)
def coord_map(shape, start=-1, end=1):
  """Given a 2-D shape (m, n), return a (2, m, n) tensor of coordinate maps:
  channel 0 varies linearly from `start` to `end` along columns (x) and
  channel 1 along rows (y)."""
  m, n = shape
  xs = torch.linspace(start, end, steps=n).to(device)
  ys = torch.linspace(start, end, steps=m).to(device)
  # Broadcast each 1-D ramp across the other axis, then stack as channels.
  x_map = xs.unsqueeze(0).expand(torch.Size((m, n))).unsqueeze(0)
  y_map = ys.unsqueeze(1).expand(torch.Size((m, n))).unsqueeze(0)
  return Variable(torch.cat([x_map, y_map], 0))
| 19,181 | 42.202703 | 133 | py |
CLOSURE | CLOSURE-master/vr/models/seq2seq.py | #!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Seq2Seq(nn.Module):
  """Encoder-decoder LSTM that maps one token sequence (question) to another
  (program).

  Sequences are left-aligned and right-padded with `null_token`; decoded
  sequences are framed by `start_token` / `end_token`.
  """
  def __init__(self,
               encoder_vocab_size=100,
               decoder_vocab_size=100,
               wordvec_dim=300,
               hidden_dim=256,
               rnn_num_layers=2,
               rnn_dropout=0,
               null_token=0,
               start_token=1,
               end_token=2,
               encoder_embed=None
               ):
    super(Seq2Seq, self).__init__()
    # NOTE(review): the `encoder_embed` argument is accepted but never used.
    # Encoder: embedding + LSTM over the input question.
    self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
    self.encoder_rnn = nn.LSTM(wordvec_dim, hidden_dim, rnn_num_layers,
                               dropout=rnn_dropout, batch_first=True)
    # Decoder: embedding + LSTM fed with [encoded question; token embedding].
    self.decoder_embed = nn.Embedding(decoder_vocab_size, wordvec_dim)
    self.decoder_rnn = nn.LSTM(wordvec_dim + hidden_dim, hidden_dim, rnn_num_layers,
                               dropout=rnn_dropout, batch_first=True)
    # Auxiliary decoder used only in forward(); its output is discarded there.
    self.decoder_rnn_new = nn.LSTM(hidden_dim, hidden_dim, rnn_num_layers,
                                   dropout=rnn_dropout, batch_first=True)
    self.decoder_linear = nn.Linear(hidden_dim, decoder_vocab_size)
    self.NULL = null_token
    self.START = start_token
    self.END = end_token
    self.multinomial_outputs = None

  def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
    # NOTE(review): `expand_embedding_vocab` is not among this module's
    # visible imports; confirm it is defined/imported elsewhere before use.
    expand_embedding_vocab(self.encoder_embed, token_to_idx,
                           word2vec=word2vec, std=std)

  def get_dims(self, x=None, y=None):
    """Return (V_in, V_out, D, H, L, N, T_in, T_out) inferred from the model
    and the optional input batch `x` / output batch `y`."""
    V_in = self.encoder_embed.num_embeddings
    V_out = self.decoder_embed.num_embeddings
    D = self.encoder_embed.embedding_dim
    H = self.encoder_rnn.hidden_size
    L = self.encoder_rnn.num_layers

    N = x.size(0) if x is not None else None
    N = y.size(0) if N is None and y is not None else N
    T_in = x.size(1) if x is not None else None
    T_out = y.size(1) if y is not None else None
    return V_in, V_out, D, H, L, N, T_in, T_out

  def before_rnn(self, x, replace=0):
    """For each row of `x`, find the index of the last non-NULL token and
    replace NULL padding with `replace`; returns (x, last-index Variable)."""
    # TODO: Use PackedSequence instead of manually plucking out the last
    # non-NULL entry of each sequence; it is cleaner and more efficient.
    N, T = x.size()
    idx = torch.LongTensor(N).fill_(T - 1)

    # Find the last non-null element in each sequence. Is there a clean
    # way to do this?
    x_cpu = x.cpu()
    for i in range(N):
      for t in range(T - 1):
        if x_cpu.data[i, t] != self.NULL and x_cpu.data[i, t + 1] == self.NULL:
          idx[i] = t
          break
    idx = idx.type_as(x.data)
    x[x.data == self.NULL] = replace
    return x, Variable(idx)

  def encoder(self, x):
    """Encode token sequence `x` into an (N, H) question representation."""
    V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
    x, idx = self.before_rnn(x)
    embed = self.encoder_embed(x)
    h0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
    c0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
    out, _ = self.encoder_rnn(embed, (h0, c0))

    # Pull out the hidden state for the last non-null value in each input
    idx = idx.view(N, 1, 1).expand(N, 1, H)
    return out.gather(1, idx).view(N, H)

  def decoder(self, encoded, y, h0=None, c0=None):
    """One decoding pass over `y`, conditioned on the encoded question at
    every step; returns (logits (N, T_out, V_out), h_T, c_T)."""
    V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)

    if T_out > 1:
      y, _ = self.before_rnn(y)
    y_embed = self.decoder_embed(y)
    # Feed the question encoding alongside the token embedding at each step.
    encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
    rnn_input = torch.cat([encoded_repeat, y_embed], 2)
    if h0 is None:
      h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
    if c0 is None:
      c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
    rnn_output, (ht, ct) = self.decoder_rnn(rnn_input, (h0, c0))

    rnn_output_2d = rnn_output.contiguous().view(N * T_out, H)
    output_logprobs = self.decoder_linear(rnn_output_2d).view(N, T_out, V_out)

    return output_logprobs, ht, ct

  def compute_loss(self, output_logprobs, y):
    """
    Compute loss. We assume that the first element of the output sequence y is
    a start token, and that each element of y is left-aligned and right-padded
    with self.NULL out to T_out. We want the output_logprobs to predict the
    sequence y, shifted by one timestep so that y[0] is fed to the network and
    then y[1] is predicted. We also don't want to compute loss for padded
    timesteps.

    Inputs:
    - output_logprobs: Variable of shape (N, T_out, V_out)
    - y: LongTensor Variable of shape (N, T_out)
    """
    self.multinomial_outputs = None
    V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
    # Target mask is shifted one step left relative to the prediction mask.
    mask = y.data != self.NULL
    y_mask = Variable(torch.Tensor(N, T_out).fill_(0).type_as(mask))
    y_mask[:, 1:] = mask[:, 1:]
    y_masked = y[y_mask]
    out_mask = Variable(torch.Tensor(N, T_out).fill_(0).type_as(mask))
    out_mask[:, :-1] = mask[:, 1:]
    out_mask = out_mask.view(N, T_out, 1).expand(N, T_out, V_out)
    out_masked = output_logprobs[out_mask].view(-1, V_out)
    loss = F.cross_entropy(out_masked, y_masked)
    return loss

  def forward(self, x, y):
    """Teacher-forced training pass: returns the cross-entropy loss of
    decoding `y` from the encoding of `x`."""
    encoded = self.encoder(x)
    V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
    T_out = 15
    encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
    h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
    c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
    # NOTE(review): this decoder_rnn_new pass is computed but its results are
    # never used below — dead computation unless intentionally kept; confirm.
    rnn_output, (ht, ct) = self.decoder_rnn_new(encoded_repeat, (h0, c0))
    output_logprobs, _, _ = self.decoder(encoded, y)
    loss = self.compute_loss(output_logprobs, y)
    return loss

  def reinforce_sample(self, x, max_length=30, temperature=1.0, argmax=False):
    """Autoregressively sample (or greedily decode, when `argmax`) a program
    per row of `x`; returns (tokens (N, T), per-step log-probs (N, T))."""
    N, T = x.size(0), max_length
    encoded = self.encoder(x)
    y = torch.LongTensor(N, T).fill_(self.NULL)
    y_logprobs = torch.Tensor(N, T).fill_(0.)
    done = torch.ByteTensor(N).fill_(0)
    cur_input = Variable(x.data.new(N, 1).fill_(self.START))
    h, c = None, None
    for t in range(T):
      # generate output
      logprobs, h, c = self.decoder(encoded, cur_input, h0=h, c0=c)
      logprobs = logprobs[:, 0, :]
      logprobs = logprobs / temperature
      logprobs = F.log_softmax(logprobs, dim=1)
      if argmax:
        _, cur_output = logprobs.max(1, keepdim=True)
      else:
        cur_output = torch.exp(logprobs).multinomial(1) # Now N x 1
      # save output
      cur_output_data = cur_output.data.cpu()
      not_done = logical_not(done)
      y[not_done, t] = cur_output_data[not_done, 0]
      # NOTE(review): `cur_output[:, 0]` is not masked with `not_done`, so the
      # mask and the index tensor differ in length once any row has finished —
      # verify this against the intended per-row log-prob gather.
      y_logprobs[not_done, t] = logprobs[not_done, cur_output[:, 0]]
      done = logical_or(done, cur_output_data[:, 0] == self.END)
      cur_input = cur_output
      # stop if fully done
      if done.sum() == N:
        break
    return y.type_as(x.data), y_logprobs.to(x.device)
def logical_and(x, y):
  """Element-wise AND for 0/1 masks, implemented as multiplication."""
  return y * x
def logical_or(x, y):
  """Element-wise OR for 0/1 masks: sum, clamped back to the [0, 1] range."""
  return (y + x).clamp(0, 1)
def logical_not(x):
  """Element-wise NOT for 0/1 masks: true exactly where ``x`` is zero."""
  return x == 0
| 7,657 | 38.885417 | 138 | py |
CLOSURE | CLOSURE-master/vr/models/layers.py | #!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_normal_, kaiming_uniform_
class SequentialSaveActivations(nn.Sequential):
  """`nn.Sequential` that additionally records the input and every layer's
  output in ``self.outputs`` on each forward pass."""

  def forward(self, input_):
    self.outputs = [input_]
    for layer in self:
      input_ = layer(input_)
      self.outputs.append(input_)
    return input_
class SimpleVisualBlock(nn.Module):
  """A single 'same'-padded convolution followed by ReLU.

  Only odd kernel sizes are supported (exact 'same' padding)."""

  def __init__(self, in_dim, out_dim=None, kernel_size=3):
    if out_dim is None:
      out_dim = in_dim
    super(SimpleVisualBlock, self).__init__()
    if kernel_size % 2 == 0:
      raise NotImplementedError()
    self.conv = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size,
                          padding=kernel_size // 2)

  def forward(self, x):
    return F.relu(self.conv(x))
class ResidualBlock(nn.Module):
  """conv -> ReLU -> conv residual block with optional batchnorm, an optional
  trailing `shared_block`, and an optional 1x1 `post_linear` conv that doubles
  the channel count."""
  def __init__(self, in_dim, out_dim=None, kernel_size=3, with_residual=True, with_batchnorm=True,
               shared_block=None, post_linear=False):
    if out_dim is None:
      out_dim = in_dim
    super(ResidualBlock, self).__init__()
    # Only odd kernel sizes give exact 'same' padding.
    if kernel_size % 2 == 0:
      raise NotImplementedError()
    self.conv1 = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, padding=kernel_size // 2)
    self.conv2 = nn.Conv2d(out_dim, out_dim, kernel_size=kernel_size, padding=kernel_size // 2)
    self.shared_block=shared_block
    self.with_batchnorm = with_batchnorm
    if with_batchnorm:
      self.bn1 = nn.BatchNorm2d(out_dim)
      self.bn2 = nn.BatchNorm2d(out_dim)
    self.with_residual = with_residual
    # A 1x1 projection is only needed when the residual path changes width.
    if in_dim == out_dim or not with_residual:
      self.proj = None
    else:
      self.proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
    if post_linear:
      self.post_linear = nn.Conv2d(out_dim, 2 * out_dim, kernel_size=1)
      # UGLY HACK!!! Overwrite the first out_dim output channels' 1x1 weights
      # with the identity, so those channels initially pass features through.
      self.post_linear.weight.data[:out_dim, :, 0, 0] = torch.eye(out_dim)
    else:
      self.post_linear = None

  def forward(self, x):
    if self.with_batchnorm:
      out = F.relu(self.bn1(self.conv1(x)))
      out = self.bn2(self.conv2(out))
    else:
      out = self.conv2(F.relu(self.conv1(x)))
    res = x if self.proj is None else self.proj(x)
    if self.with_residual:
      out = F.relu(res + out)
    else:
      out = F.relu(out)
    if self.shared_block:
      out = self.shared_block(out)
    if self.post_linear:
      out = self.post_linear(out)
    return out
class SimpleConcatBlock(nn.Module):
  """Fuse three equally-sized feature maps: 1x1-project their channel
  concatenation back to ``dim`` channels (with ReLU), then apply a
  batchnorm-free residual block."""

  def __init__(self, dim, kernel_size, shared_block=None):
    super().__init__()
    self.proj = nn.Conv2d(3 * dim, dim, kernel_size=1, padding=0)
    self.impl = ResidualBlock(
      dim, dim, kernel_size=kernel_size,
      with_residual=True, with_batchnorm=False, shared_block=shared_block)

  def forward(self, feats, x, y):
    stacked = torch.cat([feats, x, y], 1)  # concatenate along channels
    return self.impl(F.relu(self.proj(stacked)))
class ConcatBlock(nn.Module):
  """Fuse two feature maps: 1x1-project their channel concatenation down to
  ``dim`` channels (with ReLU), then run the result through a residual
  block."""

  def __init__(self, dim, kernel_size, with_residual=True, with_batchnorm=True,
               shared_block=None, post_linear=False):
    super().__init__()
    self.proj = nn.Conv2d(2 * dim, dim, kernel_size=1, padding=0)
    self.vis_block = ResidualBlock(
      dim, kernel_size=kernel_size,
      with_residual=with_residual, with_batchnorm=with_batchnorm,
      shared_block=shared_block, post_linear=post_linear)

  def forward(self, x, y):
    fused = torch.cat([x, y], 1)  # concatenate along channels
    return self.vis_block(F.relu(self.proj(fused)))
class GlobalAveragePool(nn.Module):
  """Average-pool each channel over all spatial positions: (N, C, *) -> (N, C)."""

  def forward(self, x):
    N, C = x.size(0), x.size(1)
    # FIX: mean(2) on the flattened (N, C, -1) view already yields (N, C);
    # the old trailing .squeeze(2) raised "dimension out of range" on modern
    # PyTorch (it dates from when reductions kept the reduced dim).
    return x.view(N, C, -1).mean(2)
class Flatten(nn.Module):
  """Collapse all non-batch dimensions: (N, ...) -> (N, prod(...))."""

  def forward(self, x):
    batch_size = x.size(0)
    return x.view(batch_size, -1)
def build_stem(feature_dim,
               stem_dim,
               module_dim,
               num_layers=2,
               with_batchnorm=True,
               kernel_size=[3],
               stride=[1],
               padding=None,
               subsample_layers=None,
               acceptEvenKernel=False):
  """Build the convolutional stem mapping `feature_dim` input channels to
  `module_dim` output channels.

  Intermediate layers have `stem_dim` channels; each layer is
  Conv2d (+ BatchNorm2d when `with_batchnorm`) + ReLU, with an optional
  stride-2 max-pool after each layer index listed in `subsample_layers`.
  `kernel_size`, `stride` and `padding` are per-layer lists; a length-1 list
  is broadcast to all layers.  (The shared mutable list defaults are never
  mutated — broadcasting rebinds new lists — so they are safe.)
  A padding entry of None means 'same' padding, which requires an odd kernel
  unless `acceptEvenKernel` is set.

  Returns a SequentialSaveActivations, which also records per-layer
  activations on each forward pass.
  """
  layers = []
  prev_dim = feature_dim

  # Broadcast length-1 per-layer settings to all layers.
  if len(kernel_size) == 1:
    kernel_size = num_layers * kernel_size
  if len(stride) == 1:
    stride = num_layers * stride
  if padding is None:  # FIX: identity comparison with None (was `== None`)
    padding = num_layers * [None]
  if len(padding) == 1:
    padding = num_layers * padding
  if subsample_layers is None:
    subsample_layers = []

  for i, cur_kernel_size, cur_stride, cur_padding in zip(range(num_layers), kernel_size, stride, padding):
    # Last layer outputs module_dim channels; earlier ones output stem_dim.
    curr_out = module_dim if (i == (num_layers-1) ) else stem_dim
    if cur_padding is None:  # Calculate default padding when None provided
      if cur_kernel_size % 2 == 0 and not acceptEvenKernel:
        raise NotImplementedError()  # FIX: raise an instance, not the class
      cur_padding = cur_kernel_size // 2

    layers.append(nn.Conv2d(prev_dim, curr_out,
                            kernel_size=cur_kernel_size, stride=cur_stride, padding=cur_padding,
                            bias=not with_batchnorm))
    if with_batchnorm:
      layers.append(nn.BatchNorm2d(curr_out))
    layers.append(nn.ReLU(inplace=True))
    if i in subsample_layers:
      layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    prev_dim = curr_out

  return SequentialSaveActivations(*layers)
class HybridPool(nn.Module):
  """Pool a (N, C, width, width) map down to 1x1 with both max- and
  average-pooling, concatenated along channels -> (N, 2C, 1, 1)."""

  def __init__(self, width):
    super().__init__()
    self.maxpool = nn.MaxPool2d(kernel_size=width, stride=width, padding=0)
    self.avgpool = nn.AvgPool2d(kernel_size=width, stride=width, padding=0)

  def forward(self, x):
    pooled = [self.maxpool(x), self.avgpool(x)]
    return torch.cat(pooled, 1)
def build_classifier(module_C, module_H, module_W, num_answers,
                     fc_dims=[], proj_dim=None, downsample=None,
                     with_batchnorm=True, dropout=[]):
  """Build the answer classifier head.

  Optionally 1x1-projects the (module_C, module_H, module_W) feature map to
  `proj_dim` channels, optionally downsamples ('maxpoolK'/'avgpoolK',
  'maxpoolfull'/'avgpoolfull', or 'hybrid'), flattens, applies the `fc_dims`
  MLP (BatchNorm1d / ReLU / Dropout per layer as configured) and a final
  linear layer with `num_answers` outputs.  (`fc_dims`/`dropout` mutable
  defaults are never mutated, so they are safe.)
  """
  layers = []
  prev_dim = module_C * module_H * module_W
  cur_dim = module_C
  if proj_dim is not None and proj_dim > 0:
    layers.append(nn.Conv2d(module_C, proj_dim, kernel_size=1, bias=not with_batchnorm))
    if with_batchnorm:
      layers.append(nn.BatchNorm2d(proj_dim))
    layers.append(nn.ReLU(inplace=True))
    prev_dim = proj_dim * module_H * module_W
    cur_dim = proj_dim
  if downsample is not None:
    if 'maxpool' in downsample or 'avgpool' in downsample:
      pool = nn.MaxPool2d if 'maxpool' in downsample else nn.AvgPool2d
      if 'full' in downsample:
        assert module_H == module_W
        pool_size = module_H
      else:
        # Pool size is the trailing character, e.g. 'maxpool2' -> 2.
        pool_size = int(downsample[-1])
      # Note: Potentially sub-optimal padding for non-perfectly aligned pooling
      padding = (0 if ((module_H % pool_size == 0) and (module_W % pool_size == 0)) else 1)
      layers.append(pool(kernel_size=pool_size, stride=pool_size, padding=padding))
      prev_dim = cur_dim * math.ceil(module_H / pool_size) * math.ceil(module_W / pool_size)
    if downsample == 'hybrid':
      assert module_H == module_W
      pool = HybridPool(module_H)
      layers.append(pool)
      prev_dim = cur_dim * 2
    if downsample == 'aggressive':
      # FIX: removed the unreachable pooling code that followed this raise.
      raise ValueError()
  layers.append(Flatten())
  # Broadcast a scalar dropout to all FC layers; an empty spec means none.
  if isinstance(dropout, float):
    dropout = [dropout] * len(fc_dims)
  elif not dropout:
    dropout = [0] * len(fc_dims)
  for next_dim, next_dropout in zip(fc_dims, dropout):
    layers.append(nn.Linear(prev_dim, next_dim, bias=not with_batchnorm))
    if with_batchnorm:
      layers.append(nn.BatchNorm1d(next_dim))
    layers.append(nn.ReLU(inplace=True))
    if next_dropout > 0:
      layers.append(nn.Dropout(p=next_dropout))
    prev_dim = next_dim
  layers.append(nn.Linear(prev_dim, num_answers))
  return nn.Sequential(*layers)
def init_modules(modules, init='uniform'):
  """Apply Kaiming ('normal' or 'uniform') initialization to the weights of
  every Conv2d/Linear in `modules`; any other `init` string is a no-op."""
  initializers = {'normal': kaiming_normal_, 'uniform': kaiming_uniform_}
  init_fn = initializers.get(init.lower())
  if init_fn is None:
    return
  for module in modules:
    if isinstance(module, (nn.Conv2d, nn.Linear)):
      init_fn(module.weight)
| 9,120 | 36.228571 | 138 | py |
CLOSURE | CLOSURE-master/vr/models/maced_net.py | #!/usr/bin/env python3
import numpy as np
import math
import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
import math
from torch.nn.init import kaiming_normal, kaiming_uniform, xavier_uniform, xavier_normal, constant
from vr.models.layers import build_classifier, build_stem
import vr.programs
from vr.models.filmed_net import coord_map, SharedFiLMedModule
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class MACControl(nn.Module):
  """Stand-alone control pipeline of a MAC network: runs per-step InputUnits
  and a shared ControlUnit over a question encoding and returns all control
  states plus their attention scores over question words."""
  def __init__(self, num_modules, rnn_dim, module_dim):
    super().__init__()
    self.num_modules = num_modules
    # One position-specific InputUnit per reasoning step.
    self.inputUnits = []
    for i in range(self.num_modules):
      mod = InputUnit(module_dim)
      self.add_module('InputUnit' + str(i+1), mod)
      self.inputUnits.append(mod)
    self.controlUnit = ControlUnit(module_dim)
    self.init_question_transformer = nn.Linear(rnn_dim, module_dim)
    self.init_question_non_linear = nn.Tanh()

  def forward(self, question):
    # question: (per-word states, sentence representation, word mask).
    q_states, q_rep, q_mask = question
    q_states = self.init_question_transformer(q_states)
    q_rep = self.init_question_transformer(q_rep)
    # The untransformed (pre-tanh) projection seeds the control chain.
    init_control = q_rep
    q_rep = self.init_question_non_linear(q_rep)

    # TODO: get rid of recurrency
    controls = [init_control]
    control_scores = [torch.zeros_like(q_states[:, :, 0])]
    for fn_num in range(self.num_modules):
      inputUnit = self.inputUnits[fn_num]
      q_rep_i = inputUnit(q_rep)
      control_i, control_scores_i = self.controlUnit(
        controls[fn_num], q_rep_i, q_states, q_mask)
      controls.append(control_i)
      control_scores.append(control_scores_i)
    controls = torch.cat([c.unsqueeze(1) for c in controls], 1) # N x M x D
    control_scores = torch.cat([c.unsqueeze(1) for c in control_scores], 1) # N x M x T
    return controls, control_scores
class MAC(nn.Module):
  """Implementation of the Compositional Attention Networks from: https://openreview.net/pdf?id=S1Euwz-Rb

  Pipeline: CNN stem over image features, a chain of `num_modules` reasoning
  steps (InputUnit -> shared ControlUnit -> read unit -> write unit), and an
  OutputUnit classifier over the final memory.
  """
  def __init__(self, vocab, feature_dim,
               stem_num_layers,
               stem_batchnorm,
               stem_kernel_size,
               stem_subsample_layers,
               stem_stride,
               stem_padding,
               stem_dim,
               num_modules,
               module_dim,
               question_embedding_dropout,
               stem_dropout,
               memory_dropout,
               read_dropout,
               nonlinearity,
               use_prior_control_in_control_unit,
               use_self_attention,
               use_memory_gate,
               question2output,
               classifier_batchnorm,
               classifier_fc_layers,
               classifier_dropout,
               use_coords,
               write_unit,
               read_connect,
               read_unit,
               noisy_controls,
               debug_every=float('inf'),
               print_verbose_every=float('inf'),
               hard_code_control=False,
               pretrained_control=None,
               verbose=True,
               ):
    super().__init__()

    num_answers = len(vocab['answer_idx_to_token'])

    self.stem_times = []
    self.module_times = []
    self.classifier_times = []
    self.timing = False

    self.num_modules = num_modules
    self.question_embedding_dropout = question_embedding_dropout
    self.memory_dropout = memory_dropout
    self.read_dropout = read_dropout
    self.module_dim = module_dim
    self.read_connect = read_connect
    self.question2output = question2output
    self.use_self_attention = use_self_attention == 1
    self.use_memory_gate = use_memory_gate == 1
    self.use_coords_freq = use_coords
    self.debug_every = debug_every
    self.print_verbose_every = print_verbose_every

    # Initialize helper variables
    self.stem_use_coords = self.use_coords_freq
    self.extra_channel_freq = self.use_coords_freq
    self.fwd_count = 0
    self.num_extra_channels = 2 if self.use_coords_freq > 0 else 0
    if self.debug_every <= -1:
      self.print_verbose_every = 1

    # Initialize stem
    stem_feature_dim = feature_dim[0] + self.stem_use_coords * self.num_extra_channels
    self.stem = build_stem(stem_feature_dim, stem_dim, module_dim,
                           num_layers=stem_num_layers, with_batchnorm=stem_batchnorm,
                           kernel_size=stem_kernel_size, stride=stem_stride, padding=stem_padding,
                           subsample_layers=stem_subsample_layers, acceptEvenKernel=True)

    #Define units: one InputUnit per step, plus shared control/read/write units.
    self.inputUnits = []
    for i in range(self.num_modules):
      mod = InputUnit(module_dim)
      self.add_module('InputUnit' + str(i+1), mod)
      self.inputUnits.append(mod)
    self.controlUnit = ControlUnit(module_dim, use_prior_control_in_control_unit=use_prior_control_in_control_unit)
    if read_unit == 'original':
      self.readUnit = ReadUnit(module_dim, nonlinearity, self.read_dropout)
    elif read_unit == 'film':
      self.readUnit = SharedFiLMedModule(module_dim, learn_embeddings=False)
    else:
      raise ValueError()
    if write_unit == 'original':
      mod = WriteUnit(module_dim,
                      use_self_attention=self.use_self_attention,
                      use_memory_gate=self.use_memory_gate)
    elif write_unit == 'gru':
      mod = GRUWriteUnit(module_dim)
    elif write_unit == 'lastread':
      mod = LastReadWriteUnit()
    elif write_unit == 'noop':
      mod = NoOpWriteUnit()
    else:
      # NOTE(review): at this point `mod` still refers to the last InputUnit
      # from the loop above, so the error message is misleading — confirm
      # whether `write_unit` was meant here.
      raise ValueError(mod)
    self.add_module('WriteUnit', mod)
    self.writeUnit = mod

    #parameters for initial memory and control vectors
    self.init_memory = nn.Parameter(torch.randn(module_dim).to(device))

    #first transformation of question embeddings
    self.init_question_transformer = nn.Linear(self.module_dim, self.module_dim)
    self.init_question_non_linear = nn.Tanh()

    self.vocab = vocab
    self.question_embedding_dropout_module = nn.Dropout(p=self.question_embedding_dropout)

    # Initialize output classifier
    self.classifier = OutputUnit(
      module_dim, classifier_fc_layers, num_answers,
      with_batchnorm=classifier_batchnorm, dropout=classifier_dropout,
      nonlinearity=nonlinearity, question2output=question2output)

    init_modules(self.modules())

  def forward(self, x, ques, isTest=False, save_activations=False):
    # Initialize forward pass and externally viewable activations
    self.fwd_count += 1
    if save_activations:
      self.cf_input = None

    # ques: (per-word states, sentence representation, word mask).
    q_context, q_rep, q_mask = ques
    original_q_rep = q_rep
    q_rep = self.question_embedding_dropout_module(q_rep)
    # The pre-tanh (dropped-out) representation seeds the control chain.
    init_control = q_rep
    q_rep = self.init_question_non_linear(self.init_question_transformer(q_rep))

    # Optionally append x/y coordinate channels before the stem.
    stem_batch_coords = None
    if self.use_coords_freq > 0:
      stem_coords = coord_map((x.size(2), x.size(3)))
      stem_batch_coords = stem_coords.unsqueeze(0).expand(
        torch.Size((x.size(0), *stem_coords.size())))
    if self.stem_use_coords:
      x = torch.cat([x, stem_batch_coords], 1)
    feats = self.stem(x)
    if save_activations:
      self.feats = feats
      self.read_scores = []
    N, _, H, W = feats.size()

    # memory_storage[:, i] holds the memory state entering reasoning step i.
    memory_storage = torch.zeros(N, 1+self.num_modules, self.module_dim).to(device)
    memory_storage[:,0,:] = self.init_memory.expand(N, self.module_dim)
    if self.memory_dropout > 0. and not isTest:
      # NOTE(review): bernoulli_() keeps each unit with probability equal to
      # memory_dropout itself — confirm this matches the intended drop rate.
      dropout_mask_memory = torch.Tensor(N, self.module_dim).fill_(
        self.memory_dropout).bernoulli_().to(device)
    else:
      dropout_mask_memory = None

    # compute controls
    controls = [init_control]
    control_scores = [torch.zeros_like(q_context[:, :, 0])]
    for fn_num in range(self.num_modules):
      inputUnit = getattr(self, 'InputUnit{}'.format(fn_num + 1))
      #compute question representation specific to this cell
      q_rep_i = inputUnit(q_rep) # N x d
      #compute control at the current step
      control_i, control_scores_i = self.controlUnit(
        controls[fn_num], q_rep_i, q_context, q_mask)
      controls.append(control_i)
      control_scores.append(control_scores_i)
    controls = torch.cat([c.unsqueeze(1) for c in controls], 1) # N x M x D
    control_scores = torch.cat([c.unsqueeze(1) for c in control_scores], 1) # N x M x T

    # run that reasoning
    for fn_num in range(self.num_modules):
      inputUnit = getattr(self, 'InputUnit{}'.format(fn_num + 1))
      #compute read at the current step
      read_input = memory_storage[:,fn_num,:]
      if isinstance(self.readUnit, ReadUnit):
        read_i, read_scores_i = self.readUnit(
          read_input, controls[:,(fn_num+1),:], feats,
          memory_dropout=self.memory_dropout, dropout_mask_memory=dropout_mask_memory,
          isTest=isTest)
      else:
        # FiLM read unit exposes no attention scores; use a placeholder.
        read_i = self.readUnit(feats, controls[:,(fn_num+1),:])
        read_scores_i = torch.Tensor([0])
      #compute the written memory at the current step
      memory_i = self.writeUnit(memory_storage, controls, read_i, fn_num+1)
      if fn_num == (self.num_modules - 1):
        final_module_output = memory_i
      else:
        # Out-of-place update of the next step's memory slot (autograd-safe).
        memory_updated = memory_storage.clone()
        memory_updated[:,(fn_num+1),:] = memory_updated[:,(fn_num+1),:] + memory_i
        memory_storage = memory_updated
      if save_activations:
        self.read_scores.append(read_scores_i)

    if save_activations:
      self.cf_input = final_module_output
      self.controls = controls
      self.control_scores = control_scores
      self.memory_storage = memory_storage
      self.read_scores = torch.cat([rs.unsqueeze(1) for rs in self.read_scores], 1)

    # output time
    out = self.classifier(final_module_output, original_q_rep, isTest=isTest)
    return out
class OutputUnit(nn.Module):
    """Final answer classifier: an MLP over the last memory, optionally
    concatenated with a linear transform of the question representation.

    Layers are registered under the names ``MAC_LinearFC<i>`` and
    ``MAC_BatchNormFC<i>`` so that checkpoints remain compatible.
    """

    def __init__(self, module_dim, hidden_units, num_outputs,
                 nonlinearity, with_batchnorm, dropout, question2output):
        super().__init__()
        self.dropout = dropout
        self.question2output = question2output
        if question2output:
            # projects the question before concatenating it with the memory
            self.question_transformer = nn.Linear(module_dim, module_dim)
        first_dim = 2 * module_dim if question2output else module_dim
        # full chain of layer widths: input -> hidden... -> answers
        dims = [first_dim, *hidden_units, num_outputs]
        self.n_layers = len(dims) - 1
        for layer_idx, (fan_in, fan_out) in enumerate(zip(dims, dims[1:])):
            self.add_module('MAC_LinearFC' + str(layer_idx), nn.Linear(fan_in, fan_out))
            if with_batchnorm:
                self.add_module('MAC_BatchNormFC' + str(layer_idx), nn.BatchNorm1d(fan_in))
            else:
                # placeholder so forward() can getattr() unconditionally
                setattr(self, 'MAC_BatchNormFC' + str(layer_idx), None)
        self.non_linear = nn.ReLU()
        self.dropout_module = nn.Dropout(p=self.dropout)
        init_modules(self.modules())

    def forward(self, final_memory, original_q_rep, isTest=False):
        """Map the final memory (and optionally the question) to answer logits."""
        if self.question2output:
            projected_q = self.question_transformer(original_q_rep)
            feats = torch.cat([final_memory, projected_q], 1)
        else:
            feats = final_memory
        for layer_idx in range(self.n_layers):
            bn = getattr(self, 'MAC_BatchNormFC' + str(layer_idx))
            if bn is not None:
                feats = bn(feats)
            feats = self.dropout_module(feats)
            feats = getattr(self, 'MAC_LinearFC' + str(layer_idx))(feats)
            # no non-linearity after the last (logit) layer
            if layer_idx + 1 < self.n_layers:
                feats = self.non_linear(feats)
        return feats
class NoOpWriteUnit(nn.Module):
    """Write unit that discards every input: the memory update is all zeros."""

    def forward(self, memories, controls, current_read, idx):
        # Emit a zero vector matching the read's shape/dtype/device.
        return torch.zeros(current_read.shape,
                           dtype=current_read.dtype,
                           device=current_read.device)
class LastReadWriteUnit(nn.Module):
    """Write unit that simply passes the latest read vector through unchanged."""

    def forward(self, memories, controls, current_read, idx):
        # New memory is exactly what the read unit just produced.
        return current_read
class GRUWriteUnit(nn.Module):
    """Write unit that merges the current read into the previous memory with a GRU cell.

    The read vector is the GRU input and the memory written at the previous
    step (``memories[:, idx - 1, :]``) is the hidden state.
    """

    def __init__(self, common_dim):
        super().__init__()
        self.gru = nn.GRUCell(common_dim, common_dim)

    def forward(self, memories, controls, current_read, idx):
        # Invoke the module (not .forward directly) so nn.Module.__call__
        # runs any registered hooks.
        return self.gru(current_read, memories[:, idx - 1, :])
class WriteUnit(nn.Module):
    """MAC write unit: combines the current read with the previous memory,
    with optional self-attention over earlier memories (Eq. w2.*) and an
    optional gate that interpolates with the previous memory (Eq. w3.*).
    """

    def __init__(self, common_dim, use_self_attention=False, use_memory_gate=False):
        super(WriteUnit, self).__init__()
        self.common_dim = common_dim
        self.use_self_attention = use_self_attention
        self.use_memory_gate = use_memory_gate
        # Eq (w1): fuse [current_read, prior_memory] into a candidate memory.
        # NOTE: attribute name keeps the original "transfomer" spelling so
        # existing checkpoints still load.
        self.control_memory_transfomer = nn.Linear(2 * common_dim, common_dim)  # Eq (w1)
        if use_self_attention:
            self.current_control_transformer = nn.Linear(common_dim, common_dim)
            self.control_transformer = nn.Linear(common_dim, 1)  # Eq (w2.1)
            self.acc_memory_transformer = nn.Linear(common_dim, common_dim, bias=False)
            self.pre_memory_transformer = nn.Linear(common_dim, common_dim)  # Eq (w2.3)
        if use_memory_gate:
            self.gated_control_transformer = nn.Linear(common_dim, 1)  # Eq (w3.1)
            self.non_linear = nn.Sigmoid()
        init_modules(self.modules())

    def forward(self, memories, controls, current_read, idx):
        # memories (N x num_cell x d), controls (N x num_cell x d),
        # current_read (N x d), idx (int starting from 1).
        prior_memory = memories[:, idx - 1, :]
        # Eq (w1): candidate memory from the read and the previous memory.
        res_memory = self.control_memory_transfomer(torch.cat([current_read, prior_memory], 1))  # N x d
        if self.use_self_attention:
            current_control = controls[:, idx, :]  # N x d
            current_control = self.current_control_transformer(current_control)  # N x d in code
            if idx > 1:
                # Eq (w2.1): score each earlier control against the current one.
                previous_controls = controls[:, 1:idx, :]  # N x (idx-1) x d
                cscores = previous_controls * current_control.unsqueeze(1)  # N x (idx-1) x d
                cscores = self.control_transformer(cscores).squeeze(2)  # N x (idx-1)
                # numerically-stable softmax over the earlier steps
                cscores = torch.exp(cscores - cscores.max(1, keepdim=True)[0])  # N x (idx-1)
                cscores = cscores / cscores.sum(1, keepdim=True)  # N x (idx-1)
                # Eq (w2.2): attention-weighted sum of earlier memories.
                previous_memories = memories[:, 1:idx, :]  # N x (idx-1) x d
                acc_memory = (previous_memories * cscores.unsqueeze(2)).sum(1)  # N x d
                # Eq (w2.3): merge attended history with the candidate memory.
                res_memory = self.acc_memory_transformer(acc_memory) + self.pre_memory_transformer(res_memory)
            else:
                # Eq (w2.3): no m_i^{sa} exists at the first step.
                res_memory = self.pre_memory_transformer(res_memory)
        if self.use_memory_gate:
            # Eq (w3.1): scalar gate from the current control.
            gated_control = self.gated_control_transformer(controls[:, idx, :])  # N x 1
            # Eq (w3.2): sigmoid gate interpolates old vs. new memory.
            gated_control = self.non_linear(gated_control)
            res_memory = memories[:, idx - 1, :] * gated_control + res_memory * (1. - gated_control)
        return res_memory
class ReadUnit(nn.Module):
    """MAC read unit: attends over image feature-map positions conditioned on
    the previous memory (Eq. r1-r2) and the current control (Eq. r3.*), and
    returns the attention-weighted image representation plus the scores.
    """

    def __init__(self, common_dim, nonlinearity, read_dropout=0.):
        super().__init__()
        self.common_dim = common_dim
        self.read_dropout = read_dropout
        # Eq (r1): project memory and each image position into a common space.
        self.pre_memory_transformer = nn.Linear(common_dim, common_dim)
        self.image_element_transformer = nn.Linear(common_dim, common_dim)
        # Eq (r2): fuse the memory-image interaction with the image again.
        self.intermediate_transformer = nn.Linear(2 * common_dim, common_dim)
        #self.intermediate_transformer_2 = nn.Linear(common_dim, common_dim)
        # Eq (r3.1): scalar attention score per spatial position.
        self.read_attention_transformer = nn.Linear(common_dim, 1)
        # nonlinearity is the name of a torch.nn activation class, e.g. 'ELU'.
        self.non_linear = getattr(nn, nonlinearity)()
        self.read_dropout_module = nn.Dropout(p=self.read_dropout)
        init_modules(self.modules())

    def forward(self, pre_memory, current_control, image,
                memory_dropout=0., dropout_mask_memory=None, isTest=False):
        # pre_memory (N x d), current_control (N x d), image (N x d x H x W).
        image = image.transpose(1, 2).transpose(2, 3)  # N x H x W x d
        trans_image = image
        if not isTest and memory_dropout > 0.:
            # variational dropout: the same mask is reused across steps,
            # with inverted scaling so eval needs no rescale.
            assert dropout_mask_memory is not None
            pre_memory = (pre_memory / (1. - memory_dropout)) * dropout_mask_memory
        pre_memory = self.read_dropout_module(pre_memory)
        trans_image = self.read_dropout_module(trans_image)
        # Eq (r1): element-wise interaction between memory and each position.
        trans_pre_memory = self.pre_memory_transformer(pre_memory)  # N x d
        trans_image = self.image_element_transformer(trans_image)  # N x H x W x d image
        trans_pre_memory = trans_pre_memory.unsqueeze(1).unsqueeze(2).expand(trans_image.size())  # N x H x W x d
        intermediate = trans_pre_memory * trans_image  # N x H x W x d
        # Eq (r2): concatenate with the (transformed) image and re-project.
        #trans_intermediate = self.intermediate_transformer(torch.cat([intermediate, image], 3))  # N x H x W x d
        trans_intermediate = self.intermediate_transformer(torch.cat([intermediate, trans_image], 3))  # N x H x W x d
        trans_intermediate = self.non_linear(trans_intermediate)
        #trans_intermediate = self.intermediate_transformer_2(trans_intermediate)
        # Eq (r3.1): condition on the control to get per-position scores.
        trans_current_control = current_control.unsqueeze(1).unsqueeze(2).expand(trans_intermediate.size())  # N x H x W x d
        intermediate_score = trans_current_control * trans_intermediate
        intermediate_score = self.non_linear(intermediate_score)
        intermediate_score = self.read_dropout_module(intermediate_score)
        scores = self.read_attention_transformer(intermediate_score).squeeze(3)  # N x H x W x 1 -> N x H x W
        # Eq (r3.2): numerically-stable softmax over all H*W positions.
        rscores = scores.view(scores.shape[0], -1)  # N x (H*W)
        rscores = torch.exp(rscores - rscores.max(1, keepdim=True)[0])
        rscores = rscores / rscores.sum(1, keepdim=True)
        scores = rscores.view(scores.shape)  # N x H x W
        # Eq (r3.3): attention-weighted sum of the original image features.
        readrep = image * scores.unsqueeze(3)
        readrep = readrep.view(readrep.shape[0], -1, readrep.shape[-1])  # N x (H*W) x d
        readrep = readrep.sum(1)  # N x d
        return readrep, scores
class ControlUnit(nn.Module):
    """MAC control unit: attends over the question context words to produce
    the control vector for the current reasoning step (Eqs. c1-c2.3)."""

    def __init__(self, common_dim, use_prior_control_in_control_unit=False):
        super().__init__()
        self.common_dim = common_dim
        self.use_prior_control_in_control_unit = use_prior_control_in_control_unit
        if use_prior_control_in_control_unit:
            # Eq (c1): mix the previous control into the step question.
            self.control_question_transformer = nn.Linear(2 * common_dim, common_dim)
        # Eq (c2.1): scalar score per context word.
        self.score_transformer = nn.Linear(common_dim, 1)
        init_modules(self.modules())

    def forward(self, pre_control, question, context, mask):
        """pre_control (N x d), question (N x d), context (N x L x d), mask (N x L)."""
        # Eq (c1)
        if self.use_prior_control_in_control_unit:
            cq = self.control_question_transformer(torch.cat([pre_control, question], 1))  # N x d
        else:
            cq = question  # N x d
        # Eq (c2.1): word-wise interaction scores.
        word_scores = self.score_transformer(context * cq.unsqueeze(1)).squeeze(2)  # N x L
        # Eq (c2.2): numerically-stable masked softmax (mask zeroes null tokens).
        word_scores = torch.exp(word_scores - word_scores.max(1, keepdim=True)[0]) * mask
        attn = word_scores / word_scores.sum(1, keepdim=True)  # N x L
        # Eq (c2.3): the control is the attention-weighted context.
        new_control = (context * attn.unsqueeze(2)).sum(1)  # N x d
        return new_control, attn
class InputUnit(nn.Module):
    """Per-step linear projection of the question representation (Section 2.1)."""

    def __init__(self, common_dim):
        super().__init__()
        self.common_dim = common_dim
        self.question_transformer = nn.Linear(common_dim, common_dim)
        init_modules(self.modules())

    def forward(self, question):
        projected = self.question_transformer(question)
        return projected
def sincos_coord_map(shape, p_h=64., p_w=64.):
    """Build a fixed 2-channel sin/cos positional map of spatial size ``shape``.

    Returns a (2, m, n) Variable on the module-level ``device``: channel 0 is
    sin of the row index, channel 1 is cos of the column index, each scaled by
    a 10000**(k/p) frequency term in the style of transformer positional
    encodings.
    """
    m, n = shape
    x_coords = torch.zeros(m, n)
    y_coords = torch.zeros(m, n)
    for i in range(m):
        for j in range(n):
            # use the even member of each (2k, 2k+1) index pair as the
            # frequency exponent
            icoord = i if i % 2 == 0 else i - 1
            jcoord = j if j % 2 == 0 else j - 1
            # NOTE(review): the sin channel pairs jcoord with p_h and the cos
            # channel pairs icoord with p_w, which looks transposed relative
            # to the usual formulation — kept as-is to preserve behavior.
            x_coords[i, j] = math.sin(1.0 * i / (10000. ** (1.0 * jcoord / p_h)))
            y_coords[i, j] = math.cos(1.0 * j / (10000. ** (1.0 * icoord / p_w)))
    # x_coords/y_coords are already tensors: move them directly instead of
    # going through the deprecated torch.Tensor(tensor) copy-constructor.
    x_coords = x_coords.to(device).unsqueeze(0)
    y_coords = y_coords.to(device).unsqueeze(0)
    return Variable(torch.cat([x_coords, y_coords], 0))
def init_modules(modules, init='uniform'):
    """Xavier-initialize the weights of every Conv2d/Linear in ``modules``
    and zero their biases. ``init`` selects 'normal' or 'uniform' Xavier;
    any other value makes this a no-op.
    """
    initializers = {'normal': xavier_normal, 'uniform': xavier_uniform}
    init_fn = initializers.get(init.lower())
    if init_fn is None:
        return
    for module in modules:
        if not isinstance(module, (nn.Conv2d, nn.Linear)):
            continue
        init_fn(module.weight)
        if module.bias is not None:
            constant(module.bias, 0.)
| 21,616 | 37.809695 | 119 | py |
CLOSURE | CLOSURE-master/vr/models/convlstm.py | #!/usr/bin/env python3
import torch
import torch.nn as nn
from torch.autograd import Variable
from vr.models.layers import (build_classifier,
build_stem,
init_modules)
class ConvLSTM(nn.Module):
    """CNN-stem + question-feature baseline.

    Flattens the stem's output feature map, concatenates it with the question
    RNN's hidden state, and classifies with an MLP. (Despite the name, no
    LSTM over conv features is applied here — the question encoding comes in
    pre-computed through ``forward``.)
    """

    def __init__(self,
                 vocab,
                 feature_dim=(3, 64, 64),  # tuple, not list: avoid a mutable default argument
                 stem_dim=128,
                 module_dim=128,
                 stem_num_layers=2,
                 stem_batchnorm=True,
                 stem_kernel_size=3,
                 stem_stride=1,
                 stem_padding=None,
                 stem_feature_dim=24,
                 stem_subsample_layers=None,
                 classifier_fc_layers=(1024,),
                 classifier_batchnorm=False,
                 classifier_dropout=0,
                 rnn_hidden_dim=128,
                 **kwargs):
        super().__init__()
        # initialize stem
        self.stem = build_stem(feature_dim[0],
                               stem_dim,
                               module_dim,
                               num_layers=stem_num_layers,
                               with_batchnorm=stem_batchnorm,
                               kernel_size=stem_kernel_size,
                               stride=stem_stride,
                               padding=stem_padding,
                               subsample_layers=stem_subsample_layers)
        # probe output shape with a dummy forward pass; works for list or
        # tuple feature_dim thanks to unpacking
        tmp = self.stem(Variable(torch.zeros([1, *feature_dim])))
        _, F, H, W = tmp.size()
        # initialize classifier
        # TODO(mnoukhov): fix this for >1 layer RNN
        question_dim = rnn_hidden_dim
        image_dim = F * H * W
        num_answers = len(vocab['answer_idx_to_token'])
        self.classifier = build_classifier(image_dim + question_dim,
                                           1,
                                           1,
                                           num_answers,
                                           classifier_fc_layers,
                                           None,
                                           None,
                                           classifier_batchnorm,
                                           classifier_dropout)
        init_modules(self.modules())

    def forward(self, image, question):
        """image: (N, C, H, W); question: triple whose middle element is the
        question features (N, Q). Returns answer logits."""
        # convert image to features
        img_feats = self.stem(image)  # N x F x H x W
        img_feats = img_feats.view(img_feats.size(0), -1)  # N x F*H*W
        # get hidden state from question
        _, q_feats, _ = question  # N x Q
        # concatenate feats
        feats = torch.cat([img_feats, q_feats], dim=1)  # N x F*H*W+Q
        # pass through classifier
        out = self.classifier(feats)
        return out
| 2,782 | 35.142857 | 73 | py |
CLOSURE | CLOSURE-master/vr/models/film_gen.py | #!/usr/bin/env python3
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from vr.models.layers import init_modules
from torch.nn.init import uniform_, xavier_uniform_, constant_
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FiLMGen(nn.Module):
    """Question encoder that produces FiLM conditioning parameters.

    Encodes the tokenized question with an RNN and decodes, per module, a
    vector of (gamma, beta) coefficients — either via a linear/RNN decoder or
    via per-module attention over the encoder context. ``modify_output``
    post-processes the raw coefficients (e.g. shifting gammas around
    ``gamma_baseline``).
    """

    def __init__(self,
                 null_token=0,
                 start_token=1,
                 end_token=2,
                 encoder_embed=None,
                 encoder_vocab_size=100,
                 decoder_vocab_size=100,
                 wordvec_dim=200,
                 hidden_dim=512,
                 rnn_num_layers=1,
                 rnn_dropout=0,
                 output_batchnorm=False,
                 bidirectional=False,
                 encoder_type='gru',
                 decoder_type='linear',
                 gamma_option='linear',
                 gamma_baseline=1,
                 num_modules=4,
                 module_num_layers=1,
                 module_dim=128,
                 parameter_efficient=False,
                 debug_every=float('inf'),
                 taking_context=False,
                 variational_embedding_dropout=0.,
                 embedding_uniform_boundary=0.,
                 use_attention=False,
                 ):
        super(FiLMGen, self).__init__()
        self.use_attention = use_attention
        self.taking_context = taking_context
        if self.use_attention:
            # attention needs the full per-token encoder context
            self.taking_context = True
        if self.taking_context:
            # full-context encoding uses bidirectional modeling
            bidirectional = True
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.output_batchnorm = output_batchnorm
        self.bidirectional = bidirectional
        self.num_dir = 2 if self.bidirectional else 1
        self.gamma_option = gamma_option
        self.gamma_baseline = gamma_baseline
        self.num_modules = num_modules
        self.module_num_layers = module_num_layers
        self.module_dim = module_dim
        self.debug_every = debug_every
        self.NULL = null_token
        self.START = start_token
        self.END = end_token
        self.variational_embedding_dropout = variational_embedding_dropout
        if self.bidirectional:
            if decoder_type != 'linear':
                raise NotImplementedError(
                    'bidirectional FiLMGen only supports the linear decoder')
            # halve per-direction width so total stays at hidden_dim
            hidden_dim = int(hidden_dim / self.num_dir)
        self.func_list = {
            'linear': None,
            'sigmoid': F.sigmoid,
            'tanh': F.tanh,
            'exp': torch.exp,
        }
        # FiLM params per ResBlock
        self.cond_feat_size = 2 * self.module_dim * self.module_num_layers
        if not parameter_efficient:
            # parameter_efficient=False only used to load older trained models
            self.cond_feat_size = 4 * self.module_dim + 2 * self.num_modules

        self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
        self.encoder_rnn = init_rnn(self.encoder_type, wordvec_dim, hidden_dim, rnn_num_layers,
                                    dropout=rnn_dropout, bidirectional=self.bidirectional)
        self.decoder_rnn = init_rnn(self.decoder_type, hidden_dim, hidden_dim, rnn_num_layers,
                                    dropout=rnn_dropout, bidirectional=self.bidirectional)
        if self.taking_context:
            self.decoder_linear = None
            # Xavier/zero initialization of the encoder RNN parameters
            for n, p in self.encoder_rnn.named_parameters():
                if n.startswith('weight'):
                    xavier_uniform_(p)
                elif n.startswith('bias'):
                    constant_(p, 0.)
        else:
            self.decoder_linear = nn.Linear(
                hidden_dim * self.num_dir, self.num_modules * self.cond_feat_size)
        if self.use_attention:
            # Florian Strub used Tanh here, but identity keeps this model
            # closer to the baseline film version.
            # Change this to use a different attention-scoring mechanism.
            attention_dim = self.module_dim
            self.context2key = nn.Linear(hidden_dim * self.num_dir, self.module_dim)
            # per-module query projections and FiLM-coefficient decoders
            self.last_vector2key = []
            self.decoders_att = []
            for i in range(num_modules):
                mod = nn.Linear(hidden_dim * self.num_dir, attention_dim)
                self.add_module("last_vector2key{}".format(i), mod)
                self.last_vector2key.append(mod)
                mod = nn.Linear(hidden_dim * self.num_dir, 2 * self.module_dim)
                self.add_module("decoders_att{}".format(i), mod)
                self.decoders_att.append(mod)
        if self.output_batchnorm:
            self.output_bn = nn.BatchNorm1d(self.cond_feat_size, affine=True)
        init_modules(self.modules())
        if embedding_uniform_boundary > 0.:
            uniform_(self.encoder_embed.weight,
                     -1. * embedding_uniform_boundary, embedding_uniform_boundary)
        # The attention scores will be saved here if the attention is used.
        self.scores = None

    def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
        # NOTE(review): expand_embedding_vocab is not defined/imported in this
        # file — confirm it is provided elsewhere before calling this.
        expand_embedding_vocab(self.encoder_embed, token_to_idx,
                               word2vec=word2vec, std=std)

    def get_dims(self, x=None):
        """Collect the dimension tuple used throughout encode/decode."""
        V_in = self.encoder_embed.num_embeddings
        V_out = self.cond_feat_size
        D = self.encoder_embed.embedding_dim
        H = self.encoder_rnn.hidden_size
        H_full = self.encoder_rnn.hidden_size * self.num_dir
        L = self.encoder_rnn.num_layers * self.num_dir
        N = x.size(0) if x is not None else None
        T_in = x.size(1) if x is not None else None
        T_out = self.num_modules
        return V_in, V_out, D, H, H_full, L, N, T_in, T_out

    def before_rnn(self, x, replace=0):
        """Find per-row last-token indices and a non-null mask; replace NULLs.

        Returns (x with NULL tokens replaced, index of last non-null token per
        row, float mask of non-null positions).
        """
        N, T = x.size()
        idx = torch.LongTensor(N).fill_(T - 1)
        # mask to specify non-null tokens
        mask = torch.FloatTensor(N, T).zero_()
        # Find the last non-null element in each sequence.
        x_cpu = x.cpu()
        for i in range(N):
            for t in range(T - 1):
                if x_cpu.data[i, t] != self.NULL and x_cpu.data[i, t + 1] == self.NULL:
                    idx[i] = t
                    break
        for i in range(N):
            for t in range(T):
                if x_cpu.data[i, t] not in [self.NULL]:
                    mask[i, t] = 1.
        idx = idx.type_as(x.data)
        x[x.data == self.NULL] = replace
        return x, idx, mask.to(device)

    def encoder(self, x, isTest=False):
        """Run the question RNN.

        Returns (last-hidden per row or None, full context or None,
        concatenated final hidden state or None, non-null mask); which of the
        first three are populated depends on ``taking_context``.
        """
        V_in, V_out, D, H, H_full, L, N, T_in, T_out = self.get_dims(x=x)
        x, idx, mask = self.before_rnn(x)  # Tokenized word sequences (questions), end index
        if self.taking_context:
            # sort by length for pack_padded_sequence; remember the inverse
            lengths = torch.LongTensor(idx.shape).fill_(1) + idx.data.cpu()
            lengths = lengths.to(device)
            seq_lengths, perm_idx = lengths.sort(0, descending=True)
            iperm_idx = torch.LongTensor(perm_idx.shape).fill_(0).to(device)
            for i, v in enumerate(perm_idx):
                iperm_idx[v.data] = i
            x = x[perm_idx]
        embed = self.encoder_embed(x)
        h0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
        if self.encoder_type == 'lstm':
            c0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
        if self.variational_embedding_dropout > 0. and not isTest:
            # same dropout mask shared across time steps (inverted scaling)
            varDrop = torch.Tensor(N, D).fill_(
                self.variational_embedding_dropout).bernoulli_().to(device)
            embed = (embed / (1. - self.variational_embedding_dropout)) * varDrop.unsqueeze(1)
        if self.taking_context:
            embed = pack_padded_sequence(embed, seq_lengths.data.cpu().numpy(), batch_first=True)
        if self.encoder_type == 'lstm':
            out, (hn, _) = self.encoder_rnn(embed, (h0, c0))
        elif self.encoder_type == 'gru':
            out, hn = self.encoder_rnn(embed, h0)
        # flatten (layers*dirs, N, H) -> (N, layers*dirs*H)
        hn = hn.transpose(1, 0).contiguous()
        hn = hn.view(hn.shape[0], -1)
        # Pull out the hidden state for the last non-null value in each input
        if self.taking_context:
            idx_out = None
            out, _ = pad_packed_sequence(out, batch_first=True)
            out = out[iperm_idx]
            if out.shape[1] < T_in:
                # packing truncates to the longest sequence; pad back to T_in
                out = F.pad(out, (0, 0, 0, T_in - out.shape[1], 0, 0))
            hn = hn[iperm_idx]
        else:
            idx = idx.view(N, 1, 1).expand(N, 1, H_full)
            idx_out = out.gather(1, idx).view(N, H_full)
            out = None
            hn = None
        return idx_out, out, hn, mask

    def decoder(self, encoded, dims, h0=None, c0=None):
        """Decode per-module FiLM coefficient vectors from the encoding."""
        V_in, V_out, D, H, H_full, L, N, T_in, T_out = dims
        if self.decoder_type == 'linear':
            # (N x H) x (H x T_out*V_out) -> (N x T_out*V_out) -> N x T_out x V_out
            return self.decoder_linear(encoded).view(N, T_out, V_out), (None, None)
        encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
        # 'if h0 is None' rather than 'if not h0': truth-testing a tensor raises
        if h0 is None:
            h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
        if self.decoder_type == 'lstm':
            if c0 is None:
                c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
            rnn_output, (ht, ct) = self.decoder_rnn(encoded_repeat, (h0, c0))
        elif self.decoder_type == 'gru':
            ct = None
            rnn_output, ht = self.decoder_rnn(encoded_repeat, h0)
        rnn_output_2d = rnn_output.contiguous().view(N * T_out, H)
        linear_output = self.decoder_linear(rnn_output_2d)
        if self.output_batchnorm:
            linear_output = self.output_bn(linear_output)
        output_shaped = linear_output.view(N, T_out, V_out)
        return output_shaped, (ht, ct)

    def attention_decoder(self, context, last_vector, mask):
        """Per-module dot-product attention over the encoder context; each
        attended vector is mapped to that module's FiLM coefficients."""
        context_keys = self.context2key(context)
        out = []
        self.scores = []
        for i in range(self.num_modules):
            # vanilla dot-product attention in the key space
            query = self.last_vector2key[i](last_vector)
            scores = (context_keys * query.unsqueeze(1)).sum(2)  # N x L x d -> N x L
            # masked, numerically-stable softmax (mask drops padding words)
            scores = torch.exp(scores - scores.max(1, keepdim=True)[0]) * mask
            scores = scores / scores.sum(1, keepdim=True)  # N x L
            self.scores.append(scores)
            control = (context * scores.unsqueeze(2)).sum(1)  # N x d
            coefficients = self.decoders_att[i](control).unsqueeze(1)  # N x d -> N x 2d -> N x 1 x 2d
            out.append(coefficients)
        self.scores = torch.cat([t.unsqueeze(0) for t in self.scores], 0)
        if len(out) == 0: return None
        if len(out) == 1: return out[0]
        return torch.cat(out, 1)  # N x num_module x 2d

    def forward(self, x, isTest=False):
        if self.debug_every <= -2:
            import pdb  # local import: pdb was not imported at module level
            pdb.set_trace()
        encoded, whole_context, last_vector, mask = self.encoder(x, isTest=isTest)
        if self.taking_context and not self.use_attention:
            return (whole_context, last_vector, mask)
        if self.use_attention:  # taking_context is forced True in this case
            film_pre_mod = self.attention_decoder(whole_context, last_vector, mask)
        else:
            film_pre_mod, _ = self.decoder(encoded, self.get_dims(x=x))
        film = self.modify_output(film_pre_mod, gamma_option=self.gamma_option,
                                  gamma_shift=self.gamma_baseline)
        return film

    def modify_output(self, out, gamma_option='linear', gamma_scale=1, gamma_shift=0,
                      beta_option='linear', beta_scale=1, beta_shift=0):
        """Apply optional nonlinearity/scale/shift to the gamma and beta
        slices of each module layer, in place; returns ``out``."""
        gamma_func = self.func_list[gamma_option]
        beta_func = self.func_list[beta_option]
        # per-layer slices: [gamma | beta] blocks of width module_dim each
        gs = []
        bs = []
        for i in range(self.module_num_layers):
            gs.append(slice(i * (2 * self.module_dim), i * (2 * self.module_dim) + self.module_dim))
            bs.append(slice(i * (2 * self.module_dim) + self.module_dim, (i + 1) * (2 * self.module_dim)))
        if gamma_func is not None:
            for i in range(self.module_num_layers):
                out[:, :, gs[i]] = gamma_func(out[:, :, gs[i]])
        if gamma_scale != 1:
            for i in range(self.module_num_layers):
                out[:, :, gs[i]] = out[:, :, gs[i]] * gamma_scale
        if gamma_shift != 0:
            for i in range(self.module_num_layers):
                out[:, :, gs[i]] = out[:, :, gs[i]] + gamma_shift
        if beta_func is not None:
            for i in range(self.module_num_layers):
                out[:, :, bs[i]] = beta_func(out[:, :, bs[i]])
            # (removed a leftover line referencing an undefined name `b2`
            # that raised NameError whenever beta_func was not None)
        if beta_scale != 1:
            for i in range(self.module_num_layers):
                out[:, :, bs[i]] = out[:, :, bs[i]] * beta_scale
        if beta_shift != 0:
            for i in range(self.module_num_layers):
                out[:, :, bs[i]] = out[:, :, bs[i]] + beta_shift
        return out
def init_rnn(rnn_type, hidden_dim1, hidden_dim2, rnn_num_layers,
             dropout=0, bidirectional=False):
    """Construct a batch-first GRU or LSTM.

    Returns None for rnn_type 'linear' (caller builds a linear decoder
    instead). Raises NotImplementedError for any other type — with the
    message in the exception instead of the old print-then-raise.
    """
    if rnn_type == 'gru':
        return nn.GRU(hidden_dim1, hidden_dim2, rnn_num_layers, dropout=dropout,
                      batch_first=True, bidirectional=bidirectional)
    if rnn_type == 'lstm':
        return nn.LSTM(hidden_dim1, hidden_dim2, rnn_num_layers, dropout=dropout,
                       batch_first=True, bidirectional=bidirectional)
    if rnn_type == 'linear':
        return None
    raise NotImplementedError('RNN type ' + str(rnn_type) + ' not yet implemented.')
| 13,957 | 39.34104 | 131 | py |
CLOSURE | CLOSURE-master/vr/models/module_net.py | #!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
from vr.models.layers import (
init_modules, ResidualBlock, GlobalAveragePool, Flatten,
build_classifier, build_stem, ConcatBlock, SimpleConcatBlock)
import vr.programs
from torch.nn.init import kaiming_normal, kaiming_uniform, xavier_uniform, xavier_normal, constant
from torch.autograd import Function
from vr.models.filmed_net import FiLM, FiLMedResBlock, ConcatFiLMedResBlock, coord_map, SharedFiLMedModule, FiLMModule
from vr.models.maced_net import MACControl
class ModuleNet(nn.Module):
def __init__(self, vocab, feature_dim,
use_film,
use_simple_block,
stem_num_layers,
stem_batchnorm,
stem_subsample_layers,
stem_kernel_size,
stem_stride,
stem_padding,
stem_dim,
module_dim,
module_pool,
module_use_gammas,
module_kernel_size,
module_input_proj,
module_residual=True,
module_batchnorm=False,
module_num_layers=1,
mod_id_loss=False,
kl_loss=False,
learn_control=False,
rnn_dim=None,
classifier_proj_dim=512,
classifier_downsample='maxpool2',
classifier_fc_layers=(1024,),
classifier_batchnorm=False,
classifier_dropout=0,
discriminator_proj_dim=None,
discriminator_downsample=None,
discriminator_fc_layers=None,
discriminator_dropout=None,
verbose=True,
type_anonymizer=False):
super(ModuleNet, self).__init__()
if discriminator_proj_dim is None:
discriminator_proj_dim = classifier_proj_dim
if discriminator_downsample is None:
discriminator_downsample = classifier_downsample
if discriminator_fc_layers is None:
discriminator_fc_layers = classifier_fc_layers
if discriminator_dropout is None:
discriminator_dropout = classifier_dropout
self.module_dim = module_dim
self.use_film = use_film
self.use_simple_block = use_simple_block
self.mod_id_loss = mod_id_loss
self.kl_loss = kl_loss
self.learn_control = learn_control
self.stem = build_stem(feature_dim[0], stem_dim, module_dim,
num_layers=stem_num_layers,
subsample_layers=stem_subsample_layers,
kernel_size=stem_kernel_size,
padding=stem_padding,
with_batchnorm=stem_batchnorm)
tmp = self.stem(Variable(torch.zeros([1, feature_dim[0], feature_dim[1], feature_dim[2]])))
module_H = tmp.size(2)
module_W = tmp.size(3)
self.coords = coord_map((module_H, module_W))
if verbose:
print('Here is my stem:')
print(self.stem)
classifier_kwargs = dict(module_C=module_dim, module_H=module_H, module_W=module_W,
num_answers=len(vocab['answer_idx_to_token']),
fc_dims=classifier_fc_layers,
proj_dim=classifier_proj_dim,
downsample=classifier_downsample,
with_batchnorm=classifier_batchnorm,
dropout=classifier_dropout)
discriminator_kwargs = dict(module_C=module_dim, module_H=module_H, module_W=module_W,
num_answers=len(vocab['program_idx_to_token']),
fc_dims=discriminator_fc_layers,
proj_dim=discriminator_proj_dim,
downsample=discriminator_downsample,
with_batchnorm=False,
dropout=discriminator_dropout)
if self.use_film:
classifier_kwargs['module_H'] = 1
classifier_kwargs['module_W'] = 1
discriminator_kwargs['module_H'] = 1
discriminator_kwargs['module_W'] = 1
self.classifier = build_classifier(**classifier_kwargs)
if self.mod_id_loss:
self.module_identifier = build_classifier(**discriminator_kwargs)
if verbose:
print('Here is my classifier:')
print(self.classifier)
self.function_modules = {}
self.function_modules_num_inputs = {}
self.vocab = vocab
shared_block = None
if type_anonymizer:
shared_block = ResidualBlock(module_dim,
kernel_size=module_kernel_size,
with_residual=module_residual,
with_batchnorm=module_batchnorm)
elif use_film == 1:
assert module_W == module_H
shared_block = SharedFiLMedModule(module_dim,
kernel_size=module_kernel_size,
num_layers=module_num_layers,
with_residual=module_residual,
pool=module_pool,
use_gammas=module_use_gammas,
post_linear=kl_loss,
learn_embeddings=not learn_control)
if shared_block:
self.shared_block = shared_block
self.add_module('shared', shared_block)
for fn_str, fn_idx in vocab['program_token_to_idx'].items():
num_inputs = vocab['program_token_arity'][fn_str]
self.function_modules_num_inputs[fn_str] = num_inputs
def create_module():
if num_inputs > 2:
raise Exception('Not implemented!')
if use_film == 1:
return FiLMModule(shared_block, fn_idx)
if use_film == 2:
separate_core_block = SharedFiLMedModule(module_dim, module_W,
kernel_size=module_kernel_size,
with_residual=module_residual)
return FiLMModule(separate_core_block, fn_idx)
if use_simple_block:
# brutally simple concatentation block
# with 2 layers, no residual connection
return SimpleConcatBlock(
module_dim,
kernel_size=module_kernel_size)
if num_inputs in [0, 1]:
return ResidualBlock(
module_dim,
kernel_size=module_kernel_size,
with_residual=module_residual,
with_batchnorm=module_batchnorm,
shared_block=shared_block,
post_linear=kl_loss)
else:
return ConcatBlock(
module_dim,
kernel_size=module_kernel_size,
with_residual=module_residual,
with_batchnorm=module_batchnorm,
shared_block=shared_block,
post_linear=kl_loss)
mod = create_module()
if mod is not None:
self.add_module(fn_str, mod)
self.function_modules[fn_str] = mod
self.save_module_outputs = False
self.noise_enabled = True
if learn_control:
self.controller = MACControl(30, rnn_dim, module_dim)
def _forward_modules_ints_helper(self, feats, program, i, j, module_outputs):
used_fn_j = True
orig_j = j
if j < program.size(1):
fn_idx = program.data[i, j]
fn_str = self.vocab['program_idx_to_token'][fn_idx.item()]
else:
used_fn_j = False
fn_str = 'scene'
if fn_str == '<NULL>':
used_fn_j = False
fn_str = 'scene'
elif fn_str == '<START>':
used_fn_j = False
return self._forward_modules_ints_helper(feats, program, i, j + 1, module_outputs)
if used_fn_j:
self.used_fns[i, j] = 1
j += 1
num_inputs = self.function_modules_num_inputs[fn_str]
if fn_str == 'scene':
num_inputs = 1
module = self.function_modules[fn_str]
if fn_str == 'scene':
module_inputs = [feats[i:i+1]]
else:
module_inputs = []
while len(module_inputs) < num_inputs:
cur_input, j = self._forward_modules_ints_helper(feats, program, i, j, module_outputs)
module_inputs.append(cur_input)
if self.use_film:
module_inputs = [feats[i:i+1]] + module_inputs
if self.use_simple_block:
# simple block must have 3 inputs
if len(module_inputs) < 2:
module_inputs.append(torch.zeros_like(module_inputs[0]))
module_inputs = [feats[i:i+1]] + module_inputs
module_output = module(*module_inputs)
if self.kl_loss:
mu = module_output[:, :self.module_dim]
logvar = module_output[:, self.module_dim:] - 5
logvar = torch.min(logvar, torch.ones_like(logvar))
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(logvar) if self.noise_enabled else 0
module_output = mu + std * eps
self._mus.append(mu)
self._logvars.append(logvar)
# a module is uniquely identified by an (i, orig_j)
if used_fn_j:
module_outputs[(i, orig_j)] = module_output
return module_output, j
def _forward_modules_ints(self, feats, program):
"""
feats: FloatTensor of shape (N, C, H, W) giving features for each image
program: LongTensor of shape (N, L) giving a prefix-encoded program for
each image.
"""
N = feats.size(0)
final_module_outputs = []
self.used_fns = torch.Tensor(program.size()).fill_(0)
module_outputs = {}
for i in range(N):
cur_output, _ = self._forward_modules_ints_helper(feats, program, i, 0, module_outputs)
final_module_outputs.append(cur_output)
final_module_outputs = torch.cat(final_module_outputs, 0)
self.used_fns = self.used_fns.type_as(program.data).float()
return final_module_outputs, module_outputs
def _forward_batch(self, feats, program, question, save_activations=False):
cur = None
batch_size = program.shape[0]
max_program_len = program.shape[1]
stacks = [[] for j in range(batch_size)]
program_wellformed = torch.ones(batch_size, dtype=torch.bool)
zero_inp = torch.zeros_like(feats)[:, :, 0, 0]
memory = zero_inp[None, :]
if question is not None:
controls, control_scores = self.controller(question)
assert max_program_len <= controls.shape[1]
lengths = (program > 0).sum(1)
new_controls = []
for j, leng in zip(range(batch_size), lengths):
#shift controls so that the last control goes to the first module
new_controls.append(
torch.cat([controls[j, -leng:],
torch.zeros((max_program_len - leng, controls.shape[2]),
device=controls.device)],
0))
controls = torch.cat([c[None, :] for c in new_controls], 0)
# skip <START> at the position 0
for i in reversed(range(1, max_program_len)):
fn_names = [self.vocab['program_idx_to_token'][program[j, i].item()]
for j in range(batch_size)]
mask = torch.ones_like(program[:, 0])
for j in range(batch_size):
if fn_names[j] in ['<END>', '<NULL>']:
mask[j] = 0
num_inputs = [self.function_modules_num_inputs[fn_name] if mask[j] else 0
for j, fn_name in enumerate(fn_names)]
# prepare inputs
input_indices = [[max_program_len, max_program_len] for j in range(batch_size)]
for j in range(batch_size):
for k in range(num_inputs[j]):
if stacks[j]:
input_indices[j][k] = stacks[j].pop()
else:
program_wellformed[j] = False
inputs = []
for k in range(2):
indices = [input_indices[j][k] - i - 1 for j in range(batch_size)]
inputs.append(memory[indices, range(batch_size)])
# run the batched compute
control_i = controls[:, i] if question else program[:, i]
cur = self.shared_block(feats, control_i, inputs[0], inputs[1])
memory = torch.cat([cur[None, :], memory])
# push the new results onto the stack
for j in range(batch_size):
if mask[j]:
stacks[j].append(i)
for j in range(batch_size):
if len(stacks[j]) != 1:
program_wellformed[j] = False
if save_activations and self.learn_control:
self.control_scores = control_scores
return cur, program_wellformed
def forward(self, x, program, save_activations=False, question=None):
    """Run stem -> program execution -> classifier for a batch.

    Returns (scores, program_wellformed, None); program_wellformed is only
    computed on the FiLM path (use_film == 1), otherwise it stays None.
    """
    batch_size = x.size(0)
    assert batch_size == len(program)
    features = self.stem(x)
    wellformed = None
    if self.use_film != 1:
        module_outputs, _ = self._forward_modules_ints(features, program)
    else:
        # Controls are only derived from the question when learn_control is on.
        ctrl_question = question if self.learn_control else None
        module_outputs, wellformed = self._forward_batch(
            features, program, question=ctrl_question,
            save_activations=save_activations)
    scores = self.classifier(module_outputs)
    return scores, wellformed, None
| 15,132 | 40.803867 | 138 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/parser.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from . import create_seq2seq_net, TrainOptions
class Seq2seqParser(nn.Module):
    """Model interface for the seq2seq question-to-program parser.

    Wraps the seq2seq network built by ``create_seq2seq_net`` and exposes
    supervised (``log_likelihood``) and REINFORCE (``forward`` /
    ``reinforce_backward``) training entry points plus greedy ``parse``.

    Fix: ``_to_var`` reads ``self.gpu_ids``, but the attribute assignment
    was commented out in ``__init__`` (latent AttributeError); it is now
    restored with a safe fallback to CPU-only.
    """

    def __init__(self, vocab):
        super().__init__()
        self.opt = TrainOptions().parse()
        self.vocab = vocab
        self.net_params = self._get_net_params(self.opt, self.vocab)
        self.seq2seq = create_seq2seq_net(**self.net_params)
        self.variable_lengths = self.net_params['variable_lengths']
        self.end_id = self.net_params['end_id']
        # Restore the gpu id list used by _to_var; fall back to an empty
        # list (CPU) when the option object does not carry gpu_ids.
        self.gpu_ids = getattr(self.opt, 'gpu_ids', [])
        self.criterion = nn.NLLLoss()

    def set_input(self, x, y=None):
        """Store a (question, program) batch, length-sorting it when the
        encoder packs variable-length sequences."""
        input_lengths, idx_sorted = None, None
        if self.variable_lengths:
            x, y, input_lengths, idx_sorted = self._sort_batch(x, y)
        self.x = x
        self.y = y
        self.input_lengths = input_lengths
        self.idx_sorted = idx_sorted

    def log_likelihood(self, x, y):
        """Negative log-likelihood loss of target programs y given x."""
        self.set_input(x, y)
        assert self.y is not None, 'Must set y value'
        output_logprob = self.seq2seq(self.x, self.y, self.input_lengths)
        # Shift by one: position t predicts target token t+1.
        loss = self.criterion(output_logprob[:, :-1, :].contiguous().view(-1, output_logprob.size(2)),
                              self.y[:, 1:].contiguous().view(-1))
        return loss

    def forward(self, x, argmax=False):
        """Sample (or greedily decode, if argmax) a program per question.

        Returns (programs, per-step log-probs) restored to input order.
        """
        self.set_input(x)
        rl_seq, logprobs = self.seq2seq.reinforce_forward(self.x, self.input_lengths, argmax=argmax)
        rl_seq = self._restore_order(rl_seq.data.cpu())
        logprobs = self._restore_order(logprobs)
        self.reward = None  # Need to recompute reward from environment each time a new sequence is sampled
        return rl_seq.to(x.device), logprobs

    def reinforce_backward(self, entropy_factor=0.0):
        """Backpropagate the REINFORCE loss; requires self.reward to be set."""
        assert self.reward is not None, 'Must run forward sampling and set reward before REINFORCE'
        self.seq2seq.reinforce_backward(self.reward, entropy_factor)

    def parse(self):
        """Greedy argmax decoding of the batch stored by set_input."""
        output_sequence = self.seq2seq.sample_output(self.x, self.input_lengths)
        output_sequence = self._restore_order(output_sequence.data.cpu())
        return output_sequence

    def _get_net_params(self, opt, vocab):
        # Collect the seq2seq constructor kwargs from options + vocab sizes.
        net_params = {
            'input_vocab_size': len(vocab['question_token_to_idx']),
            'output_vocab_size': len(vocab['program_token_to_idx']),
            'hidden_size': opt.hidden_size,
            'word_vec_dim': opt.word_vec_dim,
            'n_layers': opt.n_layers,
            'bidirectional': opt.bidirectional,
            'variable_lengths': opt.variable_lengths,
            'use_attention': opt.use_attention,
            'encoder_max_len': opt.encoder_max_len,
            'decoder_max_len': opt.decoder_max_len,
            'start_id': opt.start_id,
            'end_id': opt.end_id,
            'word2vec_path': opt.word2vec_path,
            'fix_embedding': opt.fix_embedding,
        }
        return net_params

    def _sort_batch(self, x, y):
        """Sort the batch by question length (descending), as required by
        pack_padded_sequence. Length = index of end token + 1."""
        _, lengths = torch.eq(x, self.end_id).max(1)
        lengths += 1
        lengths_sorted, idx_sorted = lengths.sort(0, descending=True)
        x_sorted = x[idx_sorted]
        y_sorted = None
        if y is not None:
            y_sorted = y[idx_sorted]
        lengths_list = lengths_sorted.cpu().numpy()
        return x_sorted, y_sorted, lengths_list, idx_sorted

    def _restore_order(self, x):
        """Undo the permutation applied by _sort_batch (no-op if unsorted)."""
        if self.idx_sorted is not None:
            inv_idxs = self.idx_sorted.clone()
            inv_idxs.scatter_(0, self.idx_sorted,
                              torch.arange(x.size(0)).to(inv_idxs.device).long())
            return x[inv_idxs]
        return x

    def _to_var(self, x):
        # Legacy Variable wrapper; moves to GPU when gpu_ids is non-empty.
        if len(self.gpu_ids) > 0 and torch.cuda.is_available():
            x = x.cuda()
        return Variable(x)

    def _to_numpy(self, x):
        return x.data.cpu().numpy().astype(float)
| 4,059 | 37.301887 | 106 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/base_rnn.py | import torch.nn as nn
class BaseRNN(nn.Module):
    """Shared base for the encoder/decoder RNN modules.

    Stores the common hyper-parameters, resolves the requested recurrent
    cell class ('lstm' or 'gru') into ``self.rnn_cell``, and builds the
    input-dropout layer applied by subclasses to their embedded inputs.
    """

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p,
                 dropout_p, n_layers, rnn_cell):
        super(BaseRNN, self).__init__()
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout_p = input_dropout_p
        self.dropout_p = dropout_p
        # Dispatch table instead of an if/elif chain.
        cell_classes = {'lstm': nn.LSTM, 'gru': nn.GRU}
        if rnn_cell not in cell_classes:
            raise ValueError('Unsupported RNN Cell: %s' % rnn_cell)
        self.rnn_cell = cell_classes[rnn_cell]
        self.input_dropout = nn.Dropout(p=input_dropout_p)

    def forward(self, *args, **kwargs):
        # Abstract: concrete encoders/decoders must override.
        raise NotImplementedError()
CLOSURE | CLOSURE-master/vr/ns_vqa/base_options.py | import os
import argparse
import numpy as np
import torch
class BaseOptions():
    """Base option class.

    Builds the argparse parser shared by train/test option classes and, in
    parse(), converts the gpu id string into a list, selects the CUDA
    device, applies the manual seed, and prints/saves the chosen options.

    NOTE(review): parse() reads ``self.is_train``, which is never assigned
    here — presumably set by a subclass before parse() is called; confirm.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # Experiment bookkeeping
        self.parser.add_argument('--run_dir', default='_scratch/test_run', type=str, help='experiment directory')
        self.parser.add_argument('--dataset', default='clevr', type=str, help='select dataset, options: clevr, clevr-humans')
        # Dataloader
        self.parser.add_argument('--shuffle', default=1, type=int, help='shuffle dataset')
        self.parser.add_argument('--num_workers', default=1, type=int, help='number of workers for loading data')
        # Run
        self.parser.add_argument('--manual_seed', default=None, type=int, help='manual seed')
        self.parser.add_argument('--gpu_ids', default='0', type=str, help='ids of gpu to be used')
        self.parser.add_argument('--visualize', default=0, type=int, help='visualize experiment')
        # Dataset catalog
        # - CLEVR
        self.parser.add_argument('--clevr_train_scene_path', default='../data/raw/CLEVR_v1.0/scenes/CLEVR_train_scenes.json',
                                 type=str, help='path to clevr train scenes')
        self.parser.add_argument('--clevr_val_scene_path', default='../data/raw/CLEVR_v1.0/scenes/CLEVR_val_scenes.json',
                                 type=str, help='path to clevr val scenes')
        self.parser.add_argument('--clevr_train_question_path', default='../data/reason/clevr_h5/clevr_train_questions.h5',
                                 type=str, help='path to clevr train questions')
        self.parser.add_argument('--clevr_val_question_path', default='../data/reason/clevr_h5/clevr_val_questions.h5',
                                 type=str, help='path to clevr val questions')
        self.parser.add_argument('--clevr_vocab_path', default='../data/reason/clevr_h5/clevr_vocab.json',
                                 type=str, help='path to clevr vocab')

    def parse(self):
        """Parse options (from defaults only), set up GPU/seed, and log.

        Returns the populated argparse Namespace (also kept as self.opt).
        Side effects: may call torch.cuda.set_device / manual_seed, prints
        all options, creates run_dir, and writes train_opt.txt/test_opt.txt.
        """
        # Instantiate option
        # NOTE(review): parse_args([]) ignores the real command line — every
        # value comes from the defaults above unless changed programmatically.
        self.opt = self.parser.parse_args([])

        # Parse gpu id list: '0,1' -> [0, 1]; non-numeric entries are dropped.
        str_gpu_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_gpu_ids:
            if str_id.isdigit() and int(str_id) >= 0:
                self.opt.gpu_ids.append(int(str_id))
        if len(self.opt.gpu_ids) > 0 and torch.cuda.is_available():
            torch.cuda.set_device(self.opt.gpu_ids[0])
        else:
            print('| using cpu')
            self.opt.gpu_ids = []

        # Set manual seed (CPU always; CUDA only when a GPU is in use)
        if self.opt.manual_seed is not None:
            torch.manual_seed(self.opt.manual_seed)
            if len(self.opt.gpu_ids) > 0 and torch.cuda.is_available():
                torch.cuda.manual_seed(self.opt.manual_seed)

        # Print and save options
        args = vars(self.opt)
        print('| options')
        for k, v in args.items():
            print('%s: %s' % (str(k), str(v)))

        if not os.path.isdir(self.opt.run_dir):
            os.makedirs(self.opt.run_dir)
        # Options file name depends on the (subclass-provided) train/test flag.
        if self.is_train:
            file_path = os.path.join(self.opt.run_dir, 'train_opt.txt')
        else:
            file_path = os.path.join(self.opt.run_dir, 'test_opt.txt')
        with open(file_path, 'wt') as fout:
            fout.write('| options\n')
            for k, v in args.items():
                fout.write('%s: %s\n' % (str(k), str(v)))

        return self.opt
| 3,439 | 45.486486 | 125 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/seq2seq.py | import torch
import torch.nn as nn
class Seq2seq(nn.Module):
    """Encoder-decoder wrapper for program generation.

    Fix: ``reinforce_forward`` now stores the per-step sample/log-prob
    lists on the module (``self.output_symbols`` / ``self.output_logprobs``);
    previously they were never assigned, so ``reinforce_backward`` — which
    asserts on and iterates over them — always failed.
    """

    def __init__(self, encoder, decoder):
        super(Seq2seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        # Populated by reinforce_forward, consumed by reinforce_backward.
        self.output_symbols = None
        self.output_logprobs = None

    def forward(self, x, y, input_lengths=None):
        """Teacher-forced pass: encode x, decode conditioned on targets y.

        Returns the decoder outputs (log-probabilities per step).
        """
        encoder_outputs, encoder_hidden = self.encoder(x, input_lengths)
        decoder_outputs, decoder_hidden = self.decoder(y, encoder_outputs, encoder_hidden)
        return decoder_outputs

    def reinforce_forward(self, x, input_lengths=None, argmax=False):
        """Sample (or greedily decode, if argmax) an output sequence.

        Stores the raw per-step lists for reinforce_backward and returns
        (symbols, logprobs) stacked to shape (batch, time).
        """
        encoder_outputs, encoder_hidden = self.encoder(x, input_lengths)
        # Keep the per-step lists around: reinforce_backward needs them.
        self.output_symbols, self.output_logprobs = self.decoder.forward_sample(
            encoder_outputs, encoder_hidden, reinforce_sample=not argmax)
        return (torch.stack(self.output_symbols).transpose(0, 1),
                torch.stack(self.output_logprobs).transpose(0, 1))

    def reinforce_backward(self, reward, entropy_factor=0.0):
        """REINFORCE gradient step from the trace stored by reinforce_forward.

        NOTE(review): the index_select branch assumes 2-D per-step logprobs
        (batch, vocab) while Decoder.forward_sample emits 1-D per-step
        logprobs — confirm which decoder variant this path trains with.
        """
        assert self.output_logprobs is not None and self.output_symbols is not None, 'must call reinforce_forward first'
        losses = []
        grad_output = []
        for i, symbol in enumerate(self.output_symbols):
            if len(self.output_symbols[0].shape) == 1:
                loss = - torch.diag(torch.index_select(self.output_logprobs[i], 1, symbol)).sum()*reward \
                    + entropy_factor*(self.output_logprobs[i]*torch.exp(self.output_logprobs[i])).sum()
            else:
                loss = - self.output_logprobs[i]*reward
            losses.append(loss.sum())
            grad_output.append(None)
        torch.autograd.backward(losses, grad_output, retain_graph=True)
| 1,700 | 42.615385 | 131 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/utils.py | import os
import json
import numpy as np
import torch
def mkdirs(paths):
    """Create a directory, or every directory in a list, if missing.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    exists-then-create pattern, which was racy (TOCTOU) when several
    processes created the same run directory concurrently.
    """
    if isinstance(paths, list):
        for path in paths:
            os.makedirs(path, exist_ok=True)
    else:
        os.makedirs(paths, exist_ok=True)
def invert_dict(d):
    """Return a new dict with keys and values of *d* swapped."""
    return dict(zip(d.values(), d.keys()))
def load_vocab(path):
    """Load a vocab JSON file and add reverse (index -> token) tables.

    Adds ``question_idx_to_token``, ``program_idx_to_token`` and
    ``answer_idx_to_token``, and sanity-checks that <NULL>/<START>/<END>
    occupy indices 0/1/2 in the question and program vocabularies.
    """
    with open(path, 'r') as f:
        vocab = json.load(f)
    for name in ('question', 'program', 'answer'):
        forward = vocab['%s_token_to_idx' % name]
        vocab['%s_idx_to_token' % name] = {idx: tok for tok, idx in forward.items()}
    # Sanity check: make sure <NULL>, <START>, and <END> are consistent
    for name in ('question', 'program'):
        token_to_idx = vocab['%s_token_to_idx' % name]
        assert token_to_idx['<NULL>'] == 0
        assert token_to_idx['<START>'] == 1
        assert token_to_idx['<END>'] == 2
    return vocab
def load_scenes(scenes_json):
    """Parse a CLEVR scenes JSON file into per-image object tables.

    Each object becomes a dict with an 'id' of the form
    '<image_index>-<object_index>', a 3-D 'position' (projected onto the
    camera right/front axes when only '3d_coords' is present), and the
    four CLEVR attributes (color, material, shape, size).
    """
    with open(scenes_json) as f:
        raw_scenes = json.load(f)['scenes']
    scenes = []
    for scene in raw_scenes:
        table = []
        for idx, obj in enumerate(scene['objects']):
            if '3d_coords' in obj:
                position = [np.dot(obj['3d_coords'], scene['directions']['right']),
                            np.dot(obj['3d_coords'], scene['directions']['front']),
                            obj['3d_coords'][2]]
            else:
                position = obj['position']
            table.append({
                'id': '%d-%d' % (scene['image_index'], idx),
                'position': position,
                'color': obj['color'],
                'material': obj['material'],
                'shape': obj['shape'],
                'size': obj['size'],
            })
        scenes.append(table)
    return scenes
def load_embedding(path):
    """Load a saved numpy array and return it as a float32 torch.Tensor."""
    weights = np.load(path)
    return torch.Tensor(weights)
CLOSURE | CLOSURE-master/vr/ns_vqa/encoder.py | import torch.nn as nn
from .base_rnn import BaseRNN
class Encoder(BaseRNN):
    """Encoder RNN: embeds question tokens and runs them through an RNN.

    Optionally initializes the embedding from a word2vec matrix (freezing
    it when fix_embedding is set) and packs variable-length batches.
    """

    def __init__(self, vocab_size, max_len, word_vec_dim, hidden_size, n_layers,
                 input_dropout_p=0., dropout_p=0., bidirectional=False, rnn_cell='lstm',
                 variable_lengths=False, word2vec=None, fix_embedding=False):
        super(Encoder, self).__init__(vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell)
        self.variable_lengths = variable_lengths
        if word2vec is None:
            self.word_vec_dim = word_vec_dim
            self.embedding = nn.Embedding(vocab_size, word_vec_dim)
        else:
            # Pretrained embeddings: the embedding width follows the matrix.
            assert word2vec.size(0) == vocab_size
            self.word_vec_dim = word2vec.size(1)
            self.embedding = nn.Embedding(vocab_size, self.word_vec_dim)
            self.embedding.weight = nn.Parameter(word2vec)
        if fix_embedding:
            self.embedding.weight.requires_grad = False
        self.rnn = self.rnn_cell(self.word_vec_dim, hidden_size, n_layers,
                                 batch_first=True, bidirectional=bidirectional, dropout=dropout_p)

    def forward(self, input_var, input_lengths=None):
        """Return (rnn outputs, final hidden state) for a token batch.

        When variable_lengths is set, input_lengths (descending) drives
        pack/pad so padding does not enter the recurrence.
        """
        embedded = self.input_dropout(self.embedding(input_var))
        if not self.variable_lengths:
            output, hidden = self.rnn(embedded)
            return output, hidden
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
        output, hidden = self.rnn(packed)
        output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        return output, hidden
| 1,726 | 44.447368 | 119 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/clevr_executor.py | import torch
import random
import json
CLEVR_COLORS = ['blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow']
CLEVR_MATERIALS = ['rubber', 'metal']
CLEVR_SHAPES = ['cube', 'cylinder', 'sphere']
CLEVR_SIZES = ['large', 'small']

# Per-module answer candidate sets, used only for the guess=True fallback.
CLEVR_ANSWER_CANDIDATES = {
    'count': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'],
    'equal_color': ['yes', 'no'],
    'equal_integer': ['yes', 'no'],
    'equal_material': ['yes', 'no'],
    'equal_shape': ['yes', 'no'],
    'equal_size': ['yes', 'no'],
    'exist': ['yes', 'no'],
    'greater_than': ['yes', 'no'],
    'less_than': ['yes', 'no'],
    'query_color': ['blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow'],
    'query_material': ['metal', 'rubber'],
    'query_size': ['small', 'large'],
    'query_shape': ['cube', 'cylinder', 'sphere'],
    'same_color': ['yes', 'no'],
    'same_material': ['yes', 'no'],
    'same_size': ['yes', 'no'],
    'same_shape': ['yes', 'no']
}


class ClevrExecutor:
    """Symbolic program executor for CLEVR.

    Programs are sequences of token indices in prefix order (program[0] is
    the final module) and are executed back-to-front over a scene: a list
    of object dicts with keys 'id', 'position', 'color', 'material',
    'shape', 'size'.

    The many near-identical filter_*/equal_*/query_*/relate_*/same_*
    modules are deduplicated into shared private helpers; all public
    module methods keep their original names and signatures.
    """

    def __init__(self, vocab):
        self.vocab = vocab
        self.colors = CLEVR_COLORS
        self.materials = CLEVR_MATERIALS
        self.shapes = CLEVR_SHAPES
        self.sizes = CLEVR_SIZES
        self.answer_candidates = CLEVR_ANSWER_CANDIDATES
        self.modules = {}
        self._register_modules()

    def __call__(self, scenes, programs):
        """Execute a batch of programs; return answer indices (-1 = unknown)."""
        preds = []
        for i in range(programs.shape[0]):
            pred = self.run(programs[i].cpu().numpy(), scenes[i])
            preds.append(self.vocab['answer_token_to_idx'].get(pred, -1))
        return torch.LongTensor(preds)

    def run(self, x, scene, guess=False, debug=False):
        """Execute one program `x` (token-index sequence) on `scene`.

        Returns the answer as a string, or 'error' for ill-formed
        programs; with guess=True an erroneous program yields a random
        candidate answer for its final module instead.
        """
        assert self.modules, 'Must have scene annotations and define modules first'
        ans, temp = None, None

        # Find the length of the program sequence up to the first '<END>'.
        length = 0
        for k in range(len(x)):
            l = len(x) - k
            if self.vocab['program_idx_to_token'][x[l - 1]] == '<END>':
                length = l
        if length == 0:
            return 'error'

        self.exe_trace = []
        # Prefix order: walk from the tail (arguments) toward program[0].
        for j in range(length):
            i = length - 1 - j
            token = self.vocab['program_idx_to_token'][x[i]]
            if token == 'scene':
                # A second pending 'scene' result is a malformed program.
                if temp is not None:
                    ans = 'error'
                    break
                temp = ans
                ans = list(scene)
            elif token in self.modules:
                module = self.modules[token]
                if token.startswith('same') or token.startswith('relate'):
                    # Spatial/attribute relations range over the full scene.
                    ans = module(ans, scene)
                else:
                    ans = module(ans, temp)
                if ans == 'error':
                    break
            self.exe_trace.append(ans)
            if debug:
                print(token)
                print('ans:')
                self._print_debug_message(ans)
                print('temp: ')
                self._print_debug_message(temp)
                print()
        ans = str(ans)
        if ans == 'error' and guess:
            final_module = self.vocab['program_idx_to_token'][x[0]]
            if final_module in self.answer_candidates:
                ans = random.choice(self.answer_candidates[final_module])
        return ans

    def _print_debug_message(self, x):
        if type(x) == list:
            for o in x:
                print(self._object_info(o))
        elif type(x) == dict:
            print(self._object_info(x))
        else:
            print(x)

    def _object_info(self, obj):
        return '%s %s %s %s at %s' % (obj['size'], obj['color'], obj['material'], obj['shape'], str(obj['position']))

    def _register_modules(self):
        """Map program-token names to their bound implementations."""
        self.modules = {
            'count': self.count,
            'equal_color': self.equal_color,
            'equal_integer': self.equal_integer,
            'equal_material': self.equal_material,
            'equal_shape': self.equal_shape,
            'equal_size': self.equal_size,
            'exist': self.exist,
            'filter_color[blue]': self.filter_blue,
            'filter_color[brown]': self.filter_brown,
            'filter_color[cyan]': self.filter_cyan,
            'filter_color[gray]': self.filter_gray,
            'filter_color[green]': self.filter_green,
            'filter_color[purple]': self.filter_purple,
            'filter_color[red]': self.filter_red,
            'filter_color[yellow]': self.filter_yellow,
            'filter_material[rubber]': self.filter_rubber,
            'filter_material[metal]': self.filter_metal,
            'filter_shape[cube]': self.filter_cube,
            'filter_shape[cylinder]': self.filter_cylinder,
            'filter_shape[sphere]': self.filter_sphere,
            'filter_size[large]': self.filter_large,
            'filter_size[small]': self.filter_small,
            'greater_than': self.greater_than,
            'less_than': self.less_than,
            'intersect': self.intersect,
            'query_color': self.query_color,
            'query_material': self.query_material,
            'query_shape': self.query_shape,
            'query_size': self.query_size,
            'relate[behind]': self.relate_behind,
            'relate[front]': self.relate_front,
            'relate[left]': self.relate_left,
            'relate[right]': self.relate_right,
            'same_color': self.same_color,
            'same_material': self.same_material,
            'same_shape': self.same_shape,
            'same_size': self.same_size,
            'union': self.union,
            'unique': self.unique,
        }

    # ---- generic implementations shared by the per-token modules ----

    def _filter_attr(self, scene, attr, value):
        # filter_* modules: keep objects whose `attr` equals `value`.
        if type(scene) == list:
            return [o for o in scene if o[attr] == value]
        return 'error'

    def _equal_attr(self, value1, value2, domain):
        # equal_* modules: compare two attribute values drawn from `domain`.
        if type(value1) == str and value1 in domain and type(value2) == str and value2 in domain:
            return 'yes' if value1 == value2 else 'no'
        return 'error'

    def _query_attr(self, obj, attr):
        # query_* modules: read one attribute from a single object.
        if type(obj) == dict and attr in obj:
            return obj[attr]
        return 'error'

    def _relate(self, obj, scene, axis, greater):
        # relate_* modules: objects on one side of `obj` along `axis`
        # (0 = left/right, 1 = behind/front).
        if type(obj) == dict and 'position' in obj and type(scene) == list:
            ref = obj['position'][axis]
            if greater:
                return [o for o in scene if o['position'][axis] > ref]
            return [o for o in scene if o['position'][axis] < ref]
        return 'error'

    def _same_attr(self, obj, scene, attr):
        # same_* modules: other objects sharing `obj`'s attribute value.
        if type(obj) == dict and attr in obj and type(scene) == list:
            return [o for o in scene if o[attr] == obj[attr] and o['id'] != obj['id']]
        return 'error'

    # ---- modules ----

    def count(self, scene, _):
        if type(scene) == list:
            return len(scene)
        return 'error'

    def exist(self, scene, _):
        if type(scene) == list:
            return 'yes' if len(scene) != 0 else 'no'
        return 'error'

    def equal_color(self, color1, color2):
        return self._equal_attr(color1, color2, self.colors)

    def equal_material(self, material1, material2):
        return self._equal_attr(material1, material2, self.materials)

    def equal_shape(self, shape1, shape2):
        return self._equal_attr(shape1, shape2, self.shapes)

    def equal_size(self, size1, size2):
        return self._equal_attr(size1, size2, self.sizes)

    def equal_integer(self, integer1, integer2):
        if type(integer1) == int and type(integer2) == int:
            return 'yes' if integer1 == integer2 else 'no'
        return 'error'

    def greater_than(self, integer1, integer2):
        if type(integer1) == int and type(integer2) == int:
            return 'yes' if integer1 > integer2 else 'no'
        return 'error'

    def less_than(self, integer1, integer2):
        if type(integer1) == int and type(integer2) == int:
            return 'yes' if integer1 < integer2 else 'no'
        return 'error'

    def filter_blue(self, scene, _):
        return self._filter_attr(scene, 'color', 'blue')

    def filter_brown(self, scene, _):
        return self._filter_attr(scene, 'color', 'brown')

    def filter_cyan(self, scene, _):
        return self._filter_attr(scene, 'color', 'cyan')

    def filter_gray(self, scene, _):
        return self._filter_attr(scene, 'color', 'gray')

    def filter_green(self, scene, _):
        return self._filter_attr(scene, 'color', 'green')

    def filter_purple(self, scene, _):
        return self._filter_attr(scene, 'color', 'purple')

    def filter_red(self, scene, _):
        return self._filter_attr(scene, 'color', 'red')

    def filter_yellow(self, scene, _):
        return self._filter_attr(scene, 'color', 'yellow')

    def filter_rubber(self, scene, _):
        return self._filter_attr(scene, 'material', 'rubber')

    def filter_metal(self, scene, _):
        return self._filter_attr(scene, 'material', 'metal')

    def filter_cube(self, scene, _):
        return self._filter_attr(scene, 'shape', 'cube')

    def filter_cylinder(self, scene, _):
        return self._filter_attr(scene, 'shape', 'cylinder')

    def filter_sphere(self, scene, _):
        return self._filter_attr(scene, 'shape', 'sphere')

    def filter_large(self, scene, _):
        return self._filter_attr(scene, 'size', 'large')

    def filter_small(self, scene, _):
        return self._filter_attr(scene, 'size', 'small')

    def intersect(self, scene1, scene2):
        if type(scene1) == list and type(scene2) == list:
            return [o for o in scene1 if o in scene2]
        return 'error'

    def union(self, scene1, scene2):
        if type(scene1) == list and type(scene2) == list:
            output = list(scene2)
            for o in scene1:
                if o not in scene2:
                    output.append(o)
            return output
        return 'error'

    def query_color(self, obj, _):
        return self._query_attr(obj, 'color')

    def query_material(self, obj, _):
        return self._query_attr(obj, 'material')

    def query_shape(self, obj, _):
        return self._query_attr(obj, 'shape')

    def query_size(self, obj, _):
        return self._query_attr(obj, 'size')

    def relate_behind(self, obj, scene):
        return self._relate(obj, scene, 1, False)

    def relate_front(self, obj, scene):
        return self._relate(obj, scene, 1, True)

    def relate_left(self, obj, scene):
        return self._relate(obj, scene, 0, False)

    def relate_right(self, obj, scene):
        return self._relate(obj, scene, 0, True)

    def same_color(self, obj, scene):
        return self._same_attr(obj, scene, 'color')

    def same_material(self, obj, scene):
        return self._same_attr(obj, scene, 'material')

    def same_shape(self, obj, scene):
        return self._same_attr(obj, scene, 'shape')

    def same_size(self, obj, scene):
        return self._same_attr(obj, scene, 'size')

    def unique(self, scene, _):
        if type(scene) == list and len(scene) > 0:
            return scene[0]
        return 'error'
| 15,787 | 32.378436 | 125 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/decoder.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from .base_rnn import BaseRNN
from .attention import Attention
def logical_or(x, y):
    """Element-wise OR of two {0,1} tensors, returned as a {0,1} tensor."""
    return torch.clamp(x + y, 0, 1)
def logical_not(x):
    """Element-wise NOT: True wherever the tensor equals zero."""
    return x.eq(0)
class Decoder(BaseRNN):
    """Decoder RNN that emits program tokens from the encoder state.

    Supports teacher-forced decoding (forward) and step-wise sampling or
    greedy decoding (forward_sample), with optional attention over the
    encoder outputs. When the encoder is bidirectional, its two hidden
    directions are concatenated, doubling the decoder hidden size.
    """
    def __init__(self, vocab_size, max_len, word_vec_dim, hidden_size,
                 n_layers, start_id=1, end_id=2, rnn_cell='lstm',
                 bidirectional=False, input_dropout_p=0,
                 dropout_p=0, use_attention=False):
        super(Decoder, self).__init__(vocab_size, max_len, hidden_size,
                                      input_dropout_p, dropout_p, n_layers, rnn_cell)
        self.max_length = max_len
        self.output_size = vocab_size
        self.hidden_size = hidden_size
        self.word_vec_dim = word_vec_dim
        self.bidirectional_encoder = bidirectional
        if bidirectional:
            # Encoder directions are concatenated in _cat_directions, so the
            # decoder operates at twice the nominal hidden size.
            self.hidden_size *= 2
        self.use_attention = use_attention
        self.start_id = start_id
        self.end_id = end_id
        self.embedding = nn.Embedding(self.output_size, self.word_vec_dim)
        self.rnn = self.rnn_cell(self.word_vec_dim, self.hidden_size, n_layers, batch_first=True, dropout=dropout_p)
        self.out_linear = nn.Linear(self.hidden_size, self.output_size)
        if use_attention:
            self.attention = Attention(self.hidden_size)

    def forward_step(self, input_var, hidden, encoder_outputs):
        """One decoding step (or a whole teacher-forced sequence).

        Returns (log-probabilities of shape (batch, steps, vocab), new
        hidden state, attention map or None).
        """
        batch_size = input_var.size(0)
        output_size = input_var.size(1)
        embedded = self.embedding(input_var)
        embedded = self.input_dropout(embedded)
        output, hidden = self.rnn(embedded, hidden)
        attn = None
        if self.use_attention:
            output, attn = self.attention(output, encoder_outputs)
        output = self.out_linear(output.contiguous().view(-1, self.hidden_size))
        predicted_softmax = F.log_softmax(output.view(batch_size, output_size, -1), 2)
        return predicted_softmax, hidden, attn

    def forward(self, y, encoder_outputs, encoder_hidden):
        """Teacher-forced decoding of the full target sequence y."""
        decoder_hidden = self._init_state(encoder_hidden)
        decoder_outputs, decoder_hidden, attn = self.forward_step(y, decoder_hidden, encoder_outputs)
        return decoder_outputs, decoder_hidden

    def forward_sample(self, encoder_outputs, encoder_hidden, reinforce_sample=False):
        """Decode step by step, sampling (reinforce_sample) or greedy argmax.

        Returns (output_symbols, output_logprobs): lists of per-step 1-D
        tensors, starting with the <START> symbol and a zero log-prob.
        Log-probs of steps after a sequence hit <END> are masked to zero.
        """
        if isinstance(encoder_hidden, tuple):
            # LSTM hidden state is a (h, c) tuple; batch dim is dim 1.
            batch_size = encoder_hidden[0].size(1)
        else:
            batch_size = encoder_hidden.size(1)
        decoder_hidden = self._init_state(encoder_hidden)
        decoder_input = Variable(torch.LongTensor(batch_size, 1).fill_(self.start_id))
        decoder_input = decoder_input.to(encoder_hidden[0].device)
        output_symbols = [decoder_input.squeeze()]
        output_logprobs = [torch.zeros(batch_size).to(decoder_input.device)]
        # Per-sequence flag: set once <END> has been produced.
        done = torch.ByteTensor(batch_size).fill_(0).to(decoder_input.device)

        def decode(i, output, reinforce_sample=reinforce_sample):
            # Pick the next symbol for every sequence and record its log-prob.
            nonlocal done
            if reinforce_sample:
                dist = torch.distributions.Categorical(probs=torch.exp(output.view(batch_size, -1)))  # better initialize with logits
                symbols = dist.sample().unsqueeze(1)
            else:
                symbols = output.topk(1)[1].view(batch_size, -1)
            symbol_logprobs = output[:, 0, :][torch.arange(batch_size), symbols[:, 0]]
            not_done = logical_not(done)
            # Zero out contributions from sequences that already ended.
            output_logprobs.append(not_done.float() * symbol_logprobs)
            output_symbols.append(symbols.squeeze())
            done = logical_or(done, symbols[:, 0] == self.end_id)
            return symbols

        for i in range(self.max_length):
            decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs)
            decoder_input = decode(i, decoder_output)
        return output_symbols, output_logprobs

    def _init_state(self, encoder_hidden):
        """Adapt the encoder's final hidden state for the decoder RNN."""
        if encoder_hidden is None:
            return None
        if isinstance(encoder_hidden, tuple):
            encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
        else:
            encoder_hidden = self._cat_directions(encoder_hidden)
        return encoder_hidden

    def _cat_directions(self, h):
        # For a bidirectional encoder, interleave forward/backward layers
        # and concatenate them along the feature dimension.
        if self.bidirectional_encoder:
            h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
        return h
| 4,566 | 39.061404 | 132 | py |
CLOSURE | CLOSURE-master/vr/ns_vqa/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Dot-product attention layer over encoder outputs.

    Optionally applies a learned linear map to the decoder states before
    scoring (use_weight); the attended context is concatenated with the
    decoder state and projected back through linear_out.
    """
    def __init__(self, dim, use_weight=False, hidden_size=512):
        super(Attention, self).__init__()
        self.use_weight = use_weight
        self.hidden_size = hidden_size
        if use_weight:
            print('| using weighted attention layer')
            self.attn_weight = nn.Linear(hidden_size, hidden_size, bias=False)
        self.linear_out = nn.Linear(2*dim, dim)

    def forward(self, output, context):
        """
        - args
        output : Tensor
            decoder output, dim (batch_size, output_size, hidden_size)
        context : Tensor
            context vector from encoder, dim (batch_size, input_size, hidden_size)
        - returns
        output : Tensor
            attention layer output, dim (batch_size, output_size, hidden_size)
        attn : Tensor
            attention map, dim (batch_size, output_size, input_size)
        """
        batch_size = output.size(0)
        hidden_size = output.size(2)
        input_size = context.size(1)
        if self.use_weight:
            output = self.attn_weight(output.contiguous().view(-1, hidden_size)).view(batch_size, -1, hidden_size)
        # Dot-product scores between each decoder state and encoder output.
        attn = torch.bmm(output, context.transpose(1, 2))
        attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)  # (batch_size, output_size, input_size)
        mix = torch.bmm(attn, context)  # (batch_size, output_size, hidden_size)
        # NOTE(review): the decoder-state half of the concat is zeroed out
        # (`output * 0`), so only the attended context reaches linear_out.
        # This looks deliberate (CLOSURE modification?) — confirm before
        # "fixing" it back to the usual cat((mix, output)).
        comb = torch.cat((mix, output * 0), dim=2)  # (batch_size, output_size, 2*hidden_size)
        output = torch.tanh(self.linear_out(comb.view(-1, 2*hidden_size)).view(batch_size, -1, hidden_size))  # (batch_size, output_size, hidden_size)
        return output, attn
| 1,804 | 38.23913 | 149 | py |
CLOSURE | CLOSURE-master/scripts/run_pg.py | # This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import argparse
import json
import random
import shutil
from termcolor import colored
import time
from tqdm import tqdm
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import numpy as np
import h5py
from scipy.misc import imread, imresize, imsave
import vr.utils as utils
import vr.programs
from vr.data import ClevrDataset, ClevrDataLoader
from vr.preprocess import tokenize, encode
from vr.models import *
# Command-line interface for evaluating a program generator on a split.
parser = argparse.ArgumentParser()
parser.add_argument('--program_generator', default=None)
parser.add_argument('--execution_engine', default=None)
# Drop into the debugger when debug_every <= 1 (see run_batch).
parser.add_argument('--debug_every', default=float('inf'), type=float)
parser.add_argument('--use_gpu', default=torch.cuda.is_available(), type=int)

# For running on a preprocessed dataset
parser.add_argument('--data_dir', default=None, type=str)
parser.add_argument('--part', default='val', type=str)

# This will override the vocab stored in the checkpoint;
# we need this to run CLEVR models on human data
parser.add_argument('--vocab_json', default=None)

# Evaluate on at most this many examples (None = whole split).
parser.add_argument('--num_examples', default=None, type=int)

# If this is passed, then save all predictions to this file
parser.add_argument('--output_h5', default=None)
parser.add_argument('--output_preds', default=None)

# Module-level scratch state used by the debugging hooks below.
grads = {}
programs = {}  # NOTE: Useful for zero-shot program manipulation when in debug mode
def main(args):
    """Load the program generator and score it on the requested data split."""
    question_h5 = os.path.join(args.data_dir, '{}_questions.h5'.format(args.part))
    feature_h5 = os.path.join(args.data_dir, '{}_features.h5'.format(args.part))

    pg, _ = utils.load_program_generator(args.program_generator)
    dtype = torch.cuda.FloatTensor if args.use_gpu == 1 else torch.FloatTensor
    vocab = load_vocab(args)

    loader_kwargs = {
        'question_h5': question_h5,
        'feature_h5': feature_h5,
        'vocab': vocab,
        'batch_size': 128,
    }
    with ClevrDataLoader(**loader_kwargs) as loader:
        run_batch(args, pg, loader, dtype)
def run_batch(args, pg, loader, dtype):
    """Measure exact-match program prediction accuracy of `pg` over `loader`.

    For each batch, greedily decodes programs from the questions, compares
    them token-by-token with the ground-truth programs (truncated to the
    shorter of the two lengths), and reports / saves the results:
    a per-example correctness mask to an HDF5 file and the decoded program
    strings to a text file.
    """
    pg.type(dtype)
    pg.eval()
    all_correct = []
    all_preds = []
    num_samples = 0
    num_correct = 0
    for batch in tqdm(loader):
        questions, images, feats, answers, programs = batch
        if isinstance(questions, list):
            questions_var = questions[0].type(dtype).long()
        else:
            questions_var = questions.type(dtype).long()
        feats_var = feats.type(dtype)
        programs = programs.to(feats_var.device)
        programs_pred, _ = pg.forward(questions_var, argmax=True)
        # Compare only up to the shorter of the two program lengths.
        min_length = min(programs.shape[1], programs_pred.shape[1])
        programs_pred = programs_pred[:, :min_length]
        programs = programs[:, :min_length]
        correct = (programs_pred == programs).int().sum(1) == min_length
        # Accumulate as a Python int rather than a 0-dim tensor.
        num_correct += correct.sum().item()
        all_correct.append(correct)
        all_preds.append(programs_pred)
        num_samples += programs.size(0)
        if args.num_examples and num_samples >= args.num_examples:
            break
    acc = float(num_correct) / num_samples
    print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
    output_path = ('output_' + args.part + "_" + args.program_generator[:-3] + ".h5"
                   if not args.output_h5
                   else args.output_h5)
    preds_path = ('programs_' + args.part + "_" + args.program_generator[:-3] + ".txt"
                  if not args.output_preds
                  else args.output_preds)
    print('Writing output to "%s"' % output_path)
    with h5py.File(output_path, 'w') as fout:
        fout.create_dataset('correct', data=torch.cat(all_correct, 0).cpu().numpy())
    vocab = load_vocab(args)
    all_preds = torch.cat(all_preds, 0).cpu().numpy()
    all_preds_strings = []
    for i in range(len(all_preds)):
        all_preds_strings.append(
            " ".join(vocab['program_idx_to_token'][w] for w in all_preds[i]))
    save_to_file(all_preds_strings, preds_path)
    if args.debug_every <= 1:
        # BUGFIX: pdb was referenced here but never imported at module level,
        # so entering debug mode raised NameError instead of dropping to pdb.
        import pdb
        pdb.set_trace()
    return
def load_vocab(args):
    """Return the vocab dictionary stored inside the program generator checkpoint."""
    checkpoint = utils.load_cpu(args.program_generator)
    return checkpoint['vocab']
def save_grad(name):
    """Return a backward hook that stashes its gradient under `name` in the module-level `grads`."""
    def hook(grad):
        grads[name] = grad
    return hook
def save_to_file(text, filename):
    """Write the strings in `text` to `filename`, one per line, UTF-8 encoded,
    with a trailing newline after the last line."""
    with open(filename, mode='wt', encoding='utf-8') as out:
        out.write('\n'.join(text) + '\n')
def get_index(l, index, default=-1):
    """Return the position of the first occurrence of `index` in `l`,
    or `default` when it is absent."""
    for pos, item in enumerate(l):
        if item == index:
            return pos
    return default
if __name__ == '__main__':
    # Parse CLI flags and run the evaluation.
    args = parser.parse_args()
    main(args)
# (dataset-join residue, kept as comments: 5,036 | 29.713415 | 138 | py)
# ---- CLOSURE-master/scripts/train_model.py ----
#!/usr/bin/env python3
# This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import argparse
import json
import os
import pdb
import random
import shutil
import sys
import subprocess
import time
import logging
import itertools
import lru
import pickle
import h5py
import numpy as np
from termcolor import colored
import torch
torch.backends.cudnn.enabled = True
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel, DataParallel
import vr
import vr.utils
import vr.preprocess
from vr.data import (ClevrDataset,
ClevrDataLoader)
from vr.models import *
from vr.ns_vqa.parser import Seq2seqParser
from vr.ns_vqa.clevr_executor import ClevrExecutor
parser = argparse.ArgumentParser()
logger = logging.getLogger(__name__)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def is_multigpu():
    """True when the NPROC environment variable requests more than one process."""
    nproc = os.environ.get('NPROC', '1')
    return bool(nproc) and int(nproc) > 1
def atomic_torch_save(object_, path):
    """Serialize to a temporary file, then rename, so `path` is never half-written."""
    scratch = path + '.tmp'
    torch.save(object_, scratch)
    shutil.move(scratch, path)
def parse_int_list(input_):
    """Parse a comma-separated string like '1,2,3' into a list of ints.

    Empty or None input yields an empty list.
    """
    if not input_:
        return []
    return [int(piece) for piece in input_.split(',')]
def parse_float_list(input_):
    """Parse a comma-separated string like '0.1,0.2' into a list of floats.

    Empty or None input yields an empty list.
    """
    if not input_:
        return []
    return [float(piece) for piece in input_.split(',')]
def one_or_list(parser):
    """Wrap a list-producing parser so a single-element result is unwrapped to a scalar."""
    # Keep the inner function name: argparse shows it in 'invalid ... value' errors.
    def parse_one_or_list(input_):
        parsed = parser(input_)
        return parsed[0] if len(parsed) == 1 else parsed
    return parse_one_or_list
def get_parameter_norm(model):
    """Return the joint L2 norm over all parameters of `model`."""
    squared_total = sum((param ** 2).sum() for param in model.parameters())
    return squared_total ** 0.5
def get_parameter_grad_norm(model):
    """Return the joint L2 norm over the gradients of all parameters that have one."""
    squared_total = sum((param.grad ** 2).sum()
                        for param in model.parameters()
                        if param.grad is not None)
    return squared_total ** 0.5
# ---------------------------------------------------------------------------
# Command-line interface. Flags are grouped by concern; integer flags are
# used as booleans (0/1) throughout this codebase.
# ---------------------------------------------------------------------------
# BUGFIX: --seed is passed to torch.manual_seed; parse it as an int like
# every other numeric flag instead of leaving it a string.
parser.add_argument("--seed", default=None, type=int)
# for DDP launcher
parser.add_argument("--rank", type=int, default=0)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--world_size", type=int, default=1)
# Input data
parser.add_argument('--data_dir', required=True)
parser.add_argument('--val_part', default=[], action='append')
parser.add_argument('--feature_dim', default=[1024,14,14], type=parse_int_list)
parser.add_argument('--vocab_json', default='vocab.json')
parser.add_argument('--load_features', type=int, default=1)
parser.add_argument('--loader_num_workers', type=int, default=0)
parser.add_argument('--use_local_copies', default=0, type=int)
parser.add_argument('--cleanup_local_copies', default=1, type=int)
parser.add_argument('--family_split_file', default=None)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=None, type=int)
parser.add_argument('--shuffle_train_data', default=1, type=int)
parser.add_argument('--oversample', type=int)
parser.add_argument('--oversample_shift', type=int)
parser.add_argument('--percent_of_data_for_training', default=1., type=float)
parser.add_argument('--simple_encoder', default=0, type=int)
# What type of model to use and which parts to train
parser.add_argument('--model_type', default='PG',
        choices=['RTfilm', 'Tfilm', 'FiLM',
            'PG', 'EE', 'PG+EE', 'Control-EE',
            'LSTM', 'CNN+LSTM', 'CNN+LSTM+SA',
            'Hetero', 'MAC',
            'SimpleNMN', 'RelNet', 'SHNMN',
            'ConvLSTM'])
parser.add_argument('--train_program_generator', default=1, type=int)
parser.add_argument('--train_execution_engine', default=1, type=int)
parser.add_argument('--baseline_train_only_rnn', default=0, type=int)
# Start from an existing checkpoint
parser.add_argument('--program_generator_start_from', default=None)
parser.add_argument('--execution_engine_start_from', default=None)
parser.add_argument('--baseline_start_from', default=None)
# RNN options (for PG)
parser.add_argument('--rnn_wordvec_dim', default=300, type=int)
parser.add_argument('--rnn_hidden_dim', default=256, type=int)
parser.add_argument('--rnn_num_layers', default=2, type=int)
parser.add_argument('--rnn_dropout', default=0, type=float)
parser.add_argument('--rnn_attention', action='store_true')
parser.add_argument('--rnn_nonautoreg', action='store_true')
parser.add_argument('--ns_vqa', action='store_true')
# Symbolic EE
parser.add_argument('--symbolic_ee', action='store_true')
# Module net / FiLMedNet options
parser.add_argument('--module_stem_num_layers', default=2, type=int)
parser.add_argument('--module_stem_subsample_layers', default=[], type=parse_int_list)
parser.add_argument('--module_stem_batchnorm', default=0, type=int)
parser.add_argument('--module_dim', default=128, type=int)
parser.add_argument('--stem_dim', default=64, type=int)
parser.add_argument('--module_residual', default=1, type=int)
parser.add_argument('--module_batchnorm', default=0, type=int)
parser.add_argument('--module_intermediate_batchnorm', default=0, type=int)
parser.add_argument('--use_color', default=0, type=int)
parser.add_argument('--nmn_type', default='chain1', choices = ['chain1', 'chain2', 'chain3', 'tree'])
# FiLM only options
parser.add_argument('--set_execution_engine_eval', default=0, type=int)
parser.add_argument('--program_generator_parameter_efficient', default=1, type=int)
parser.add_argument('--rnn_output_batchnorm', default=0, type=int)
parser.add_argument('--bidirectional', default=0, type=int)
parser.add_argument('--encoder_type', default='gru', type=str,
    choices=['linear', 'gru', 'lstm'])
parser.add_argument('--decoder_type', default='linear', type=str,
    choices=['linear', 'gru', 'lstm'])
parser.add_argument('--gamma_option', default='linear',
    choices=['linear', 'sigmoid', 'tanh', 'exp'])
parser.add_argument('--gamma_baseline', default=1, type=float)
parser.add_argument('--num_modules', default=4, type=int)
parser.add_argument('--module_stem_kernel_size', default=[3], type=parse_int_list)
parser.add_argument('--module_stem_stride', default=[1], type=parse_int_list)
parser.add_argument('--module_stem_padding', default=None, type=parse_int_list)
parser.add_argument('--module_num_layers', default=1, type=int)  # Only mnl=1 currently implemented
parser.add_argument('--module_batchnorm_affine', default=0, type=int)  # 1 overrides other factors
parser.add_argument('--module_dropout', default=5e-2, type=float)
parser.add_argument('--module_input_proj', default=1, type=int)  # Inp conv kernel size (0 for None)
parser.add_argument('--module_kernel_size', default=3, type=int)
parser.add_argument('--condition_method', default='bn-film', type=str,
    choices=['nothing', 'block-input-film', 'block-output-film', 'bn-film', 'concat', 'conv-film', 'relu-film'])
parser.add_argument('--condition_pattern', default=[], type=parse_int_list)  # List of 0/1's (len = # FiLMs)
parser.add_argument('--use_gamma', default=1, type=int)
parser.add_argument('--use_beta', default=1, type=int)
parser.add_argument('--use_coords', default=1, type=int)  # 0: none, 1: low usage, 2: high usage
parser.add_argument('--grad_clip', default=0, type=float)  # <= 0 for no grad clipping
parser.add_argument('--debug_every', default=float('inf'), type=float)  # inf for no pdb
parser.add_argument('--print_verbose_every', default=float('inf'), type=float)  # inf for min print
parser.add_argument('--film_use_attention', default=0, type=int)
# MAC options
parser.add_argument('--mac_write_unit', default='original', type=str)
parser.add_argument('--mac_read_connect', default='last', type=str)
parser.add_argument('--mac_read_unit', default='original', type=str)
parser.add_argument('--mac_vib_start', default=0, type=float)
parser.add_argument('--mac_vib_coof', default=0., type=float)
parser.add_argument('--mac_use_self_attention', default=1, type=int)
parser.add_argument('--mac_use_memory_gate', default=1, type=int)
parser.add_argument('--mac_nonlinearity', default='ELU', type=str)
parser.add_argument('--mac_question2output', default=1, type=int)
parser.add_argument('--mac_train_just_control', action='store_true')
parser.add_argument('--mac_question_embedding_dropout', default=0.08, type=float)
parser.add_argument('--mac_stem_dropout', default=0.18, type=float)
parser.add_argument('--mac_memory_dropout', default=0.15, type=float)
parser.add_argument('--mac_read_dropout', default=0.15, type=float)
parser.add_argument('--mac_use_prior_control_in_control_unit', default=0, type=int)
parser.add_argument('--variational_embedding_dropout', default=0.15, type=float)
parser.add_argument('--mac_embedding_uniform_boundary', default=1., type=float)
parser.add_argument('--hard_code_control', action="store_true")
parser.add_argument('--exponential_moving_average_weight', default=1., type=float)
# NMNFilm2 options
parser.add_argument('--nmn_use_film', default=0, type=int)
parser.add_argument('--nmn_use_simple_block', default=0, type=int)
parser.add_argument('--nmn_module_pool', default='mean', type=str)
parser.add_argument('--nmn_use_gammas', default='identity', type=str)
parser.add_argument('--nmn_learn_control', default=0, type=int)
parser.add_argument('--entropy_coef', default=0.0, type=float)
# CNN options (for baselines)
parser.add_argument('--cnn_res_block_dim', default=128, type=int)
parser.add_argument('--cnn_num_res_blocks', default=0, type=int)
parser.add_argument('--cnn_proj_dim', default=512, type=int)
parser.add_argument('--cnn_pooling', default='maxpool2',
    choices=['none', 'maxpool2'])
# Stacked-Attention options
parser.add_argument('--stacked_attn_dim', default=512, type=int)
parser.add_argument('--num_stacked_attn', default=2, type=int)
# Classifier options
parser.add_argument('--classifier_proj_dim', default=512, type=int)
parser.add_argument('--classifier_downsample', default='maxpool2',
    choices=['maxpool2', 'maxpool3', 'maxpool4', 'maxpool5', 'maxpool7', 'maxpoolfull', 'none',
             'avgpool2', 'avgpool3', 'avgpool4', 'avgpool5', 'avgpool7', 'avgpoolfull', 'aggressive',
             'hybrid'])
parser.add_argument('--classifier_fc_dims', default=[1024], type=parse_int_list)
parser.add_argument('--classifier_batchnorm', default=0, type=int)
parser.add_argument('--classifier_dropout', default=0.0, type=one_or_list(parse_float_list))
# Discriminator options
parser.add_argument('--discriminator_proj_dim', default=512, type=int)
parser.add_argument('--discriminator_downsample', default='maxpool2',
    choices=['maxpool2', 'maxpool3', 'maxpool4', 'maxpool5', 'maxpool7', 'maxpoolfull', 'none',
             'avgpool2', 'avgpool3', 'avgpool4', 'avgpool5', 'avgpool7', 'avgpoolfull', 'aggressive',
             'hybrid'])
parser.add_argument('--discriminator_fc_dims', default=[1024], type=parse_int_list)
parser.add_argument('--discriminator_dropout', default=0.0, type=one_or_list(parse_float_list))
# Optimization options
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--val_batch_size', default=512, type=int)
parser.add_argument('--num_iterations', default=100000, type=int)
parser.add_argument('--optimizer', default='Adam',
    choices=['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'ASGD', 'RMSprop', 'SGD'])
parser.add_argument('--learning_rate', default=5e-4, type=float)
parser.add_argument('--pg_learning_rate', default=None, type=float)
parser.add_argument('--beta1', default=0.9, type=float)
parser.add_argument('--beta2', default=0.999, type=float)
parser.add_argument('--eps', default=1e-8, type=float)
parser.add_argument('--reward_decay', default=0.9, type=float)
parser.add_argument('--same_reward', action="store_true", default=False)
parser.add_argument('--weight_decay', default=0, type=float)
parser.add_argument('--ewa_baseline', default=1, type=int)
parser.add_argument('--enforce_wellformed', default=False, action="store_true")
parser.add_argument('--temperature_increase', default=None, type=float)
# Output options
parser.add_argument('--checkpoint_path', default='{slurmid}.pt')
parser.add_argument('--allow_resume', action='store_true')
parser.add_argument('--load_ee_parameters', default=None, type=str)
parser.add_argument('--randomize_checkpoint_path', type=int, default=0)
parser.add_argument('--avoid_checkpoint_override', default=0, type=int)
parser.add_argument('--record_loss_every', default=1, type=int)
parser.add_argument('--checkpoint_every', default=400, type=int)
parser.add_argument('--validate_every', default=10000, type=int)
parser.add_argument('--time', default=0, type=int)
def main(args):
    """Entry point: validate flags, set up distributed/device state and data
    loaders, log the git revision for reproducibility, and run training."""
    if args.validate_every % args.checkpoint_every != 0:
        raise ValueError("must validate at iteration where checkpointing is also done")
    if is_multigpu():
        torch.distributed.init_process_group(backend='nccl')
        global device
        device = (torch.device('cuda:{}'.format(args.local_rank))
                  if torch.cuda.is_available()
                  else torch.device('cpu'))
    if args.seed is not None:
        torch.manual_seed(args.seed)
    # Log the current commit and any uncommitted changes for reproducibility.
    nmn_iwp_code = list(vr.__path__)[0]
    try:
        last_commit = subprocess.check_output(
            'cd {}; git log -n1'.format(nmn_iwp_code), shell=True).decode('utf-8')
        logger.info('LAST COMMIT INFO:')
        logger.info(last_commit)
    except subprocess.CalledProcessError:
        logger.info('Could not figure out the last commit')
    try:
        diff = subprocess.check_output(
            'cd {}; git diff'.format(nmn_iwp_code), shell=True).decode('utf-8')
        if diff:
            logger.info('GIT DIFF:')
            logger.info(diff)
    except subprocess.CalledProcessError:
        # BUGFIX: this message was a copy-paste of the commit one.
        logger.info('Could not figure out the diff')
    logger.info('Will save checkpoints to %s' % args.checkpoint_path)
    if not args.checkpoint_path:
        raise NotImplementedError('no default checkpoint path')
    # BUGFIX: vocab_json used to be joined with data_dir twice, yielding
    # data_dir/data_dir/vocab.json for relative data_dir paths; join once.
    args.vocab_json = os.path.join(args.data_dir, args.vocab_json)
    vocab = vr.utils.load_vocab(args.vocab_json)
    logger.info(args)
    question_families = None
    if args.family_split_file is not None:
        with open(args.family_split_file, 'r') as f:
            question_families = json.load(f)
    # The symbolic EE executes programs on scene graphs; neural models need
    # precomputed image features instead.
    scenes_needed = args.symbolic_ee
    features_needed = args.model_type != 'PG' and not args.symbolic_ee
    train_question_h5 = os.path.join(args.data_dir, 'train_questions.h5')
    train_features_h5 = os.path.join(args.data_dir, 'train_features.h5')
    train_scenes = os.path.join(args.data_dir, 'train_scenes.json')
    train_loader_kwargs = {
        'question_h5': train_question_h5,
        'feature_h5': train_features_h5 if features_needed else None,
        'scene_path': train_scenes if scenes_needed else None,
        'load_features': args.load_features,
        'vocab': vocab,
        'batch_size': args.batch_size,
        'shuffle': args.shuffle_train_data == 1,
        'question_families': question_families,
        'max_samples': args.num_train_samples,
        'num_workers': args.loader_num_workers,
        'percent_of_data': args.percent_of_data_for_training,
        'oversample': args.oversample,
        'oversample_shift': args.oversample_shift
    }
    train_loader = ClevrDataLoader(**train_loader_kwargs)
    val_loaders = []
    for val_part in args.val_part:
        val_question_h5 = os.path.join(args.data_dir, '{}_questions.h5'.format(val_part))
        val_features_h5 = os.path.join(args.data_dir, '{}_features.h5'.format(val_part))
        val_scenes = os.path.join(args.data_dir, '{}_scenes.json'.format(val_part))
        val_loader_kwargs = {
            'question_h5': val_question_h5,
            'feature_h5': val_features_h5 if features_needed else None,
            'scene_path': val_scenes if scenes_needed else None,
            'load_features': args.load_features,
            'vocab': vocab,
            'batch_size': args.val_batch_size,
            'question_families': question_families,
            'max_samples': args.num_val_samples,
            'num_workers': args.loader_num_workers,
        }
        val_loaders.append(ClevrDataLoader(**val_loader_kwargs))
    try:
        train_loop(args, train_loader, val_loaders)
    finally:
        # Always release HDF5 handles / worker processes.
        for loader in [train_loader] + val_loaders:
            loader.close()
def train_loop(args, train_loader, val_loaders):
    """Run the main optimization loop.

    Builds (or resumes) the program generator / execution engine / baseline
    model selected by ``args.model_type``, trains for ``args.num_iterations``
    batches, periodically evaluates on ``val_loaders``, and checkpoints both
    the latest and the best-per-validation-part model states.
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    program_generator, pg_kwargs, pg_optimizer = None, None, None
    execution_engine, ee_kwargs, ee_optimizer = None, None, None
    baseline_model, baseline_kwargs, baseline_optimizer = None, None, None
    baseline_type = None

    # Training statistics; serialized into every checkpoint's .json sidecar.
    stats = {
        'train_losses': [], 'train_rewards': [], 'train_losses_ts': [],
        'train_accs': [], 'val_accs_ts': [], 'alphas': [], 'grads': [],
        'model_t': 0, 'model_epoch': 0,
        'entropy': [], 'prog_acc': [], 'compute_time': []
    }
    for val_part in args.val_part:
        stats['best_' + val_part + '_acc'] = -1
        stats[val_part + "_accs"] = []

    models_that_need_pg = ['MAC', 'RTfilm', 'Tfilm', 'FiLM',
                           'PG', 'PG+EE', 'Control-EE', 'RelNet', 'ConvLSTM']
    models_that_need_ee = ['MAC', 'RTfilm', 'Tfilm', 'FiLM', 'EE', 'PG+EE',
                           'Control-EE', 'Hetero', 'SimpleNMN', 'SHNMN', 'RelNet', 'ConvLSTM']

    # Set up model
    if args.allow_resume and os.path.exists(args.checkpoint_path):
        # EITHER resume an existing experiment from its checkpoint
        logger.info("Trying to resume")
        if args.model_type in models_that_need_pg:
            program_generator, pg_kwargs = vr.utils.load_program_generator(args.checkpoint_path)
            program_generator.to(device)
            if is_multigpu():
                program_generator = DistributedDataParallel(program_generator, device_ids=[args.local_rank])
        if args.model_type in models_that_need_ee:
            if args.symbolic_ee:
                execution_engine, ee_kwargs = get_execution_engine(args)
            else:
                execution_engine, ee_kwargs = vr.utils.load_execution_engine(args.checkpoint_path)
            execution_engine.to(device)
            if is_multigpu():
                execution_engine = DistributedDataParallel(execution_engine, device_ids=[args.local_rank])
        with open(args.checkpoint_path + '.json', 'r') as f:
            checkpoint = json.load(f)
        for key in list(stats.keys()):
            if key in checkpoint:
                stats[key] = checkpoint[key]
        stats['model_epoch'] -= 1
        best_pg_state = get_state(program_generator)
        best_ee_state = get_state(execution_engine)
        # no support for PG+EE here
        best_baseline_state = None
    else:
        # OR start a new experiment (optionally from pretrained parts)
        if args.model_type in models_that_need_pg:
            program_generator, pg_kwargs = get_program_generator(args)
            logger.info('Here is the conditioning network:')
            logger.info(program_generator)
        if args.model_type in models_that_need_ee:
            execution_engine, ee_kwargs = get_execution_engine(args)
            logger.info('Here is the conditioned network:')
            logger.info(execution_engine)
        if args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
            baseline_model, baseline_kwargs = get_baseline_model(args)
            params = baseline_model.parameters()
            if args.baseline_train_only_rnn == 1:
                params = baseline_model.rnn.parameters()
            logger.info('Here is the baseline model')
            logger.info(baseline_model)
            baseline_type = args.model_type

    if args.load_ee_parameters:
        state = vr.utils.load_cpu(args.load_ee_parameters)
        execution_engine.load_state_dict(state['execution_engine_state'], strict=False)

    optim_method = getattr(torch.optim, args.optimizer)
    if program_generator:
        # The PG may train at its own learning rate (REINFORCE is sensitive).
        pg_learning_rate = args.pg_learning_rate
        if pg_learning_rate is None:
            pg_learning_rate = args.learning_rate
        pg_optimizer = optim_method(program_generator.parameters(),
                                    lr=pg_learning_rate,
                                    weight_decay=args.weight_decay,
                                    eps=args.eps)
    if execution_engine and not args.symbolic_ee:
        if args.mac_train_just_control:
            parameters = list(execution_engine.controlUnit.parameters())
            for inpUnit in execution_engine.inputUnits:
                parameters.extend(list(inpUnit.parameters()))
        else:
            parameters = execution_engine.parameters()
        ee_optimizer = optim_method(parameters,
                                    lr=args.learning_rate,
                                    weight_decay=args.weight_decay,
                                    eps=args.eps)
    if baseline_model:
        baseline_optimizer = optim_method(params,
                                          lr=args.learning_rate,
                                          weight_decay=args.weight_decay)

    loss_fn = torch.nn.CrossEntropyLoss().to(device)
    t, epoch, reward_moving_average = stats['model_t'], stats['model_epoch'], 0
    set_mode('train', [program_generator, execution_engine, baseline_model])
    logger.info('train_loader has {} samples'.format(len(train_loader.dataset)))
    for val_part, val_loader in zip(args.val_part, val_loaders):
        logger.info('{}_loader has {} samples'.format(val_part, len(val_loader.dataset)))

    # NOTE: removed dead state from the original: an unused per-example LRU
    # cache sized to the whole dataset, and several never-read timing counters.
    epoch_start_time = 0.0
    epoch_total_time = 0.0
    running_loss = 0.0
    while t < args.num_iterations:
        if (epoch > 0) and (args.time == 1):
            epoch_time = time.time() - epoch_start_time
            epoch_total_time += epoch_time
            # BUGFIX: a stray 'white' color argument used to be passed to
            # logger.info here, breaking %-style formatting of the record.
            logger.info('EPOCH PASS AVG TIME: ' + str(epoch_total_time / epoch))
            logger.info('Epoch Pass Time : ' + str(epoch_time))
        epoch_start_time = time.time()
        fwd_pass_time = 0.
        bwd_pass_time = 0.
        epoch += 1
        logger.info('Starting epoch %d' % epoch)
        batch_start_time = time.time()
        for batch in train_loader:
            compute_start_time = time.time()
            t += 1
            acc = None
            prog_acc = None
            entropy = None
            data_moving_start_time = time.time()
            (questions, indices, feats, scenes, answers, programs) = batch
            if isinstance(questions, list):
                questions = questions[0]
            # Trim trailing all-zero padding columns.
            questions = questions[:, :(questions.sum(0) > 0).sum()]
            questions_var = Variable(questions.to(device))
            feats_var = Variable(feats.to(device))
            answers_var = Variable(answers.to(device))
            if programs[0] is not None:
                programs_var = Variable(programs.to(device))
            # BUGFIX: measured from compute_start_time before, which folded
            # the bookkeeping above into the data-moving metric.
            data_moving_time = time.time() - data_moving_start_time
            reward = None

            if args.model_type == 'PG':
                # Train program generator supervised on ground-truth programs.
                pg_optimizer.zero_grad()
                loss = program_generator.log_likelihood(questions_var, programs_var).mean()
                loss.backward()
                pg_optimizer.step()
            elif args.model_type in ['EE', 'Hetero']:
                # Train execution engine with ground-truth programs.
                ee_optimizer.zero_grad()
                scores, _, _ = execution_engine(feats_var, programs_var, question=questions_var)
                full_loss = loss = loss_fn(scores, answers_var)
                acc = (scores.argmax(1) == answers_var).float().mean()
                full_loss.backward()
                ee_optimizer.step()
            elif args.model_type in ['Control-EE']:
                pg_optimizer.zero_grad()
                ee_optimizer.zero_grad()
                question_repr = program_generator(questions_var)
                scores, _, _ = execution_engine(feats_var, programs_var, question=question_repr)
                # BUGFIX: the loss was computed twice here; once is enough.
                loss = loss_fn(scores, answers_var)
                acc = (scores.argmax(1) == answers_var).float().mean()
                loss.backward()
                pg_optimizer.step()
                ee_optimizer.step()
            elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
                baseline_optimizer.zero_grad()
                baseline_model.zero_grad()
                scores = baseline_model(questions_var, feats_var)
                loss = loss_fn(scores, answers_var)
                loss.backward()
                baseline_optimizer.step()
            elif args.model_type == 'PG+EE':
                # Jointly train: EE by backprop, PG by REINFORCE on the
                # answer-correctness reward.
                programs_pred, token_logprobs = program_generator.forward(questions_var)
                if args.symbolic_ee:
                    preds = execution_engine(scenes, programs_pred)
                else:
                    with torch.set_grad_enabled(bool(args.train_execution_engine)):
                        scores, program_wellformed, _ = execution_engine(feats_var, programs_pred)
                    preds = scores.argmax(1).cpu()
                    if args.enforce_wellformed:
                        preds[~program_wellformed] = -1
                    loss = loss_fn(scores, answers_var)
                raw_reward = (preds == answers).float()
                acc = raw_reward.mean()
                if args.symbolic_ee:
                    loss = -acc
                # Exponentially-decayed moving-average reward baseline.
                reward_moving_average *= args.reward_decay
                reward_moving_average += (1.0 - args.reward_decay) * raw_reward.mean()
                centered_reward = raw_reward - (reward_moving_average if args.ewa_baseline else 0.5)
                entropy = -token_logprobs.sum(1).mean()
                # Program accuracy is diagnostic only (not used for training).
                min_length = min(programs_var.shape[1], programs_pred.shape[1])
                programs_pred = programs_pred[:, :min_length]
                programs_var = programs_var[:, :min_length]
                correct = (programs_pred == programs_var).int().sum(1) == min_length
                prog_acc = correct.float().mean()
                if args.train_execution_engine == 1:
                    ee_optimizer.zero_grad()
                    loss.backward()
                    ee_optimizer.step()
                if args.train_program_generator == 1:
                    pg_optimizer.zero_grad()
                    weights = centered_reward.to(device)[:, None]
                    if args.entropy_coef:
                        # maximizing entropy = using -logprobs as rewards
                        weights += args.entropy_coef * -token_logprobs.sum(1)[:, None].detach()
                    if args.same_reward:
                        weights = weights.mean()
                    surrogate_loss = (-token_logprobs * weights).sum(1).mean()
                    surrogate_loss.backward()
                    pg_optimizer.step()
            elif args.model_type == 'FiLM' or args.model_type == 'MAC':
                if args.set_execution_engine_eval == 1:
                    set_mode('eval', [execution_engine])
                forward_start_time = time.time()
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred)
                loss = loss_fn(scores, answers_var)
                full_loss = loss.clone()
                fwd_pass_time = time.time() - forward_start_time
                backward_start_time = time.time()
                profile_step = t % 66 == 0
                with torch.autograd.profiler.profile(enabled=profile_step, use_cuda=True) as prof:
                    pg_optimizer.zero_grad()
                    ee_optimizer.zero_grad()
                    if args.debug_every <= -2:
                        pdb.set_trace()
                    full_loss.backward()
                    if args.debug_every < float('inf'):
                        check_grad_num_nans(execution_engine, 'FiLMedNet' if args.model_type == 'FiLM' else args.model_type)
                        check_grad_num_nans(program_generator, 'FiLMGen')
                if profile_step:
                    with open(args.checkpoint_path + '.prof', 'wb') as dest:
                        pickle.dump(prof, dest)
                        print('profile dumped')
                bwd_pass_time = time.time() - backward_start_time
                if args.model_type == 'MAC':
                    if args.train_program_generator == 1 or args.train_execution_engine == 1:
                        if args.grad_clip > 0:
                            allMacParams = itertools.chain(program_generator.parameters(), execution_engine.parameters())
                            torch.nn.utils.clip_grad_norm_(allMacParams, args.grad_clip)
                        pg_optimizer.step()
                        ee_optimizer.step()
                else:
                    if args.train_program_generator == 1:
                        if args.grad_clip > 0:
                            # FIX: use the in-place clip_grad_norm_ (the
                            # underscore-less variant is deprecated).
                            torch.nn.utils.clip_grad_norm_(program_generator.parameters(), args.grad_clip)
                        pg_optimizer.step()
                    if args.train_execution_engine == 1:
                        if args.grad_clip > 0:
                            torch.nn.utils.clip_grad_norm_(execution_engine.parameters(), args.grad_clip)
                        ee_optimizer.step()
            elif args.model_type == 'Tfilm':
                if args.set_execution_engine_eval == 1:
                    set_mode('eval', [execution_engine])
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred, programs_var)
                loss = loss_fn(scores, answers_var)
                pg_optimizer.zero_grad()
                ee_optimizer.zero_grad()
                if args.debug_every <= -2:
                    pdb.set_trace()
                loss.backward()
                if args.debug_every < float('inf'):
                    check_grad_num_nans(execution_engine, 'TFiLMedNet' if args.model_type == 'Tfilm' else 'NMNFiLMedNet')
                    check_grad_num_nans(program_generator, 'FiLMGen')
                if args.train_program_generator == 1:
                    if args.grad_clip > 0:
                        torch.nn.utils.clip_grad_norm_(program_generator.parameters(), args.grad_clip)
                    pg_optimizer.step()
                if args.train_execution_engine == 1:
                    if args.grad_clip > 0:
                        torch.nn.utils.clip_grad_norm_(execution_engine.parameters(), args.grad_clip)
                    ee_optimizer.step()
            elif args.model_type == 'RTfilm':
                if args.set_execution_engine_eval == 1:
                    set_mode('eval', [execution_engine])
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred)
                loss = loss_fn(scores, answers_var)
                pg_optimizer.zero_grad()
                ee_optimizer.zero_grad()
                if args.debug_every <= -2:
                    pdb.set_trace()
                loss.backward()
                if args.debug_every < float('inf'):
                    check_grad_num_nans(execution_engine, 'RTFiLMedNet')
                    check_grad_num_nans(program_generator, 'FiLMGen')
                if args.train_program_generator == 1:
                    if args.grad_clip > 0:
                        torch.nn.utils.clip_grad_norm_(program_generator.parameters(), args.grad_clip)
                    pg_optimizer.step()
                if args.train_execution_engine == 1:
                    if args.grad_clip > 0:
                        torch.nn.utils.clip_grad_norm_(execution_engine.parameters(), args.grad_clip)
                    ee_optimizer.step()
            elif args.model_type in ['RelNet', 'ConvLSTM']:
                question_rep = program_generator(questions_var)
                scores = execution_engine(feats_var, question_rep)
                loss = loss_fn(scores, answers_var)
                pg_optimizer.zero_grad()
                ee_optimizer.zero_grad()
                loss.backward()
                pg_optimizer.step()
                ee_optimizer.step()
            else:
                raise ValueError('unknown model type: {}'.format(args.model_type))

            if torch.isnan(loss).item():
                print("NAN!")
                sys.exit(1)
            if t == args.num_iterations:
                break

            # Periodic loss logging.
            if t % args.record_loss_every == 0:
                running_loss += loss.item()
                avg_loss = running_loss / args.record_loss_every
                compute_time = time.time() - compute_start_time
                batch_time = time.time() - batch_start_time
                logger_format = "iter: {} t_b: {:.5f} t_c: {:.5f} t_m: {:.5f} t_fwd: {:.5f} t_bwd: {:.5f} loss: {:.5f}"
                logger_data = (
                    t, batch_time, compute_time,
                    data_moving_time, fwd_pass_time, bwd_pass_time, avg_loss)
                if acc is not None:
                    logger_format += " acc: {:.5f}"
                    logger_data += (acc.item(),)
                if prog_acc is not None:
                    logger_format += " prog_acc: {:.5f}"
                    logger_data += (prog_acc.item(),)
                if entropy is not None:
                    logger_format += " H: {:.9f}"
                    logger_data += (entropy.item(),)
                logger.info(logger_format.format(*logger_data))
                stats['train_losses'].append(avg_loss)
                if prog_acc:
                    stats['prog_acc'].append(prog_acc.item())
                if entropy:
                    stats['entropy'].append(entropy.item())
                stats['train_losses_ts'].append(t)
                if reward is not None:
                    stats['train_rewards'].append(reward.item())
                stats['compute_time'].append(compute_time)
                running_loss = 0.0
            else:
                running_loss += loss.item()
            batch_start_time = time.time()

            # Only rank 0 evaluates and checkpoints.
            if args.local_rank > 0:
                continue
            if t == 1 or t % args.validate_every == 0:
                logger.info('Checking training accuracy ... ')
                start = time.time()
                train_acc = check_accuracy(args, program_generator, execution_engine,
                                           baseline_model, train_loader)
                train_pass_time = (time.time() - start)
                logger.info('train pass time: ' + str(train_pass_time))
                logger.info('train accuracy is {}'.format(train_acc))
                logger.info('Checking validation accuracy ...')
                stats['train_accs'].append(train_acc)
                for val_part, val_loader in zip(args.val_part, val_loaders):
                    start = time.time()
                    val_acc = check_accuracy(args, program_generator, execution_engine,
                                             baseline_model, val_loader)
                    val_pass_time = (time.time() - start)
                    logger.info('{} pass time: {}'.format(val_part, val_pass_time))
                    logger.info('{} accuracy is {}'.format(val_part, val_acc))
                    stats['{}_accs'.format(val_part)].append(val_acc)
                stats['val_accs_ts'].append(t)
            if t == 1 or t % args.checkpoint_every == 0:
                pg_state = get_state(program_generator)
                ee_state = get_state(execution_engine)
                baseline_state = get_state(baseline_model)
                stats['model_t'] = t
                stats['model_epoch'] = epoch
                checkpoint = {
                    'args': args.__dict__,
                    'program_generator_kwargs': pg_kwargs,
                    'program_generator_state': pg_state,
                    'execution_engine_kwargs': ee_kwargs,
                    'execution_engine_state': ee_state,
                    'baseline_kwargs': baseline_kwargs,
                    'baseline_state': baseline_state,
                    'baseline_type': baseline_type,
                    'vocab': vocab
                }
                for k, v in stats.items():
                    checkpoint[k] = v
                # Save current model
                logger.info('Saving checkpoint to %s' % args.checkpoint_path)
                atomic_torch_save(checkpoint, args.checkpoint_path)
                # Save training status in a human-readable format
                del checkpoint['program_generator_state']
                del checkpoint['execution_engine_state']
                del checkpoint['baseline_state']
                with open(args.checkpoint_path + '.json', 'w') as f:
                    json.dump(checkpoint, f, indent=2, sort_keys=True)
                # Save the best model per validation part separately
                if t == 1 or t % args.validate_every == 0:
                    for val_part in args.val_part:
                        cur_acc = stats['{}_accs'.format(val_part)][-1]
                        best_acc_key = 'best_{}_acc'.format(val_part)
                        if cur_acc > stats.get(best_acc_key, -1):
                            best_path = '{}.{}.best'.format(args.checkpoint_path, val_part)
                            logger.info('Saving best so far checkpoint to ' + best_path)
                            stats[best_acc_key] = cur_acc
                            checkpoint['program_generator_state'] = pg_state
                            checkpoint['execution_engine_state'] = ee_state
                            checkpoint['baseline_state'] = baseline_state
                            atomic_torch_save(checkpoint, best_path)
def get_state(m):
    """Return a snapshot of a model's parameters for checkpointing.

    Unwraps DistributedDataParallel so checkpoints are wrapper-agnostic,
    returns None for a missing model, and an empty dict for the
    parameter-free symbolic executor.
    """
    if m is None:
        return None
    if isinstance(m, DistributedDataParallel):
        # Recurse into the wrapped module.
        return get_state(m.module)
    if isinstance(m, ClevrExecutor):
        # Symbolic executor carries no learnable parameters.
        return {}
    # Clone each tensor so later optimizer steps cannot mutate the snapshot.
    return {name: tensor.clone() for name, tensor in m.state_dict().items()}
def get_program_generator(args):
    """Build (or restore) the program generator; return ``(pg, kwargs)``.

    Depending on ``args.model_type`` the PG is a FiLM-style question encoder,
    a seq2seq program parser (optionally attentional), or a pretrained model
    loaded from disk. ``kwargs`` echoes the constructor arguments so they can
    be serialized into checkpoints.
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    if args.program_generator_start_from is not None:
        logger.info('start from pretrained PG')
        pg, kwargs = vr.utils.load_program_generator(args.program_generator_start_from)
        if args.temperature_increase:
            # Soften the decoder output distribution by scaling the logits.
            pg.decoder_linear.weight.data /= args.temperature_increase
            pg.decoder_linear.bias.data /= args.temperature_increase
    elif args.ns_vqa:
        pg, kwargs = Seq2seqParser(vocab), {}
    else:
        # Common RNN encoder/decoder configuration shared by all variants.
        kwargs = {
            'encoder_vocab_size': len(vocab['question_token_to_idx']),
            'decoder_vocab_size': len(vocab['program_token_to_idx']),
            'wordvec_dim': args.rnn_wordvec_dim,
            'hidden_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
        }
        if args.model_type in ['FiLM', 'Tfilm', 'RTfilm', 'MAC', 'Control-EE']:
            # FiLM-family generators emit conditioning vectors, not programs.
            kwargs['parameter_efficient'] = args.program_generator_parameter_efficient == 1
            kwargs['output_batchnorm'] = args.rnn_output_batchnorm == 1
            kwargs['bidirectional'] = args.bidirectional == 1
            kwargs['encoder_type'] = args.encoder_type
            kwargs['decoder_type'] = args.decoder_type
            kwargs['gamma_option'] = args.gamma_option
            kwargs['gamma_baseline'] = args.gamma_baseline
            kwargs['use_attention'] = args.film_use_attention == 1
            if args.model_type == 'FiLM' or args.model_type == 'MAC':
                kwargs['num_modules'] = args.num_modules
            elif args.model_type == 'Tfilm':
                # One module per (arity, depth) slot plus one extra.
                kwargs['num_modules'] = args.max_program_module_arity * args.max_program_tree_depth + 1
            elif args.model_type == 'RTfilm':
                treeArities = TreeGenerator().gen(args.tree_type_for_RTfilm)
                kwargs['num_modules'] = len(treeArities)
            if args.model_type == 'MAC' or args.model_type == 'Control-EE':
                # MAC/Control-EE consume the full context, without attention.
                kwargs['taking_context'] = True
                kwargs['use_attention'] = False
                kwargs['variational_embedding_dropout'] = args.variational_embedding_dropout
                kwargs['embedding_uniform_boundary'] = args.mac_embedding_uniform_boundary
                kwargs['module_num_layers'] = args.module_num_layers
                kwargs['module_dim'] = args.module_dim
                kwargs['debug_every'] = args.debug_every
            pg = FiLMGen(**kwargs)
        elif args.model_type in ['RelNet', 'ConvLSTM']:
            kwargs['bidirectional'] = args.bidirectional == 1
            kwargs['encoder_type'] = args.encoder_type
            kwargs['taking_context'] = True # return the last hidden state of LSTM
            pg = FiLMGen(**kwargs)
        elif args.rnn_attention:
            kwargs['autoregressive'] = not args.rnn_nonautoreg
            pg = Seq2SeqAtt(**kwargs)
        else:
            pg = Seq2Seq(**kwargs)
    pg.to(device)
    pg.train()
    if is_multigpu():
        pg = DistributedDataParallel(pg, device_ids=[args.local_rank])
    return pg, kwargs
def get_execution_engine(args):
    """Build (or restore) the execution engine; return ``(ee, kwargs)``.

    Dispatches on ``args.model_type`` to one of several architectures
    (FiLM, Tfilm, RTfilm, MAC, Hetero, SimpleNMN, SHNMN, RelNet, ConvLSTM,
    or the default ModuleNet). ``kwargs`` echoes the constructor arguments
    for checkpoint serialization.
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    if args.symbolic_ee:
        # Symbolic executor: no parameters, nothing to configure.
        return ClevrExecutor(vocab), {}
    if args.execution_engine_start_from is not None:
        logger.info("start from pretrained EE")
        ee, kwargs = vr.utils.load_execution_engine(args.execution_engine_start_from)
    else:
        # Base configuration shared by most neural EEs; model-specific
        # branches below extend or (for MAC/Hetero/SHNMN) replace it.
        kwargs = {
            'vocab': vocab,
            'feature_dim': args.feature_dim,
            'stem_batchnorm': args.module_stem_batchnorm == 1,
            'stem_num_layers': args.module_stem_num_layers,
            'stem_subsample_layers': args.module_stem_subsample_layers,
            'stem_kernel_size': args.module_stem_kernel_size,
            'stem_stride': args.module_stem_stride,
            'stem_padding': args.module_stem_padding,
            'stem_dim': args.stem_dim,
            'module_dim': args.module_dim,
            'module_kernel_size': args.module_kernel_size,
            'module_residual': args.module_residual == 1,
            'module_input_proj': args.module_input_proj,
            'module_batchnorm': args.module_batchnorm == 1,
            'classifier_proj_dim': args.classifier_proj_dim,
            'classifier_downsample': args.classifier_downsample,
            'classifier_fc_layers': args.classifier_fc_dims,
            'classifier_batchnorm': args.classifier_batchnorm == 1,
            'classifier_dropout': args.classifier_dropout,
        }
        if args.model_type == 'FiLM':
            kwargs['num_modules'] = args.num_modules
            kwargs['stem_kernel_size'] = args.module_stem_kernel_size
            kwargs['stem_stride'] = args.module_stem_stride
            kwargs['stem_padding'] = args.module_stem_padding
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_intermediate_batchnorm'] = args.module_intermediate_batchnorm == 1
            kwargs['module_batchnorm_affine'] = args.module_batchnorm_affine == 1
            kwargs['module_dropout'] = args.module_dropout
            kwargs['module_input_proj'] = args.module_input_proj
            kwargs['module_kernel_size'] = args.module_kernel_size
            kwargs['use_gamma'] = args.use_gamma == 1
            kwargs['use_beta'] = args.use_beta == 1
            kwargs['use_coords'] = args.use_coords
            kwargs['debug_every'] = args.debug_every
            kwargs['print_verbose_every'] = args.print_verbose_every
            kwargs['condition_method'] = args.condition_method
            kwargs['condition_pattern'] = args.condition_pattern
            ee = FiLMedNet(**kwargs)
        elif args.model_type == 'Tfilm':
            # Tree-FiLM: one module per (arity, depth) slot plus one extra.
            kwargs['num_modules'] = args.max_program_module_arity * args.max_program_tree_depth + 1
            kwargs['max_program_module_arity'] = args.max_program_module_arity
            kwargs['max_program_tree_depth'] = args.max_program_tree_depth
            kwargs['stem_kernel_size'] = args.module_stem_kernel_size
            kwargs['stem_stride'] = args.module_stem_stride
            kwargs['stem_padding'] = args.module_stem_padding
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_intermediate_batchnorm'] = args.module_intermediate_batchnorm == 1
            kwargs['module_batchnorm_affine'] = args.module_batchnorm_affine == 1
            kwargs['module_dropout'] = args.module_dropout
            kwargs['module_input_proj'] = args.module_input_proj
            kwargs['module_kernel_size'] = args.module_kernel_size
            kwargs['use_gamma'] = args.use_gamma == 1
            kwargs['use_beta'] = args.use_beta == 1
            kwargs['use_coords'] = args.use_coords
            kwargs['debug_every'] = args.debug_every
            kwargs['print_verbose_every'] = args.print_verbose_every
            kwargs['condition_method'] = args.condition_method
            kwargs['condition_pattern'] = args.condition_pattern
            ee = TFiLMedNet(**kwargs)
        elif args.model_type == 'RTfilm':
            # Tree layout is generated from the requested tree type.
            treeArities = TreeGenerator().gen(args.tree_type_for_RTfilm)
            kwargs['num_modules'] = len(treeArities)
            kwargs['treeArities'] = treeArities
            kwargs['tree_type_for_RTfilm'] = args.tree_type_for_RTfilm
            kwargs['share_module_weight_at_depth'] = args.share_module_weight_at_depth
            kwargs['stem_kernel_size'] = args.module_stem_kernel_size
            kwargs['stem_stride'] = args.module_stem_stride
            kwargs['stem_padding'] = args.module_stem_padding
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_intermediate_batchnorm'] = args.module_intermediate_batchnorm == 1
            kwargs['module_batchnorm_affine'] = args.module_batchnorm_affine == 1
            kwargs['module_dropout'] = args.module_dropout
            kwargs['module_input_proj'] = args.module_input_proj
            kwargs['module_kernel_size'] = args.module_kernel_size
            kwargs['use_gamma'] = args.use_gamma == 1
            kwargs['use_beta'] = args.use_beta == 1
            kwargs['use_coords'] = args.use_coords
            kwargs['debug_every'] = args.debug_every
            kwargs['print_verbose_every'] = args.print_verbose_every
            kwargs['condition_method'] = args.condition_method
            kwargs['condition_pattern'] = args.condition_pattern
            ee = RTFiLMedNet(**kwargs)
        elif args.model_type == 'MAC':
            # MAC ignores the shared base config and builds its own.
            kwargs = {
                'vocab': vocab,
                'feature_dim': args.feature_dim,
                'stem_num_layers': args.module_stem_num_layers,
                'stem_batchnorm': args.module_stem_batchnorm == 1,
                'stem_kernel_size': args.module_stem_kernel_size,
                'stem_subsample_layers': args.module_stem_subsample_layers,
                'stem_stride': args.module_stem_stride,
                'stem_padding': args.module_stem_padding,
                'num_modules': args.num_modules,
                'module_dim': args.module_dim,
                'stem_dim': args.stem_dim,
                #'module_dropout': args.module_dropout,
                'question_embedding_dropout': args.mac_question_embedding_dropout,
                'stem_dropout': args.mac_stem_dropout,
                'memory_dropout': args.mac_memory_dropout,
                'read_dropout': args.mac_read_dropout,
                'write_unit': args.mac_write_unit,
                'read_connect': args.mac_read_connect,
                'read_unit': args.mac_read_unit,
                'question2output': args.mac_question2output,
                'noisy_controls': bool(args.mac_vib_coof),
                'use_prior_control_in_control_unit': args.mac_use_prior_control_in_control_unit == 1,
                'use_self_attention': args.mac_use_self_attention,
                'use_memory_gate': args.mac_use_memory_gate,
                'nonlinearity': args.mac_nonlinearity,
                'classifier_fc_layers': args.classifier_fc_dims,
                'classifier_batchnorm': args.classifier_batchnorm == 1,
                'classifier_dropout': args.classifier_dropout,
                'use_coords': args.use_coords,
                'debug_every': args.debug_every,
                'print_verbose_every': args.print_verbose_every,
                'hard_code_control' : args.hard_code_control
            }
            ee = MAC(**kwargs)
        elif args.model_type == 'Hetero':
            # Hetero also uses its own reduced config.
            kwargs = {
                'vocab': vocab,
                'feature_dim': args.feature_dim,
                'stem_batchnorm': args.module_stem_batchnorm == 1,
                'stem_num_layers': args.module_stem_num_layers,
                'stem_kernel_size': args.module_stem_kernel_size,
                'stem_stride': args.module_stem_stride,
                'stem_padding': args.module_stem_padding,
                'module_dim': args.module_dim,
                'stem_dim': args.stem_dim,
                'module_batchnorm': args.module_batchnorm == 1,
            }
            ee = HeteroModuleNet(**kwargs)
        elif args.model_type == 'SimpleNMN':
            kwargs['use_film'] = args.nmn_use_film
            kwargs['forward_func'] = args.nmn_type
            # NOTE(review): the trailing comma makes 'use_color' a 1-tuple
            # (args.use_color,) — confirm whether SimpleModuleNet expects that.
            kwargs['use_color'] = args.use_color,
            ee = SimpleModuleNet(**kwargs)
        elif args.model_type == 'SHNMN':
            kwargs = {
                'vocab' : vocab,
                'feature_dim' : args.feature_dim,
                'stem_dim' : args.stem_dim,
                'module_dim': args.module_dim,
                'module_kernel_size' : args.module_kernel_size,
                'stem_subsample_layers': args.module_stem_subsample_layers,
                'stem_num_layers': args.module_stem_num_layers,
                'stem_kernel_size': args.module_stem_kernel_size,
                'stem_padding': args.module_stem_padding,
                'stem_batchnorm': args.module_stem_batchnorm == 1,
                'classifier_fc_layers': args.classifier_fc_dims,
                'classifier_proj_dim': args.classifier_proj_dim,
                'classifier_downsample': args.classifier_downsample,
                'classifier_batchnorm': args.classifier_batchnorm == 1,
                'classifier_dropout' : args.classifier_dropout,
                'hard_code_alpha' : args.hard_code_alpha,
                'hard_code_tau' : args.hard_code_tau,
                'tau_init' : args.tau_init,
                'alpha_init' : args.alpha_init,
                'which_chain' : args.which_chain,
                'model_type' : args.shnmn_type,
                'model_bernoulli' : args.model_bernoulli,
                'num_modules' : 3,
                'use_module' : args.use_module
            }
            ee = SHNMN(**kwargs)
        elif args.model_type == 'RelNet':
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['rnn_hidden_dim'] = args.rnn_hidden_dim
            ee = RelationNet(**kwargs)
        elif args.model_type == 'ConvLSTM':
            kwargs['rnn_hidden_dim'] = args.rnn_hidden_dim
            ee = ConvLSTM(**kwargs)
        else:
            # Default: a Neural Module Network.
            kwargs['use_film'] = args.nmn_use_film
            kwargs['use_simple_block'] = args.nmn_use_simple_block
            kwargs['mod_id_loss'] = False
            kwargs['kl_loss'] = False
            kwargs['module_pool'] = args.nmn_module_pool
            kwargs['module_num_layers'] = args.module_num_layers
            kwargs['module_use_gammas'] = args.nmn_use_gammas
            kwargs['learn_control'] = args.nmn_learn_control
            kwargs['rnn_dim'] = args.rnn_hidden_dim
            kwargs['type_anonymizer'] = False
            kwargs['discriminator_proj_dim'] = args.discriminator_proj_dim
            kwargs['discriminator_downsample'] = args.discriminator_downsample
            kwargs['discriminator_fc_layers'] = args.discriminator_fc_dims
            kwargs['discriminator_dropout'] = args.discriminator_dropout
            ee = ModuleNet(**kwargs)
    ee.to(device)
    ee.train()
    if is_multigpu():
        ee = DistributedDataParallel(ee, device_ids=[args.local_rank])
    return ee, kwargs
def get_baseline_model(args):
    """Build (or restore) a non-modular baseline; return ``(model, kwargs)``.

    Supports LSTM, CNN+LSTM, and CNN+LSTM+SA baselines. When restoring a
    checkpoint whose question vocab is a subset of the current one, the
    model's embedding table is expanded in place.

    NOTE(review): if ``args.model_type`` matches none of the branches and no
    checkpoint is given, ``model`` is unbound and the vocab check below
    raises UnboundLocalError — confirm callers only pass baseline types.
    """
    vocab = vr.utils.load_vocab(args.vocab_json)
    if args.baseline_start_from is not None:
        model, kwargs = vr.utils.load_baseline(args.baseline_start_from)
    elif args.model_type == 'LSTM':
        kwargs = {
            'vocab': vocab,
            'rnn_wordvec_dim': args.rnn_wordvec_dim,
            'rnn_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
            'fc_dims': args.classifier_fc_dims,
            'fc_use_batchnorm': args.classifier_batchnorm == 1,
            'fc_dropout': args.classifier_dropout,
        }
        model = LstmModel(**kwargs)
    elif args.model_type == 'CNN+LSTM':
        kwargs = {
            'vocab': vocab,
            'rnn_wordvec_dim': args.rnn_wordvec_dim,
            'rnn_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
            'cnn_feat_dim': args.feature_dim,
            'cnn_num_res_blocks': args.cnn_num_res_blocks,
            'cnn_res_block_dim': args.cnn_res_block_dim,
            'cnn_proj_dim': args.cnn_proj_dim,
            'cnn_pooling': args.cnn_pooling,
            'fc_dims': args.classifier_fc_dims,
            'fc_use_batchnorm': args.classifier_batchnorm == 1,
            'fc_dropout': args.classifier_dropout,
        }
        model = CnnLstmModel(**kwargs)
    elif args.model_type == 'CNN+LSTM+SA':
        kwargs = {
            'vocab': vocab,
            'rnn_wordvec_dim': args.rnn_wordvec_dim,
            'rnn_dim': args.rnn_hidden_dim,
            'rnn_num_layers': args.rnn_num_layers,
            'rnn_dropout': args.rnn_dropout,
            'cnn_feat_dim': args.feature_dim,
            'stacked_attn_dim': args.stacked_attn_dim,
            'num_stacked_attn': args.num_stacked_attn,
            'fc_dims': args.classifier_fc_dims,
            'fc_use_batchnorm': args.classifier_batchnorm == 1,
            'fc_dropout': args.classifier_dropout,
        }
        model = CnnLstmSaModel(**kwargs)
    if model.rnn.token_to_idx != vocab['question_token_to_idx']:
        # Make sure new vocab is superset of old
        for k, v in model.rnn.token_to_idx.items():
            assert k in vocab['question_token_to_idx']
            assert vocab['question_token_to_idx'][k] == v
        # Grow the token map and embedding table to the new vocab.
        for token, idx in vocab['question_token_to_idx'].items():
            model.rnn.token_to_idx[token] = idx
        kwargs['vocab'] = vocab
        model.rnn.expand_vocab(vocab['question_token_to_idx'])
    model.to(device)
    model.train()
    return model, kwargs
def set_mode(mode, models):
    """Put each model into 'train' or 'eval' mode.

    None entries and symbolic ClevrExecutor instances are skipped,
    since they carry no train/eval state.
    """
    assert mode in ['train', 'eval']
    for model in models:
        if model is None or isinstance(model, ClevrExecutor):
            continue
        # Dispatch to model.train() or model.eval() by name; the assert
        # above guarantees `mode` is one of the two.
        getattr(model, mode)()
def check_accuracy(args, program_generator, execution_engine, baseline_model, loader):
    """Compute validation accuracy for whichever model ``args.model_type`` selects.

    For 'PG' the metric is exact-match program accuracy; for every other
    model type it is answer accuracy. Models are switched to eval mode for
    the duration and back to train mode before returning.
    """
    set_mode('eval', [program_generator, execution_engine, baseline_model])
    num_correct, num_samples = 0, 0
    for batch in loader:
        (questions, _, feats, scenes, answers, programs) = batch
        if isinstance(questions, list):
            questions = questions[0]
        # Trim trailing all-padding question columns.
        questions = questions[:, :(questions.sum(0) > 0).sum()]
        questions_var = questions.to(device)
        feats_var = feats.to(device)
        if programs[0] is not None:
            programs_var = programs.to(device)
        # Per-batch work lives in a nested function so all intermediate
        # tensors go out of scope (and can be freed) as soon as it returns.
        def scope():
            nonlocal num_samples
            nonlocal num_correct
            scores = None # Use this for everything but PG
            if args.model_type == 'PG':
                #TODO(mnoukhov) change to scores for attention
                vocab = vr.utils.load_vocab(args.vocab_json)
                programs_pred, _ = program_generator.forward(questions_var)
                # Exact string match between predicted and gold programs.
                for i in range(questions.size(0)):
                    program_pred_str = vr.preprocess.decode(programs_pred[i].tolist(), vocab['program_idx_to_token'])
                    program_str = vr.preprocess.decode(programs[i].tolist(), vocab['program_idx_to_token'])
                    if program_pred_str == program_str:
                        num_correct += 1
                    num_samples += 1
                return
            elif args.model_type in ['EE', 'Hetero']:
                scores, _2, _3 = execution_engine(feats_var, programs_var)
            elif args.model_type == 'PG+EE':
                programs_pred, _ = program_generator.forward(questions_var, argmax=True)
                if isinstance(execution_engine, ClevrExecutor):
                    # NOTE(review): `preds` computed here is never counted
                    # below (the tally only runs when `scores` is set), so
                    # these batches do not contribute — confirm intended.
                    preds = execution_engine(scenes, programs_pred)
                else:
                    scores, _2, _3 = execution_engine(feats_var, programs_pred)
            elif args.model_type == 'Control-EE':
                questions_repr = program_generator(questions_var)
                scores, _2, _3 = execution_engine(feats_var, programs_var, question=questions_repr)
            elif args.model_type == 'FiLM' or args.model_type == 'RTfilm':
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred)
            elif args.model_type == 'Tfilm':
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred, programs_var)
            elif args.model_type == 'MAC':
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred, isTest=True)
            elif args.model_type in ['ConvLSTM', 'RelNet']:
                question_rep = program_generator(questions_var)
                scores = execution_engine(feats_var, question_rep)
            elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
                scores = baseline_model(questions_var, feats_var)
            elif args.model_type in ['SimpleNMN', 'SHNMN']:
                scores = execution_engine(feats_var, questions_var)
            else:
                raise NotImplementedError('model ', args.model_type, ' check_accuracy not implemented')
            if scores is not None:
                _, preds = scores.data.cpu().max(1)
                num_correct += (preds == answers).sum().item()
                num_samples += preds.size(0)
        # dirty trick to make pytorch free memory earlier
        with torch.no_grad():
            scope()
        if args.num_val_samples is not None and num_samples >= args.num_val_samples:
            break
    set_mode('train', [program_generator, execution_engine, baseline_model])
    acc = float(num_correct) / num_samples
    print("num check samples", num_samples)
    return acc
def check_grad_num_nans(model, model_name='model'):
    """Verify that no parameter gradient of *model* contains NaNs.

    Prints per-parameter NaN counts and drops into a debugger before
    raising, so training failures can be inspected interactively.

    Parameters:
    model : torch.nn.Module whose populated ``.grad`` tensors are checked.
    model_name : label used in the diagnostic output.

    Raises:
    Exception if any gradient contains at least one NaN.
    """
    grads = [p.grad for p in model.parameters() if p.grad is not None]
    num_nans = [np.sum(np.isnan(grad.data.cpu().numpy())) for grad in grads]
    if any(num_nan != 0 for num_nan in num_nans):
        print('Nans in ' + model_name + ' gradient!')
        print(num_nans)
        import pdb  # local import: only needed on this failure path
        pdb.set_trace()
        # Raise an instance with a message instead of the bare class
        # (`raise(Exception)` gave no information about what failed).
        raise Exception('NaN gradients detected in ' + model_name)
if __name__ == '__main__':
    # CLI entry point: parse flags, configure root logging, launch training.
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO,
        format="%(name)s: %(asctime)s: %(message)s")
    main(args)
| 60,588 | 45.642802 | 138 | py |
CLOSURE | CLOSURE-master/scripts/run_model.py | # This code is released under the MIT License in association with the following paper:
#
# CLOSURE: Assessing Systematic Generalization of CLEVR Models (https://arxiv.org/abs/1912.05783).
#
# Full copyright and license information (including third party attribution) in the NOTICE file (https://github.com/rizar/CLOSURE/NOTICE).
import argparse
import json
import random
import shutil
from termcolor import colored
import time
from tqdm import tqdm
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import numpy as np
import h5py
from scipy.misc import imread, imresize, imsave
import vr.utils as utils
import vr.programs
from vr.data import ClevrDataset, ClevrDataLoader
from vr.ns_vqa.clevr_executor import ClevrExecutor
from vr.ns_vqa.parser import Seq2seqParser
from vr.preprocess import tokenize, encode
from vr.models import *
# Command-line interface for evaluating trained CLEVR models.
parser = argparse.ArgumentParser()
# Checkpoints to evaluate (PG defaults to the EE checkpoint when omitted).
parser.add_argument('--program_generator', default=None)
parser.add_argument('--execution_engine', default=None)
parser.add_argument('--baseline_model', default=None)
parser.add_argument('--debug_every', default=float('inf'), type=float)
parser.add_argument('--use_gpu', default=torch.cuda.is_available(), type=int)
# For running on a preprocessed dataset
parser.add_argument('--data_dir', default=None, type=str)
parser.add_argument('--part', default='val', type=str)
# This will override the vocab stored in the checkpoint;
# we need this to run CLEVR models on human data
parser.add_argument('--vocab_json', default=None)
# For running on a single example
parser.add_argument('--question', default=None)
parser.add_argument('--image', default='img/CLEVR_val_000017.png')
parser.add_argument('--cnn_model', default='resnet101')
parser.add_argument('--cnn_model_stage', default=3, type=int)
parser.add_argument('--image_width', default=224, type=int)
parser.add_argument('--image_height', default=224, type=int)
parser.add_argument('--enforce_clevr_vocab', default=1, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_samples', default=None, type=int)
parser.add_argument('--num_last_words_shuffled', default=0, type=int) # -1 for all shuffled
parser.add_argument('--q_family', type=int, action='append')
parser.add_argument('--sample_argmax', type=int, default=1)
parser.add_argument('--temperature', default=None, type=float)
# FiLM models only
parser.add_argument('--gamma_option', default='linear',
  choices=['linear', 'sigmoid', 'tanh', 'exp', 'relu', 'softplus'])
parser.add_argument('--gamma_scale', default=1, type=float)
parser.add_argument('--gamma_shift', default=0, type=float)
parser.add_argument('--gammas_from', default=None) # Load gammas from file
parser.add_argument('--beta_option', default='linear',
  choices=['linear', 'sigmoid', 'tanh', 'exp', 'relu', 'softplus'])
parser.add_argument('--beta_scale', default=1, type=float)
parser.add_argument('--beta_shift', default=0, type=float)
parser.add_argument('--betas_from', default=None) # Load betas from file
# If this is passed, then save all predictions to this file
parser.add_argument('--output_h5', default=None)
parser.add_argument('--dump_module_info', action='store_true')
parser.add_argument('--output_preds', default=None)
parser.add_argument('--output_viz_dir', default='img/')
parser.add_argument('--output_program_stats_dir', default=None)
# Gradients captured by save_grad() hooks, keyed by name.
grads = {}
programs = {} # NOTE: Useful for zero-shot program manipulation when in debug mode
def main(args):
    """Load models and data, then evaluate on the requested dataset part.

    Falls back to the execution-engine checkpoint for the program generator
    when no PG checkpoint is given, and to the symbolic ClevrExecutor when
    no EE checkpoint is given.
    """
    if not args.program_generator:
        args.program_generator = args.execution_engine
    input_question_h5 = os.path.join(args.data_dir, '{}_questions.h5'.format(args.part))
    input_features_h5 = os.path.join(args.data_dir, '{}_features.h5'.format(args.part))
    input_scenes = os.path.join(args.data_dir, '{}_scenes.json'.format(args.part))
    vocab = load_vocab(args)
    pg, _ = utils.load_program_generator(args.program_generator)
    if pg:
        pg.save_activations = True
        if args.temperature:
            # Soften the decoder output distribution by scaling the logits.
            pg.decoder_linear.weight.data /= args.temperature
            pg.decoder_linear.bias.data /= args.temperature
    if args.execution_engine:
        ee, _ = utils.load_execution_engine(
            args.execution_engine, verbose=False)
        ee.noise_enabled = False
    else:
        # No neural EE given: execute programs symbolically on scene graphs.
        ee = ClevrExecutor(vocab)
    dtype = torch.FloatTensor
    if args.use_gpu == 1:
        dtype = torch.cuda.FloatTensor
    loader_kwargs = {
        'question_h5': input_question_h5,
        'feature_h5': input_features_h5,
        # Scene graphs are only needed by the symbolic executor.
        'scene_path': input_scenes if isinstance(ee, ClevrExecutor) else None,
        'vocab': vocab,
        'batch_size': args.batch_size,
    }
    if args.num_samples is not None and args.num_samples > 0:
        loader_kwargs['max_samples'] = args.num_samples
    if args.q_family:
        loader_kwargs['question_families'] = args.q_family
    with ClevrDataLoader(**loader_kwargs) as loader:
        with torch.no_grad():
            run_batch(args, pg, ee, loader, dtype)
def run_batch(args, pg, ee, loader, dtype):
    """Evaluate a program generator / execution engine pair over ``loader``.

    Prints answer accuracy (plus program accuracy and ground-truth program
    NLL when a seq2seq PG is used) and dumps per-sample outputs to HDF5.

    Parameters:
    args : parsed CLI namespace.
    pg : program generator (FiLMGen, Seq2seqParser, Seq2SeqAtt) or None.
    ee : neural execution engine module or symbolic ClevrExecutor.
    loader : ClevrDataLoader yielding
        (questions, images, feats, scenes, answers, programs) batches.
    dtype : tensor type selecting CPU vs CUDA execution.
    """
    if pg:
        pg.type(dtype)
        pg.eval()
    if ee and not isinstance(ee, ClevrExecutor):
        ee.type(dtype)
        ee.eval()
    all_scores = []
    all_probs = []
    all_preds = []
    all_correct = []
    all_programs = []
    all_groundtruth_programs = []
    all_questions = []
    all_correct_programs = []
    all_seq2seq_attentions = []
    # Fix: these were appended to under --dump_module_info but never
    # initialized, raising NameError.
    all_module_outputs = []
    all_mod_id_targets = []
    num_samples = 0
    total_nll = 0
    total_prob = 0
    start = time.time()
    for batch in tqdm(loader):
        assert(not pg or not pg.training)
        assert(isinstance(ee, ClevrExecutor) or not ee.training)
        questions, images, feats, scenes, answers, programs = batch
        questions_var = questions[0].type(dtype).long()
        # Trim trailing all-padding question columns.
        questions_var = questions_var[:, :(questions_var.sum(0) > 0).sum()]
        feats_var = feats.type(dtype)
        programs_var = programs.to(feats_var.device)
        question_repr = None
        programs_pred = None
        # PG
        if isinstance(pg, FiLMGen):
            question_repr = pg(questions_var)
        if isinstance(pg, (Seq2seqParser, Seq2SeqAtt)):
            programs_pred, _ = pg(questions_var, argmax=True)
            # Pad programs/questions to fixed widths so batches concatenate.
            all_groundtruth_programs.append(F.pad(programs_var, (0, 30 - programs_var.shape[1], 0, 0)))
            all_programs.append(F.pad(programs_pred, (0, 30 - programs_pred.shape[1], 0, 0)))
            all_questions.append(F.pad(questions_var, (0, 50 - questions_var.shape[1], 0, 0)))
            # Pad the attention list to a fixed 30 decoding steps.
            for _ in range(30 - len(pg._attn_weights)):
                pg._attn_weights.append(torch.zeros_like(pg._attn_weights[0]))
            attn_weights = [F.pad(a, (0, 50 - a.shape[2], 0, 0, 0, 0)) for a in pg._attn_weights]
            all_seq2seq_attentions.append(torch.cat(attn_weights, 1))
            nlls = pg.log_likelihood(questions_var, programs_var)
            total_nll += nlls.sum()
            total_prob += torch.exp(-nlls).sum()
        else:
            # No discrete parser: execute ground-truth programs.
            programs_pred = programs_var
        # EE
        # arg 1: the symbolic executor consumes scene graphs, neural EEs features
        if isinstance(ee, ClevrExecutor):
            pos_args = [scenes]
        else:
            pos_args = [feats_var]
        # arg 2: program tokens vs question embedding
        if isinstance(ee, (ModuleNet, ClevrExecutor)):
            pos_args.append(programs_pred)
        else:
            pos_args.append(question_repr)
        # kwargs
        kwargs = ({'save_activations': True}
                  if isinstance(ee, (FiLMedNet, ModuleNet, MAC))
                  else {})
        if isinstance(ee, ModuleNet) and ee.learn_control:
            kwargs['question'] = question_repr
        result = ee(*pos_args, **kwargs)
        # unpack outputs
        preds = scores = None
        if isinstance(ee, ModuleNet):
            scores, _2, mod_id_targets = result
        elif isinstance(ee, ClevrExecutor):
            preds = result
        else:
            scores = result
        # compute predictions
        if preds is None:
            probs = F.softmax(scores, dim=1)
            # Fix: scores/probs were written to the output file below but
            # never collected here.
            all_scores.append(scores.data.cpu().clone())
            all_probs.append(probs.data.cpu().clone())
            _, preds = scores.data.cpu().max(1)
        all_preds.append(preds.cpu().clone())
        all_correct.append(preds == answers)
        if programs_pred is not None:
            min_length = min(programs_var.shape[1], programs_pred.shape[1])
            programs_pred = programs_pred[:, :min_length]
            programs_var = programs_var[:, :min_length]
            correct_programs = (programs_pred == programs_var).int().sum(1) == min_length
            all_correct_programs.append(correct_programs.cpu().clone())
        if args.dump_module_info:
            all_module_outputs.append(ee.module_outputs.cpu().detach())
            all_mod_id_targets.append(mod_id_targets.cpu().detach())
        num_samples += preds.size(0)
    num_correct = torch.cat(all_correct, 0).sum().item()
    acc = float(num_correct) / num_samples
    print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
    if all_correct_programs:
        num_correct_programs = torch.cat(all_correct_programs, 0).sum().item()
        prog_acc = float(num_correct_programs) / num_samples
        print('Got %d / %d = %.2f programs correct' % (num_correct_programs, num_samples, 100 * prog_acc))
    if total_nll:
        print("GT program NLL: {}".format(total_nll / num_samples))
        print("Average probability of sampling a GT program: {}".format(total_prob / num_samples))
    # Fix: elapsed time was computed as start - time.time(), i.e. negative.
    print('%.2fs to evaluate' % (time.time() - start))
    model = args.execution_engine if args.execution_engine else args.program_generator
    output_path = ('output_' + args.part + "_" + model.split('.')[0].replace('/', '_') + ".h5"
                   if not args.output_h5
                   else args.output_h5)
    print('Writing output to "%s"' % output_path)
    with h5py.File(output_path, 'w') as fout:
        fout.create_dataset('correct', data=torch.cat(all_correct, 0).numpy())
        if all_scores:
            fout.create_dataset('scores', data=torch.cat(all_scores, 0).numpy())
            fout.create_dataset('probs', data=torch.cat(all_probs, 0).numpy())
        if all_correct_programs:
            fout.create_dataset('correct_programs', data=torch.cat(all_correct_programs, 0).numpy())
        if all_seq2seq_attentions:
            fout.create_dataset('seq2seq_attentions', data=torch.cat(all_seq2seq_attentions, 0).cpu().numpy())
        if all_programs:
            fout.create_dataset('programs', data=torch.cat(all_programs, 0).cpu().numpy())
        if all_groundtruth_programs:
            fout.create_dataset('groundtruth_programs', data=torch.cat(all_groundtruth_programs, 0).cpu().numpy())
        if all_questions:
            fout.create_dataset('questions', data=torch.cat(all_questions, 0).cpu().numpy())
    if args.output_preds is not None:
        # Fix: `vocab` was undefined here, and `all_preds` holds per-batch
        # tensors — flatten them before mapping ids to answer strings.
        vocab = load_vocab(args)
        all_preds_strings = []
        for pred_idx in torch.cat(all_preds, 0).tolist():
            all_preds_strings.append(vocab['answer_idx_to_token'][pred_idx])
        save_to_file(all_preds_strings, args.output_preds)
    if args.debug_every <= 1:
        import pdb  # Fix: pdb is not imported at module level in this file.
        pdb.set_trace()
    return
def load_vocab(args):
    """Load the vocab stored inside the first available model checkpoint.

    Preference order: baseline model, program generator, execution engine.
    """
    candidates = (args.baseline_model, args.program_generator, args.execution_engine)
    # First checkpoint path that was actually supplied on the command line.
    path = next((p for p in candidates if p is not None), None)
    return utils.load_cpu(path)['vocab']
def save_grad(name):
    """Build a tensor hook that records the incoming gradient.

    The returned callable stashes its argument into the module-level
    `grads` dict under `name`, for later inspection in debug sessions.
    """
    def _record(grad):
        grads[name] = grad
    return _record
def save_to_file(text, filename):
    """Write the given strings to `filename`, one per line, UTF-8 encoded."""
    with open(filename, mode='wt', encoding='utf-8') as handle:
        # print appends the single trailing newline the original emitted.
        print('\n'.join(text), file=handle)
def get_index(l, index, default=-1):
    """Return the position of `index` in `l`, or `default` when absent."""
    return l.index(index) if index in l else default
if __name__ == '__main__':
    # CLI entry point: parse flags and run evaluation.
    args = parser.parse_args()
    main(args)
| 11,896 | 36.648734 | 138 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/dataloader.py | import sys
import traceback
import logging
import random
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import torch
from torch.utils.data import IterableDataset
from streaming import StreamSampler
def news_sample(news, ratio):
    """Return exactly `ratio` items drawn from `news`.

    When `news` has fewer than `ratio` entries the list is padded with the
    id 0 (the padding/unknown news index); otherwise a uniform sample
    without replacement is drawn.
    """
    shortfall = ratio - len(news)
    if shortfall > 0:
        return news + [0] * shortfall
    return random.sample(news, ratio)
class DataLoaderTrain(IterableDataset):
def __init__(self,
data_dir,
filename_pat,
args,
world_size,
worker_rank,
cuda_device_idx,
news_index,
news_combined,
teacher_embs,
word_dict,
enable_prefetch=True,
enable_shuffle=False,
enable_gpu=True):
self.data_dir = data_dir
self.filename_pat = filename_pat
self.npratio = args.npratio
self.user_log_length = args.user_log_length
self.batch_size = args.batch_size
self.worker_rank = worker_rank
self.world_size = world_size
self.cuda_device_idx = cuda_device_idx
self.sampler = None
self.shuffle_buffer_size = args.shuffle_buffer_size
self.enable_prefetch = enable_prefetch
self.enable_shuffle = enable_shuffle
self.enable_gpu = enable_gpu
self.epoch = -1
self.num_teachers = args.num_teachers
self.teacher_embs = teacher_embs
self.news_combined = news_combined
self.news_index = news_index
self.word_dict = word_dict
def start(self):
self.epoch += 1
self.sampler = StreamSampler(
data_dir=self.data_dir,
filename_pat=self.filename_pat,
batch_size=self.batch_size,
worker_rank=self.worker_rank,
world_size=self.world_size,
enable_shuffle=self.enable_shuffle,
shuffle_buffer_size=self.shuffle_buffer_size,
shuffle_seed=self.epoch, # epoch id as shuffle random seed
)
self.sampler.__iter__()
def trans_to_nindex(self, nids):
return [self.news_index[i] if i in self.news_index else 0 for i in nids]
def pad_to_fix_len(self, x, fix_length, padding_front=True, padding_value=0):
if padding_front:
pad_x = [padding_value] * (fix_length-len(x)) + x[-fix_length:]
mask = [0] * (fix_length-len(x)) + [1] * min(fix_length, len(x))
else:
pad_x = x[-fix_length:] + [padding_value]*(fix_length-len(x))
mask = [1] * min(fix_length, len(x)) + [0] * (fix_length-len(x))
return pad_x, mask
def _produce(self):
# need to reset cuda device in produce thread.
if self.enable_gpu:
torch.cuda.set_device(self.cuda_device_idx)
try:
self.epoch += 1
self.sampler = StreamSampler(
data_dir=self.data_dir,
filename_pat=self.filename_pat,
batch_size=self.batch_size,
worker_rank=self.worker_rank,
world_size=self.world_size,
enable_shuffle=self.enable_shuffle,
shuffle_seed=self.epoch, # epoch id as shuffle random seed
)
for batch in self.sampler:
if self.stopped:
break
context = self._process(batch)
self.outputs.put(context)
self.aval_count += 1
except:
traceback.print_exc(file=sys.stdout)
self.pool.shutdown(wait=False)
raise
def start_async(self):
self.aval_count = 0
self.stopped = False
self.outputs = Queue(10)
self.pool = ThreadPoolExecutor(1)
self.pool.submit(self._produce)
def _process(self, batch):
batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []
teacher_history_batch, teacher_candidate_batch = [[] for _ in range(
self.num_teachers)], [[] for _ in range(self.num_teachers)]
for line in batch:
click_docs = line[3].split()
sess_pos = line[4].split()
sess_neg = line[5].split()
click_docs, log_mask = self.pad_to_fix_len(
self.trans_to_nindex(click_docs), self.user_log_length)
user_feature = self.news_combined[click_docs]
pos = self.trans_to_nindex(sess_pos)
neg = self.trans_to_nindex(sess_neg)
label = random.randint(0, self.npratio)
sample_news = neg[:label] + pos + neg[label:]
news_feature = self.news_combined[sample_news]
for i in range(self.num_teachers):
teacher_history_batch[i].append(
self.teacher_embs[i][click_docs])
teacher_candidate_batch[i].append(
self.teacher_embs[i][sample_news])
user_feature_batch.append(user_feature)
log_mask_batch.append(log_mask)
news_feature_batch.append(news_feature)
label_batch.append(label)
if self.enable_gpu:
user_feature_batch = torch.LongTensor(user_feature_batch).cuda()
log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
news_feature_batch = torch.LongTensor(news_feature_batch).cuda()
label_batch = torch.LongTensor(label_batch).cuda()
for i in range(self.num_teachers):
teacher_history_batch[i] = torch.FloatTensor(
teacher_history_batch[i]).cuda()
teacher_candidate_batch[i] = torch.FloatTensor(
teacher_candidate_batch[i]).cuda()
else:
user_feature_batch = torch.LongTensor(user_feature_batch)
log_mask_batch = torch.FloatTensor(log_mask_batch)
news_feature_batch = torch.LongTensor(news_feature_batch)
label_batch = torch.LongTensor(label_batch)
for i in range(self.num_teachers):
teacher_history_batch[i] = torch.FloatTensor(
teacher_history_batch[i])
teacher_candidate_batch[i] = torch.FloatTensor(
teacher_candidate_batch[i])
return user_feature_batch, log_mask_batch, news_feature_batch, label_batch, teacher_history_batch, teacher_candidate_batch
def __iter__(self):
"""Implement IterableDataset method to provide data iterator."""
logging.info("DataLoader __iter__()")
if self.enable_prefetch:
self.join()
self.start_async()
else:
self.start()
return self
def __next__(self):
if self.sampler and self.sampler.reach_end() and self.aval_count == 0:
raise StopIteration
if self.enable_prefetch:
next_batch = self.outputs.get()
self.outputs.task_done()
self.aval_count -= 1
else:
next_batch = self._process(self.sampler.__next__())
return next_batch
    def join(self):
        """Stop the prefetch producer and release the sampler.

        Safe to call more than once: after the first call ``self.sampler``
        is None and the body becomes a no-op.
        """
        # Signal the producer loop (checked inside _produce) to stop.
        self.stopped = True
        if self.sampler:
            if self.enable_prefetch:
                # Drain anything the producer already queued so that the
                # outputs.join() below cannot block forever on unfinished
                # tasks that nobody will ever consume.
                while self.outputs.qsize() > 0:
                    self.outputs.get()
                    self.outputs.task_done()
                self.outputs.join()
                # Wait for the producer thread to finish before tearing down.
                self.pool.shutdown(wait=True)
                logging.info("shut down pool.")
            self.sampler = None
class DataLoaderTest(DataLoaderTrain):
    """Streaming data loader for evaluation.

    Unlike ``DataLoaderTrain``, candidate/history news are looked up in a
    precomputed news-embedding table (``news_scoring``) instead of raw
    token ids, and ``_process`` returns the full per-impression click
    labels rather than sampled negatives.
    """

    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_scoring,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        # Deliberately does not call DataLoaderTrain.__init__ -- only the
        # attributes needed at evaluation time are initialised here.
        self.data_dir = data_dir
        self.filename_pat = filename_pat
        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size
        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx
        self.sampler = None
        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        self.epoch = -1
        self.news_scoring = news_scoring
        self.news_index = news_index
        self.word_dict = word_dict

    def _build_sampler(self):
        """Advance the epoch counter and build the epoch's StreamSampler.

        Extracted so start() and _produce() share one construction path
        (they previously duplicated it).
        """
        self.epoch += 1
        return StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )

    def start(self):
        """Synchronous start: create the sampler and open its iterator."""
        self.sampler = self._build_sampler()
        self.sampler.__iter__()

    def _produce(self):
        """Prefetch worker: stream raw batches, process them, queue results."""
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.sampler = self._build_sampler()
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
        except Exception:
            # Log the failure from the worker thread, tear the pool down,
            # and re-raise so the error is not silently swallowed.
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise

    def _process(self, batch):
        """Turn raw TSV behaviour rows into model-ready evaluation inputs.

        Returns (user_feature_batch, log_mask_batch, news_feature_batch,
        label_batch); the last two stay as Python lists because every
        impression has a different number of candidates.
        """
        batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
        user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []
        for line in batch:
            # Column 3: space-separated clicked-news ids (user history),
            # padded/truncated to the fixed log length.
            click_docs = line[3].split()
            click_docs, log_mask = self.pad_to_fix_len(
                self.trans_to_nindex(click_docs), self.user_log_length)
            user_feature = self.news_scoring[click_docs]
            # Column 4: "newsid-label" pairs for the impression; parse once.
            impressions = line[4].split()
            sample_news = self.trans_to_nindex(
                [i.split('-')[0] for i in impressions])
            labels = [int(i.split('-')[1]) for i in impressions]
            news_feature = self.news_scoring[sample_news]
            user_feature_batch.append(user_feature)
            log_mask_batch.append(log_mask)
            news_feature_batch.append(news_feature)
            label_batch.append(np.array(labels))
        # Only the fixed-size history tensors are stacked into tensors.
        if self.enable_gpu:
            user_feature_batch = torch.FloatTensor(user_feature_batch).cuda()
            log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
        else:
            user_feature_batch = torch.FloatTensor(user_feature_batch)
            log_mask_batch = torch.FloatTensor(log_mask_batch)
        return user_feature_batch, log_mask_batch, news_feature_batch, label_batch
| 11,565 | 35.71746 | 130 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/utils.py | import logging
import os
import sys
import torch
import numpy as np
import argparse
import re
from tnlrv3.modeling import TuringNLRv3ForSequenceClassification
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
from tnlrv3.tokenization_tnlrv3 import TuringNLRv3Tokenizer
from transformers import BertTokenizer, BertConfig, BertModel
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
# Registry mapping the --model_type CLI value to the matching
# (config class, model class, tokenizer class) triple.
MODEL_CLASSES = {
    'tnlrv3': (TuringNLRv3Config, TuringNLRv3ForSequenceClassification, TuringNLRv3Tokenizer),
    'bert': (BertConfig, BertModel, BertTokenizer),
    'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)
}
def word_tokenize(sent):
    """Lower-case *sent* and split it into word and punctuation tokens.

    Non-string inputs (e.g. missing titles) yield an empty list.
    """
    if not isinstance(sent, str):
        return []
    return re.findall(r'[\w]+|[.,!?;|]', sent.lower())
def str2bool(v):
    """Parse a CLI flag value into a bool (for argparse ``type=``).

    Accepts actual bools as-is; raises ArgumentTypeError otherwise when
    the string is not a recognised yes/no spelling.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def init_hvd_cuda(enable_hvd=True, enable_gpu=True):
    """Optionally initialise Horovod and pin this worker's CUDA device.

    Returns (world_size, rank, local_rank); all three fall back to
    single-worker values (1, 0, 0) when Horovod is disabled.
    """
    if enable_hvd:
        import horovod.torch as hvd
        hvd.init()
        logging.info(
            f"hvd_size:{hvd.size()}, hvd_rank:{hvd.rank()}, hvd_local_rank:{hvd.local_rank()}"
        )
        hvd_size, hvd_rank, hvd_local_rank = hvd.size(), hvd.rank(), hvd.local_rank()
    else:
        hvd_size, hvd_rank, hvd_local_rank = 1, 0, 0
    if enable_gpu:
        # One GPU per local rank.
        torch.cuda.set_device(hvd_local_rank)
    return hvd_size, hvd_rank, hvd_local_rank
def setuplogger():
    """Configure the root logger to emit INFO-level records to stdout."""
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(
        logging.Formatter("[%(levelname)s %(asctime)s] %(message)s"))
    root.addHandler(stream_handler)
def dump_args(args):
    """Log every public attribute of the parsed-args namespace, one per line."""
    for arg in dir(args):
        if arg.startswith("_"):
            continue
        logging.info(f"args[{arg}]={getattr(args, arg)}")
def acc(y_true, y_hat):
    """Top-1 accuracy of prediction scores against integer class labels."""
    preds = torch.argmax(y_hat, dim=-1)
    n_total = y_true.shape[0]
    n_hit = torch.sum(y_true == preds)
    # .data detaches the count from the graph before the float division.
    return n_hit.data.float() * 1.0 / n_total
def dcg_score(y_true, y_score, k=10):
    """Discounted cumulative gain over the top-*k* items ranked by score."""
    top_k = np.argsort(y_score)[::-1][:k]
    gains = 2 ** np.take(y_true, top_k) - 1
    # Log-rank discount: position i (0-based) is divided by log2(i + 2).
    discounts = np.log2(np.arange(len(top_k)) + 2)
    return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
    """DCG of the predicted ranking normalised by the ideal (label-sorted) DCG."""
    ideal = dcg_score(y_true, y_true, k)
    return dcg_score(y_true, y_score, k) / ideal
def mrr_score(y_true, y_score):
    """Mean reciprocal rank of the positive items under the score ranking."""
    ranked_labels = np.take(y_true, np.argsort(y_score)[::-1])
    reciprocal = ranked_labels / (np.arange(len(ranked_labels)) + 1)
    return np.sum(reciprocal) / np.sum(y_true)
def load_matrix(embedding_file_path, word_dict, word_embedding_dim):
    """Build an embedding matrix for the vocabulary from a GloVe-style file.

    Row 0 is reserved (padding); words absent from the file keep zero
    vectors.  Returns (matrix, list of words actually covered).  When
    *embedding_file_path* is None the zero matrix is returned unchanged.
    """
    embedding_matrix = np.zeros(shape=(len(word_dict) + 1,
                                       word_embedding_dim))
    have_word = []
    if embedding_file_path is not None:
        with open(embedding_file_path, 'rb') as f:
            # Each line: <word> <v1> <v2> ... <vD> (whitespace separated).
            for raw_line in f:
                fields = raw_line.split()
                word = fields[0].decode()
                if word in word_dict:
                    vector = [float(x) for x in fields[1:]]
                    embedding_matrix[word_dict[word]] = np.array(vector)
                    have_word.append(word)
    return embedding_matrix, have_word
def latest_checkpoint(directory):
    """Return the checkpoint path with the highest epoch number, or None.

    File names are expected to look like ``epoch-<N>.pt``; returns None
    when the directory is missing or empty.
    """
    if not os.path.exists(directory):
        return None
    by_epoch = {}
    for fname in os.listdir(directory):
        # "epoch-3.pt" -> 3
        epoch_id = int(fname.split('.')[-2].split('-')[-1])
        by_epoch[epoch_id] = fname
    if not by_epoch:
        return None
    return os.path.join(directory, by_epoch[max(by_epoch.keys())])
def get_checkpoint(directory, ckpt_name):
    """Return the path of *ckpt_name* inside *directory* if it exists, else None."""
    candidate = os.path.join(directory, ckpt_name)
    return candidate if os.path.exists(candidate) else None
| 4,173 | 27.589041 | 94 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/run.py | import numpy as np
import torch
import logging
from tqdm.auto import tqdm
import torch.optim as optim
import utils
import os
from pathlib import Path
import random
from dataloader import DataLoaderTrain, DataLoaderTest
from torch.utils.data import Dataset, DataLoader
from streaming import get_stat, get_worker_files
import pickle
from parameters import parse_args
from preprocess import read_news_bert, get_doc_input_bert
from model_bert import Model
def train(args):
    """Train the distilled student model against multiple frozen teachers.

    Loads each teacher's user-encoder weights and precomputed news
    embeddings, optionally warm-starts the student branch from a
    pretrained checkpoint, freezes everything except the configured BERT
    layers, then runs the distillation loop and saves one checkpoint per
    epoch (rank 0 only).
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    # NOTE(review): this ckpt_path is never used -- it is recomputed
    # below before loading; looks like dead code.
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    # Per-file sample counts, used only to log this worker's share.
    stat = get_stat(args.train_data_dir, args.filename_pat)
    print(stat)
    data_paths = get_worker_files(args.train_data_dir,
                                  hvd_rank, hvd_size, args.filename_pat, args.enable_shuffle, 0
                                  )
    sample_num = 0
    for file in data_paths:
        sample_num += stat[file]
    logging.info("[{}] contains {} samples {} steps".format(
        hvd_rank, sample_num, sample_num // args.batch_size))
    # Tokenize the news corpus once; title ids and attention masks are
    # concatenated along the last axis into one int matrix.
    news, news_index, category_dict, subcategory_dict = read_news_bert(
        os.path.join(args.train_data_dir, 'news.tsv'), args, mode='train'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    news_combined = np.concatenate([news_title, news_title_attmask], axis=-1)
    model = Model(args)
    teacher_embs = []
    model_dict = model.state_dict()
    loaded_key = []
    remain_key = list(model_dict.keys())
    # Copy each teacher's user-encoder weights into model.teachers[i]
    # and load its precomputed news embeddings from disk.
    for i, (teacher_ckpt, teacher_emb) in enumerate(zip(args.teacher_ckpts, args.teacher_emb_paths)):
        ckpt = torch.load(teacher_ckpt, map_location='cpu')
        teacher_dict = ckpt["model_state_dict"]
        for k, v in teacher_dict.items():
            if not k.startswith('user_encoder'):
                continue
            # Remap 'user_encoder.*' -> 'teachers.<i>.*'.
            key = '.'.join(['teachers', str(i)] + k.split('.')[1:])
            model_dict[key].copy_(v)
            loaded_key.append(key)
            remain_key.remove(key)
        del ckpt
        with open(teacher_emb, 'rb') as f:
            teacher_embs.append(pickle.load(f))
    if args.use_pretrain_model:
        # Warm-start the student branch ('student.*' keys only).
        ckpt = torch.load(args.pretrain_model_path, map_location='cpu')
        pretrained_dict = ckpt["model_state_dict"]
        for k, v in pretrained_dict.items():
            if not k.startswith('student'):
                continue
            # key = 'student.' + k
            model_dict[k].copy_(v)
            loaded_key.append(k)
            remain_key.remove(k)
    # NOTE(review): updating model_dict with itself is a no-op; the copy_
    # calls above already mutated the tensors in place.
    model_dict.update(model_dict)
    model.load_state_dict(model_dict)
    if hvd_rank == 0:
        logging.info(f"loaded teacher models: {args.teacher_ckpts}")
        print(f'{len(loaded_key)} loaded parameters:')
        for k in loaded_key:
            print(f'\t{k}')
        print(f'{len(remain_key)} initialized parameters:')
        for k in remain_key:
            print(f'\t{k}')
    torch.cuda.empty_cache()
    # Teachers are frozen; only the student learns.
    for param in model.teachers.parameters():
        param.requires_grad = False
    # Freeze the whole student BERT, then re-enable just the configured
    # transformer blocks (module path differs per backbone type).
    if args.model_type == 'tnlrv3':
        for param in model.student.news_encoder.bert_model.parameters():
            param.requires_grad = False
        for index, layer in enumerate(model.student.news_encoder.bert_model.bert.encoder.layer):
            if index in args.bert_trainable_layer:
                logging.info(f"finetune block {index}")
                for param in layer.parameters():
                    param.requires_grad = True
    else:
        for param in model.news_encoder.bert_model.parameters():
            param.requires_grad = False
        for index, layer in enumerate(model.news_encoder.bert_model.encoder.layer):
            if index in args.bert_trainable_layer:
                logging.info(f"finetune block {index}")
                for param in layer.parameters():
                    param.requires_grad = True
    word_dict = None
    if args.load_ckpt_name is not None:
        # Resume the full model state from an explicit checkpoint.
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
        checkpoint = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        logging.info(f"Model loaded from {ckpt_path}")
    if args.enable_gpu:
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, amsgrad=True)
    if hvd_rank == 0:
        print(model)
        for name, param in model.named_parameters():
            print(name, param.requires_grad)
    if args.enable_hvd:
        # Synchronise the initial state across workers and wrap the
        # optimizer for averaged gradient all-reduce.
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
        compression = hvd.Compression.none
        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            compression=compression,
            op=hvd.Average)
    dataloader = DataLoaderTrain(
        teacher_embs=teacher_embs,
        news_index=news_index,
        news_combined=news_combined,
        word_dict=word_dict,
        data_dir=args.train_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=True,
        enable_gpu=args.enable_gpu,
    )
    if args.tensorboard is not None:
        from torch.utils.tensorboard import SummaryWriter
        # One event-file directory per worker.
        writer = SummaryWriter(log_dir=f'{args.tensorboard}/worker_{hvd_rank}')
    logging.info('Training...')
    g_step = 0
    for ep in range(args.start_epoch, args.epochs):
        LOSS, ACC = 0.0, 0.0
        for cnt, (log_ids, log_mask, input_ids, targets, teacher_history, teacher_candidate) in enumerate(dataloader):
            if cnt > args.max_steps_per_epoch:
                break
            # Forward pass returns the combined loss plus its components
            # and the student's candidate scores.
            total_loss, distill_loss, emb_loss, target_loss, y_student = model(
                log_ids, log_mask, input_ids, targets, teacher_history, teacher_candidate)
            accuracy = utils.acc(targets, y_student)
            LOSS += total_loss
            ACC += accuracy
            if args.tensorboard:
                writer.add_scalar("total_loss", total_loss, g_step)
                writer.add_scalar("emb_loss", emb_loss, g_step)
                writer.add_scalar("distill_loss", distill_loss, g_step)
                writer.add_scalar("target_loss", target_loss, g_step)
                writer.add_scalar("acc", accuracy, g_step)
                writer.add_scalar("W", model.W, g_step)
            g_step += 1
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            if cnt % args.log_steps == 0:
                logging.info(
                    '[{}] Ed: {}, train_loss: {:.5f}, acc: {:.5f}'.format(
                        hvd_rank, cnt * args.batch_size, LOSS.data / cnt, ACC / cnt))
        print(ep + 1)
        # save model last of epoch
        if hvd_rank == 0:
            ckpt_path = os.path.join(args.model_dir, f'epoch-{ep+1}.pt')
            torch.save(
                {
                    'model_state_dict': model.state_dict(),
                    'category_dict': category_dict,
                    'word_dict': word_dict,
                    'subcategory_dict': subcategory_dict
                }, ckpt_path)
            logging.info(f"Model saved to {ckpt_path}")
    dataloader.join()
def test(args):
    """Evaluate a trained student model on the test split.

    Encodes every news article once with the student news encoder,
    reports a random-pair document-similarity diagnostic, then scores
    each impression and aggregates AUC/MRR/nDCG@5/nDCG@10 across
    Horovod workers.
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    assert ckpt_path is not None, 'No ckpt found'
    checkpoint = torch.load(ckpt_path)
    # The vocabulary dicts travel inside the checkpoint (see train()).
    subcategory_dict = checkpoint['subcategory_dict']
    category_dict = checkpoint['category_dict']
    word_dict = checkpoint['word_dict']
    model = Model(args)
    if args.enable_gpu:
        model.cuda()
    model.load_state_dict(checkpoint['model_state_dict'])
    logging.info(f"Model loaded from {ckpt_path}")
    if args.enable_hvd:
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    model.eval()
    torch.set_grad_enabled(False)
    # mode='test' returns only (news, news_index); the dicts above come
    # from the checkpoint instead.
    news, news_index = read_news_bert(
        os.path.join(args.test_data_dir, 'news.tsv'), args, mode='test'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    news_combined = np.concatenate([news_title, news_title_attmask], axis=1)

    class NewsDataset(Dataset):
        # Wraps the token-id matrix so news can be batched for encoding.
        def __init__(self, data):
            self.data = data

        def __getitem__(self, idx):
            return self.data[idx]

        def __len__(self):
            return self.data.shape[0]

    def news_collate_fn(arr):
        # Stack a list of rows into one LongTensor batch.
        arr = torch.LongTensor(arr)
        return arr

    news_dataset = NewsDataset(news_combined)
    news_dataloader = DataLoader(news_dataset,
                                 batch_size=args.batch_size * 4,
                                 num_workers=args.num_workers,
                                 collate_fn=news_collate_fn)
    # Encode every news article once with the student news encoder.
    news_scoring = []
    with torch.no_grad():
        for input_ids in tqdm(news_dataloader):
            input_ids = input_ids.cuda()
            news_vec = model.student.news_encoder(input_ids)
            news_vec = news_vec.to(torch.device("cpu")).detach().numpy()
            news_scoring.extend(news_vec)
    news_scoring = np.array(news_scoring)
    logging.info("news scoring num: {}".format(news_scoring.shape[0]))
    # Monte-Carlo estimate of the average cosine similarity between two
    # random news vectors (embedding-diversity diagnostic).
    doc_sim = 0
    for _ in tqdm(range(1000000)):
        i = random.randrange(1, len(news_scoring))
        j = random.randrange(1, len(news_scoring))
        if i != j:
            doc_sim += np.dot(news_scoring[i], news_scoring[j]) / (
                np.linalg.norm(news_scoring[i]) * np.linalg.norm(news_scoring[j]))
    print(f'=== doc-sim: {doc_sim / 1000000} ===')
    dataloader = DataLoaderTest(
        news_index=news_index,
        news_scoring=news_scoring,
        word_dict=word_dict,
        data_dir=args.test_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=False,
        enable_gpu=args.enable_gpu,
    )
    from metrics import roc_auc_score, ndcg_score, mrr_score
    AUC = []
    MRR = []
    nDCG5 = []
    nDCG10 = []

    def print_metrics(hvd_local_rank, cnt, x):
        # Metrics are logged as percentages, tab separated.
        logging.info("[{}] Ed: {}: {}".format(hvd_local_rank, cnt,
                                              '\t'.join(["{:0.2f}".format(i * 100) for i in x])))

    def get_mean(arr):
        return [np.array(i).mean() for i in arr]

    def get_sum(arr):
        return [np.array(i).sum() for i in arr]

    local_sample_num = 0
    for cnt, (log_vecs, log_mask, news_vecs, labels) in enumerate(dataloader):
        local_sample_num += log_vecs.shape[0]
        if args.enable_gpu:
            log_vecs = log_vecs.cuda(non_blocking=True)
            log_mask = log_mask.cuda(non_blocking=True)
        user_vecs = model.student.user_encoder(log_vecs, log_mask).to(
            torch.device("cpu")).detach().numpy()
        for user_vec, news_vec, label in zip(user_vecs, news_vecs, labels):
            # Impressions that are all-clicked or all-skipped carry no
            # ranking signal; skip them.
            if label.mean() == 0 or label.mean() == 1:
                continue
            score = np.dot(news_vec, user_vec)
            auc = roc_auc_score(label, score)
            mrr = mrr_score(label, score)
            ndcg5 = ndcg_score(label, score, k=5)
            ndcg10 = ndcg_score(label, score, k=10)
            AUC.append(auc)
            MRR.append(mrr)
            nDCG5.append(ndcg5)
            nDCG10.append(ndcg10)
        if cnt % args.log_steps == 0:
            print_metrics(hvd_rank, local_sample_num,
                          get_mean([AUC, MRR, nDCG5, nDCG10]))
    # stop scoring
    dataloader.join()
    logging.info('[{}] local_sample_num: {}'.format(
        hvd_rank, local_sample_num))
    # Aggregate sample counts and metric sums across all workers, then
    # report the global means from rank 0.
    total_sample_num = hvd.allreduce(
        torch.tensor(local_sample_num), op=hvd.Sum)
    local_metrics_sum = get_sum([AUC, MRR, nDCG5, nDCG10])
    total_metrics_sum = hvd.allreduce(torch.tensor(
        local_metrics_sum, dtype=float), op=hvd.Sum)
    if hvd_rank == 0:
        print_metrics(hvd_rank, total_sample_num,
                      total_metrics_sum / total_sample_num)
def get_teacher_emb(args):
    """Precompute and pickle news embeddings for every teacher checkpoint.

    For each (checkpoint path, output path) pair in args, loads the
    teacher model, encodes the whole training news corpus, logs a
    document-similarity diagnostic, and dumps the embedding matrix with
    pickle to the output path.
    """
    from model_bert_2 import ModelBert
    import pickle
    if args.enable_hvd:
        import horovod.torch as hvd
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    news, news_index, category_dict, subcategory_dict = read_news_bert(
        os.path.join(args.train_data_dir, 'news.tsv'), args, mode='train'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # Token ids and attention masks concatenated along the last axis.
    news_combined = np.concatenate([news_title, news_title_attmask], axis=-1)

    class NewsDataset(Dataset):
        # Wraps the token-id matrix so news can be batched for encoding.
        def __init__(self, data):
            self.data = data

        def __getitem__(self, idx):
            return self.data[idx]

        def __len__(self):
            return self.data.shape[0]

    def news_collate_fn(arr):
        arr = torch.LongTensor(arr)
        return arr

    for ckpt_path, teacher_emb in zip(args.teacher_ckpts, args.teacher_emb_paths):
        model = ModelBert(args)
        ckpt = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(ckpt['model_state_dict'])
        logging.info(f"loaded teacher model: {ckpt_path}")
        del ckpt
        torch.cuda.empty_cache()
        model = model.cuda()
        model.eval()
        torch.set_grad_enabled(False)
        news_dataset = NewsDataset(news_combined)
        news_dataloader = DataLoader(news_dataset,
                                     batch_size=args.batch_size * 4,
                                     num_workers=args.num_workers,
                                     collate_fn=news_collate_fn)
        # Encode the corpus with this teacher's news encoder.
        news_scoring = []
        with torch.no_grad():
            for input_ids in tqdm(news_dataloader):
                input_ids = input_ids.cuda()
                news_vec = model.news_encoder(input_ids)
                news_vec = news_vec.to(torch.device("cpu")).detach().numpy()
                news_scoring.extend(news_vec)
        news_scoring = np.array(news_scoring)
        logging.info("news scoring num: {}".format(news_scoring.shape[0]))
        # Monte-Carlo average cosine similarity between random news pairs.
        doc_sim = 0
        for _ in tqdm(range(1000000)):
            i = random.randrange(1, len(news_scoring))
            j = random.randrange(1, len(news_scoring))
            if i != j:
                doc_sim += np.dot(news_scoring[i], news_scoring[j]) / (
                    np.linalg.norm(news_scoring[i]) * np.linalg.norm(news_scoring[j]))
        print(f'=== doc-sim: {doc_sim / 1000000} ===')
        with open(teacher_emb, 'wb') as f:
            pickle.dump(news_scoring, f)
        logging.info(f"teacher embedding saved at {teacher_emb}")
if __name__ == "__main__":
    # CLI entry point. `--mode` is matched by substring, so a combined
    # value can trigger several phases in sequence.
    utils.setuplogger()
    args = parse_args()
    Path(args.model_dir).mkdir(parents=True, exist_ok=True)
    if 'train' in args.mode:
        train(args)
    if 'test' in args.mode:
        test(args)
    if 'get_teacher_emb' in args.mode:
        get_teacher_emb(args)
| 15,933 | 32.687104 | 118 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/parameters.py | import argparse
import utils
import logging
def parse_args():
    """Build and parse the command-line arguments for the scripts.

    Returns the parsed ``argparse.Namespace``; the full namespace is also
    logged via ``logging.info`` for reproducibility.
    """
    parser = argparse.ArgumentParser()
    # data / runtime
    parser.add_argument("--mode",
                        type=str,
                        default="train",
                        choices=['train', 'test', 'get_teacher_emb'])
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default="../MIND/MINDlarge_train",
    )
    parser.add_argument(
        "--test_data_dir",
        type=str,
        default="../MIND/MINDlarge_test",
    )
    parser.add_argument("--filename_pat", type=str, default="behaviors_np4_*.tsv")
    parser.add_argument("--model_dir", type=str, default='./model')
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--npratio", type=int, default=4)
    parser.add_argument("--enable_gpu", type=utils.str2bool, default=True)
    parser.add_argument("--enable_hvd", type=utils.str2bool, default=True)
    parser.add_argument("--enable_shuffle", type=utils.str2bool, default=True)
    parser.add_argument("--shuffle_buffer_size", type=int, default=10000)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--filter_num", type=int, default=3)
    parser.add_argument("--log_steps", type=int, default=100)
    # model training
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--lr", type=float, default=0.0001)
    parser.add_argument("--num_words_title", type=int, default=20)
    parser.add_argument("--num_words_abstract", type=int, default=50)
    parser.add_argument("--num_words_body", type=int, default=100)
    parser.add_argument(
        "--user_log_length",
        type=int,
        default=50,
    )
    parser.add_argument(
        "--word_embedding_dim",
        type=int,
        default=300,
    )
    parser.add_argument(
        "--glove_embedding_path",
        type=str,
        default='./glove.840B.300d.txt',
    )
    parser.add_argument("--freeze_embedding", type=utils.str2bool, default=False)
    parser.add_argument(
        "--news_dim",
        type=int,
        default=64,
    )
    parser.add_argument(
        "--news_query_vector_dim",
        type=int,
        default=200,
    )
    parser.add_argument(
        "--user_query_vector_dim",
        type=int,
        default=200,
    )
    parser.add_argument(
        "--num_attention_heads",
        type=int,
        default=20,
    )
    parser.add_argument("--user_log_mask", type=utils.str2bool, default=True)
    parser.add_argument("--drop_rate", type=float, default=0.2)
    parser.add_argument("--save_steps", type=int, default=1000)
    parser.add_argument("--max_steps_per_epoch", type=int, default=1000000)
    parser.add_argument("--load_ckpt_name",
                        type=str,
                        default=None,
                        help="choose which ckpt to load and test")
    # bert
    parser.add_argument("--apply_bert", type=utils.str2bool, default=False)
    parser.add_argument("--model_type", default="bert", type=str)
    parser.add_argument("--do_lower_case", type=utils.str2bool, default=True)
    parser.add_argument("--model_name", default="../bert-base-uncased/pytorch_model.bin", type=str)
    parser.add_argument("--config_name", default="../bert-base-uncased/config.json", type=str)
    parser.add_argument("--tokenizer_name", default="../bert-base-uncased/vocab.txt", type=str)
    parser.add_argument("--num_hidden_layers", type=int, default=8)
    parser.add_argument("--bert_trainable_layer",
                        type=int,
                        nargs='+',
                        default=[],
                        choices=list(range(12)))
    parser.add_argument("--model", type=str, default=None)
    parser.add_argument("--pooling", type=str, default='att')
    parser.add_argument("--start_epoch", type=int, default=0)
    # distillation: student warm start, teacher checkpoints/embeddings,
    # temperature and loss weighting.
    parser.add_argument("--use_pretrain_model", type=utils.str2bool, default=False)
    parser.add_argument("--pretrain_model_path", type=str, default=None)
    parser.add_argument("--pretrain_lr", type=float, default=0.00001)
    parser.add_argument("--num_teacher_layers", type=int, default=12)
    parser.add_argument("--num_student_layers", type=int, default=4)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--coef", type=float, default=1.0)
    parser.add_argument("--tensorboard", type=str, default=None)
    parser.add_argument("--teacher_ckpts", type=str, nargs='+', default=[])
    parser.add_argument("--teacher_emb_paths", type=str, nargs='+', default=[])
    parser.add_argument("--num_teachers", type=int, default=4)
    args = parser.parse_args()
    logging.info(args)
    return args
if __name__ == "__main__":
    # Smoke test: parse (and log) the CLI arguments.
    args = parse_args()
| 4,778 | 37.232 | 99 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/model_bert_2.py | import numpy as np
import torch
from torch import nn
from utils import MODEL_CLASSES
class AttentionPooling(nn.Module):
    """Additive attention that pools a set of vectors into a single vector."""

    def __init__(self, emb_size, hidden_size):
        super(AttentionPooling, self).__init__()
        self.att_fc1 = nn.Linear(emb_size, hidden_size)
        self.att_fc2 = nn.Linear(hidden_size, 1)

    def forward(self, x, attn_mask=None):
        """Pool ``x`` of shape (batch, candidate, emb) into (batch, emb).

        attn_mask (batch, candidate) zeroes out padded candidates before
        the attention weights are normalised.
        """
        weights = self.att_fc2(torch.tanh(self.att_fc1(x)))
        weights = torch.exp(weights)
        if attn_mask is not None:
            weights = weights * attn_mask.unsqueeze(2)
        # Normalise; the epsilon guards against an all-masked row.
        weights = weights / (torch.sum(weights, dim=1, keepdim=True) + 1e-8)
        pooled = torch.bmm(x.permute(0, 2, 1), weights).squeeze(dim=-1)
        return pooled
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with optional key masking."""

    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k

    def forward(self, Q, K, V, attn_mask=None):
        """Attend Q over K and aggregate V.

        Q, K: (batch, n_head, candidate, d_k); V: (batch, n_head,
        candidate, d_v); attn_mask: (batch, n_head, candidate).
        Returns (batch, n_head, candidate, d_v).
        """
        logits = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        weights = torch.exp(logits)
        if attn_mask is not None:
            # Mask keys (broadcast over the query dimension).
            weights = weights * attn_mask.unsqueeze(dim=-2)
        weights = weights / (torch.sum(weights, dim=-1, keepdim=True) + 1e-8)
        return torch.matmul(weights, V)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention built on ScaledDotProductAttention."""

    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier-initialise every Linear projection weight.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=1)

    def forward(self, Q, K, V, mask=None):
        """Q, K, V: (batch, candidate, d_model); mask: (batch, candidate)."""
        batch_size = Q.shape[0]
        if mask is not None:
            # Replicate the key mask across heads.
            mask = mask.unsqueeze(dim=1).expand(-1, self.n_heads, -1)
        # Project and reshape to (batch, n_heads, candidate, d_k/d_v).
        heads_q = self.W_Q(Q).view(batch_size, -1, self.n_heads,
                                   self.d_k).transpose(1, 2)
        heads_k = self.W_K(K).view(batch_size, -1, self.n_heads,
                                   self.d_k).transpose(1, 2)
        heads_v = self.W_V(V).view(batch_size, -1, self.n_heads,
                                   self.d_v).transpose(1, 2)
        attended = self.scaled_dot_product_attn(heads_q, heads_k, heads_v, mask)
        # Re-merge the heads: (batch, candidate, n_heads * d_v).
        return attended.transpose(1, 2).contiguous().view(
            batch_size, -1, self.n_heads * self.d_v)
class NewsEncoder(nn.Module):
    """Encode a tokenized news title into a dense news vector.

    Runs a (possibly layer-truncated) pretrained transformer, pools the
    hidden states of the deepest kept layer ('cls' / 'att' / mean,
    selected by args.pooling), and projects to args.news_dim.
    """

    def __init__(self, args):
        super(NewsEncoder, self).__init__()
        self.pooling = args.pooling
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        # TuringNLR exposes hidden states at output index 3; HF BERT-style
        # models at index 2 (with output_hidden_states=True).
        self.output_index = 3 if args.model_type == 'tnlrv3' else 2
        self.bert_config = config_class.from_pretrained(
            args.config_name,
            output_hidden_states=True,
            num_hidden_layers=args.num_hidden_layers)
        self.bert_model = model_class.from_pretrained(
            args.model_name, config=self.bert_config)
        if args.pooling == 'att':
            self.attn = AttentionPooling(
                self.bert_config.hidden_size, args.news_query_vector_dim)
        self.dense = nn.Linear(self.bert_config.hidden_size, args.news_dim)

    def forward(self, x):
        """Encode concatenated (token ids, attention mask) rows.

        x: (batch_size, num_words * 2) -- first half token ids, second
        half the attention mask.  Returns (batch_size, news_dim).
        """
        batch_size, num_words = x.shape
        num_words = num_words // 2
        text_ids = torch.narrow(x, 1, 0, num_words)
        text_attmask = torch.narrow(x, 1, num_words, num_words)
        # Hidden states of the deepest kept transformer layer.
        word_vecs = self.bert_model(text_ids, text_attmask)[
            self.output_index][self.bert_config.num_hidden_layers]
        if self.pooling == 'cls':
            # First token's hidden state only.
            news_vec = torch.narrow(word_vecs, 1, 0, 1).squeeze(dim=1)
        elif self.pooling == 'att':
            news_vec = self.attn(word_vecs)
        else:
            news_vec = torch.mean(word_vecs, dim=1)
        news_vec = self.dense(news_vec)
        return news_vec
class UserEncoder(nn.Module):
    """Aggregate a user's clicked-news vectors into one user vector.

    For args.model == 'NRMS' a multi-head self-attention runs over the
    history first; pooling is additive attention in all configurations.
    """

    def __init__(self, args):
        super(UserEncoder, self).__init__()
        self.args = args
        if args.model == 'NRMS':
            self.multi_head_self_attn = MultiHeadSelfAttention(
                args.news_dim, args.num_attention_heads, 16, 16)
            self.attn = AttentionPooling(
                args.num_attention_heads * 16, args.user_query_vector_dim)
        else:
            self.attn = AttentionPooling(
                args.news_dim, args.user_query_vector_dim)
        # NOTE(review): `.type(torch.FloatTensor)` returns a plain tensor,
        # so pad_doc is NOT registered as an nn.Parameter (it is absent
        # from state_dict and never trained). Fixing this would change
        # checkpoint compatibility -- confirm before touching.
        self.pad_doc = nn.Parameter(torch.empty(
            1, args.news_dim).uniform_(-1, 1)).type(torch.FloatTensor)

    def forward(self, news_vecs, log_mask=None):
        '''
        news_vecs: batch_size, history_num, news_dim
        log_mask: batch_size, history_num (1 = real click, 0 = padding)
        '''
        bz = news_vecs.shape[0]
        if self.args.user_log_mask:
            # Mask padded history slots inside the attention itself.
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(
                    news_vecs, news_vecs, news_vecs, log_mask)
                user_vec = self.attn(news_vecs, log_mask)
            else:
                user_vec = self.attn(news_vecs, log_mask)
        else:
            # Replace padded slots with the pad vector, then attend
            # without a mask.
            padding_doc = self.pad_doc.unsqueeze(dim=0).expand(
                bz, self.args.user_log_length, -1)
            news_vecs = news_vecs * \
                log_mask.unsqueeze(dim=-1) + padding_doc * \
                (1 - log_mask.unsqueeze(dim=-1))
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(
                    news_vecs, news_vecs, news_vecs)
                user_vec = self.attn(news_vecs)
            else:
                user_vec = self.attn(news_vecs)
        return user_vec
class ModelBert(torch.nn.Module):
    """News-recommendation model: news encoder + user encoder.

    Scores each of the 1+K candidates per user by dot product with the
    user vector and trains with cross-entropy over the candidates.
    """

    def __init__(self, args):
        super(ModelBert, self).__init__()
        self.args = args
        self.news_encoder = NewsEncoder(args)
        self.user_encoder = UserEncoder(args)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, history, history_mask, candidate, label):
        '''
        history: batch_size, history_length, num_word_title * 2
        history_mask: batch_size, history_length
        candidate: batch_size, 1+K, num_word_title * 2
        label: batch_size, 1+K
        '''
        # NOTE(review): nn.CrossEntropyLoss expects class indices of shape
        # (batch,); the "1+K" label shape in the docstring above looks
        # stale -- confirm against the caller.
        batch_size = history.shape[0]
        input_id_num = history.shape[-1]
        # Flatten candidates, encode, restore (batch, 1+K, news_dim).
        candidate_news = candidate.reshape(-1, input_id_num)
        candidate_news_vecs = self.news_encoder(
            candidate_news).reshape(batch_size, -1, self.args.news_dim)
        # Same for the clicked-news history.
        history_news = history.reshape(-1, input_id_num)
        history_news_vecs = self.news_encoder(
            history_news).reshape(-1, self.args.user_log_length, self.args.news_dim)
        user_vec = self.user_encoder(history_news_vecs, history_mask)
        # Dot-product relevance of each candidate against the user vector.
        score = torch.bmm(candidate_news_vecs,
                          user_vec.unsqueeze(dim=-1)).squeeze(dim=-1)
        loss = self.loss_fn(score, label)
        return loss, score
| 8,043 | 36.588785 | 84 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/model_bert.py | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from utils import MODEL_CLASSES
class AttentionPooling(nn.Module):
    """Additive attention pooling: reduce (batch, candidate, emb) to (batch, emb)."""

    def __init__(self, emb_size, hidden_size):
        super(AttentionPooling, self).__init__()
        self.att_fc1 = nn.Linear(emb_size, hidden_size)
        self.att_fc2 = nn.Linear(hidden_size, 1)

    def forward(self, x, attn_mask=None):
        """Compute attention weights per candidate and return the weighted sum.

        attn_mask (batch, candidate) zeroes padded candidates before
        normalisation.
        """
        scores = self.att_fc2(torch.tanh(self.att_fc1(x)))
        scores = torch.exp(scores)
        if attn_mask is not None:
            scores = scores * attn_mask.unsqueeze(2)
        # Epsilon keeps an all-masked row from dividing by zero.
        scores = scores / (torch.sum(scores, dim=1, keepdim=True) + 1e-8)
        return torch.bmm(x.permute(0, 2, 1), scores).squeeze(dim=-1)
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with optional key masking."""

    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k

    def forward(self, Q, K, V, attn_mask=None):
        """Attend Q over K and aggregate V.

        Q, K: (batch, n_head, candidate, d_k); V: (batch, n_head,
        candidate, d_v); attn_mask: (batch, n_head, candidate).
        Returns (batch, n_head, candidate, d_v).
        """
        logits = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        weights = torch.exp(logits)
        if attn_mask is not None:
            # Mask keys, broadcast over the query dimension.
            weights = weights * attn_mask.unsqueeze(dim=-2)
        weights = weights / (torch.sum(weights, dim=-1, keepdim=True) + 1e-8)
        return torch.matmul(weights, V)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention layered on ScaledDotProductAttention."""

    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier-initialise every Linear projection weight.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=1)

    def forward(self, Q, K, V, mask=None):
        """Q, K, V: (batch, candidate, d_model); mask: (batch, candidate)."""
        batch_size = Q.shape[0]
        if mask is not None:
            # Replicate the key mask across heads.
            mask = mask.unsqueeze(dim=1).expand(-1, self.n_heads, -1)
        # Project and split into (batch, n_heads, candidate, d_k/d_v).
        heads_q = self.W_Q(Q).view(batch_size, -1, self.n_heads,
                                   self.d_k).transpose(1, 2)
        heads_k = self.W_K(K).view(batch_size, -1, self.n_heads,
                                   self.d_k).transpose(1, 2)
        heads_v = self.W_V(V).view(batch_size, -1, self.n_heads,
                                   self.d_v).transpose(1, 2)
        attended = self.scaled_dot_product_attn(heads_q, heads_k, heads_v, mask)
        # Merge the heads back: (batch, candidate, n_heads * d_v).
        return attended.transpose(1, 2).contiguous().view(
            batch_size, -1, self.n_heads * self.d_v)
class NewsEncoder(nn.Module):
    """Encode a tokenized news title into a fixed-size vector with a
    (possibly layer-truncated) pre-trained transformer plus a pooling head
    ('cls', 'att', or mean) and a final linear projection."""

    def __init__(self, args, is_teacher):
        super(NewsEncoder, self).__init__()
        self.pooling = args.pooling
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        # 'tnlrv3' models expose hidden states at output index 3, HF-style
        # models at index 2.
        self.output_index = 3 if args.model_type == 'tnlrv3' else 2
        num_layers = args.num_teacher_layers if is_teacher else args.num_student_layers
        self.bert_config = config_class.from_pretrained(
            args.config_name,
            output_hidden_states=True,
            num_hidden_layers=num_layers)
        self.bert_model = model_class.from_pretrained(args.model_name, config=self.bert_config)
        if args.pooling == 'att':
            self.attn = AttentionPooling(self.bert_config.hidden_size, args.news_query_vector_dim)
        self.dense = nn.Linear(self.bert_config.hidden_size, args.news_dim)

    def forward(self, x):
        '''
        x: batch_size, word_num * 2 (token ids followed by the attention mask)
        Returns: batch_size, news_dim
        '''
        batch_size, num_words = x.shape
        num_words = num_words // 2
        text_ids = x[:, :num_words]
        text_attmask = x[:, num_words:num_words * 2]
        # Word vectors from the last transformer layer.
        word_vecs = self.bert_model(
            text_ids, text_attmask)[self.output_index][self.bert_config.num_hidden_layers]
        if self.pooling == 'cls':
            news_vec = word_vecs[:, 0]
        elif self.pooling == 'att':
            news_vec = self.attn(word_vecs)
        else:
            news_vec = torch.mean(word_vecs, dim=1)
        return self.dense(news_vec)
class UserEncoder(nn.Module):
    """Aggregate a user's clicked-news vectors into one user embedding.

    For the 'NRMS' model the history first passes through multi-head
    self-attention; in all cases attention pooling produces the final vector.
    """

    def __init__(self, args):
        super(UserEncoder, self).__init__()
        self.args = args
        if args.model == 'NRMS':
            self.multi_head_self_attn = MultiHeadSelfAttention(args.news_dim,
                                                               args.num_attention_heads, 16, 16)
            self.attn = AttentionPooling(args.num_attention_heads * 16, args.user_query_vector_dim)
        else:
            self.attn = AttentionPooling(args.news_dim, args.user_query_vector_dim)
        # Embedding substituted for padded history slots in the unmasked path.
        # NOTE(review): .type(torch.FloatTensor) returns a plain Tensor copy,
        # so pad_doc is likely NOT registered as an nn.Parameter (it would not
        # appear in state_dict or receive gradients) -- confirm intent.
        self.pad_doc = nn.Parameter(torch.empty(1,
                                                args.news_dim).uniform_(-1,
                                                                        1)).type(torch.FloatTensor)

    def forward(self, news_vecs, log_mask=None):
        '''
        news_vecs: batch_size, history_num, news_dim
        log_mask: batch_size, history_num (1 = real click, 0 = padding)
        Returns: batch_size, user embedding
        '''
        bz = news_vecs.shape[0]
        if self.args.user_log_mask:
            # Masked path: the attention layers themselves ignore padded slots.
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs, log_mask)
                user_vec = self.attn(news_vecs, log_mask)
            else:
                user_vec = self.attn(news_vecs, log_mask)
        else:
            # Unmasked path: overwrite padded slots with the learned pad
            # embedding, then attend over the full history without a mask.
            padding_doc = self.pad_doc.unsqueeze(dim=0).expand(bz, self.args.user_log_length, -1)
            news_vecs = news_vecs * log_mask.unsqueeze(
                dim=-1) + padding_doc * (1 - log_mask.unsqueeze(dim=-1))
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs)
                user_vec = self.attn(news_vecs)
            else:
                user_vec = self.attn(news_vecs)
        return user_vec
class ModelBert(torch.nn.Module):
    """Two-tower recommender: a transformer news encoder plus an attention
    user encoder; click scores are dot products between candidate vectors
    and the user vector."""

    def __init__(self, args, is_teacher):
        super(ModelBert, self).__init__()
        self.args = args
        self.news_encoder = NewsEncoder(args, is_teacher)
        self.user_encoder = UserEncoder(args)

    def forward(self, history, history_mask, candidate):
        '''
        history: batch_size, history_length, num_word_title * 2
        history_mask: batch_size, history_length
        candidate: batch_size, 1+K, num_word_title * 2
        Returns (score, history_news_vecs, candidate_news_vecs, user_vec).
        '''
        bsz = history.shape[0]
        seq_len = history.shape[-1]
        # Encode candidates: flatten, encode, restore the (1+K) axis.
        cand_vecs = self.news_encoder(candidate.reshape(-1, seq_len))
        cand_vecs = cand_vecs.reshape(bsz, -1, self.args.news_dim)
        # Encode the click history the same way.
        hist_vecs = self.news_encoder(history.reshape(-1, seq_len))
        hist_vecs = hist_vecs.reshape(-1, self.args.user_log_length, self.args.news_dim)
        user_vec = self.user_encoder(hist_vecs, history_mask)
        score = torch.bmm(cand_vecs, user_vec.unsqueeze(dim=-1)).squeeze(dim=-1)
        return score, hist_vecs, cand_vecs, user_vec
def kd_ce_loss(logits_S, logits_T, temperature=1):
    '''
    Soft-label distillation loss: cross entropy between the temperature-
    softened teacher distribution and the student log-probabilities.
    :param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
    '''
    soft_targets = F.softmax(logits_T / temperature, dim=-1)
    student_log_probs = F.log_softmax(logits_S / temperature, dim=-1)
    return -(soft_targets * student_log_probs).sum(dim=-1).mean()
def hid_mse_loss(state_S, state_T, mask=None, reduce=True):
    '''
    * Calculates the mse loss between `state_S` and `state_T`, which are the hidden state of the models.
    * If the `inputs_mask` is given, masks the positions where ``input_mask==0``.
    * If the hidden sizes of student and teacher are different, 'proj' option is required in `inetermediate_matches` to match the dimensions.
    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
    :param bool reduce: when False, return a per-position loss instead of a scalar.
    '''
    per_elem = F.mse_loss(state_S, state_T, reduction='none')
    if mask is None:
        return per_elem.mean() if reduce else per_elem.mean(dim=-1)
    masked = per_elem * mask.unsqueeze(-1)
    if not reduce:
        return masked.mean(dim=-1)
    # Average only over unmasked positions (each spanning hidden_size values).
    valid_count = mask.sum() * state_S.size(-1)
    return masked.sum() / valid_count
class Model(torch.nn.Module):
    """Multi-teacher knowledge-distillation wrapper around a student
    ModelBert.

    Teachers contribute precomputed news embeddings plus their own user
    encoders; teachers are weighted per sample by the softmax of their
    negative cross-entropy losses, and the student is trained on a mix of
    target loss, soft-label distillation loss, and embedding-MSE loss.
    """

    def __init__(self, args):
        super(Model, self).__init__()
        self.args = args
        # One user encoder per teacher (teacher news encoders run offline;
        # only their embeddings are fed into forward()).
        self.teachers = nn.ModuleList([UserEncoder(args) for _ in range(args.num_teachers)])
        self.student = ModelBert(args, is_teacher=False)
        self.target_loss_fn = nn.CrossEntropyLoss()
        # Per-teacher linear maps from teacher embedding space into the
        # student's embedding space.
        self.transform_matrix = nn.ModuleList(
            [nn.Linear(args.news_dim, args.news_dim) for _ in range(args.num_teachers)])
        for module in self.transform_matrix:
            nn.init.xavier_uniform_(module.weight, gain=1.)
            nn.init.constant_(module.bias, 0.0)

    def forward(self, history, history_mask, candidate, label, teacher_history_embs,
                teacher_candidate_embs):
        '''
        teacher_history_embs: [(batch_size, user_log_length, news_emb) * num_teachers]
        teacher_candidate_emb: [(batch_size, 1+K, news_emb) * num_teachers]
        Returns (total_loss, distill_loss, emb_loss, target_loss, student_score).
        '''
        student_score, student_history_emb, student_candidate_emb, student_user_emb = self.student(
            history, history_mask, candidate)
        student_news_emb = torch.cat([student_history_emb, student_candidate_emb], dim=1)
        target_loss = self.target_loss_fn(student_score, label)
        teacher_scores, teacher_losses = [], []
        NE_MSEs, UE_MSEs = [], []  # per-teacher news-/user-embedding MSEs
        for i, (teacher_history,
                teacher_candidate) in enumerate(zip(teacher_history_embs, teacher_candidate_embs)):
            teacher_news_emb = torch.cat([teacher_history, teacher_candidate], dim=1)
            teacher_news_emb_proj = self.transform_matrix[i](teacher_news_emb)
            # Per-sample (unreduced) news-embedding MSE against this teacher.
            NE_MSEs.append(
                hid_mse_loss(student_news_emb, teacher_news_emb_proj, reduce=False).mean(dim=-1))
            teacher_user_vector = self.teachers[i](teacher_history, history_mask)
            teacher_user_vector_proj = self.transform_matrix[i](teacher_user_vector)
            UE_MSEs.append(hid_mse_loss(student_user_emb, teacher_user_vector_proj, reduce=False))
            # Teacher click scores from its own (unprojected) user vector.
            score = torch.bmm(teacher_candidate,
                              teacher_user_vector.unsqueeze(dim=-1)).squeeze(dim=-1)
            teacher_loss = F.cross_entropy(score, label, reduction='none')
            teacher_scores.append(score)
            teacher_losses.append(teacher_loss)
        # Lower-loss (better) teachers receive larger per-sample weights.
        teacher_losses = -torch.stack(teacher_losses, dim=-1)
        teacher_weights = F.softmax(teacher_losses, dim=-1)
        teacher_scores = torch.stack(teacher_scores, dim=-1)
        # Per-sample weighted combination of teacher score vectors: (B, 1+K).
        teacher_scores = torch.bmm(teacher_scores,
                                   teacher_weights.unsqueeze(dim=-1)).squeeze(dim=-1)
        distill_loss = kd_ce_loss(student_score, teacher_scores, self.args.temperature)
        NE_MSEs = torch.stack(NE_MSEs, dim=-1)
        UE_MSEs = torch.stack(UE_MSEs, dim=-1)
        # Embedding losses use the same per-teacher weights as the scores.
        emb_loss = (NE_MSEs * teacher_weights).sum(dim=-1).mean() + (UE_MSEs * teacher_weights).sum(
            dim=-1).mean()
        total_loss = distill_loss + self.args.coef * target_loss + emb_loss
        return total_loss, distill_loss, emb_loss, target_loss, student_score | 12,966 | 41.375817 | 141 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/convert_state_dict.py | import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
    archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
    cache_dir, force_download, proxies, resume_download,
):
    """Resolve *archive_file* through the transformers download cache and
    load the checkpoint onto CPU.

    Raises EnvironmentError with a descriptive message when the file cannot
    be fetched.
    """
    try:
        resolved = cached_path(archive_file,
                               cache_dir=cache_dir,
                               force_download=force_download,
                               proxies=proxies,
                               resume_download=resume_download)
    except EnvironmentError:
        # Distinguish "known model, server unreachable" from "unknown name".
        if pretrained_model_name_or_path in pretrained_model_archive_map:
            msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
                archive_file)
        else:
            msg = "Model name '{}' was not found in model name list ({}). " \
                  "We assumed '{}' was a path or url to model weight files named one of {} but " \
                  "couldn't find any such file at this path or url.".format(
                      pretrained_model_name_or_path,
                      ', '.join(pretrained_model_archive_map.keys()),
                      archive_file,
                      [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
        raise EnvironmentError(msg)

    if resolved == archive_file:
        logger.info("loading weights file {}".format(archive_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved))
    return torch.load(resolved, map_location='cpu')
def load_model(state_dict):
    """Convert a raw TNLRv3/UniLM checkpoint into HF-BERT-style naming.

    Splits fused QKV weights into separate query/key/value projections,
    expands the q/v biases (the key bias is implicitly zero), and renames
    the relative-position bias.
    """
    converted = {}
    for key, value in state_dict.items():
        if key.endswith("attention.self.q_bias"):
            converted[key.replace(
                "attention.self.q_bias", "attention.self.query.bias")] = value.view(-1)
        elif key.endswith("attention.self.v_bias"):
            flat = value.view(-1)
            converted[key.replace(
                "attention.self.v_bias", "attention.self.value.bias")] = flat
            # The checkpoint carries no key bias; materialize zeros for it.
            converted[key.replace(
                "attention.self.v_bias", "attention.self.key.bias")] = torch.zeros_like(flat)
        elif key.endswith("attention.self.qkv_linear.weight"):
            rows = value.size(0)
            assert rows % 3 == 0
            third = rows // 3
            q, k, v = torch.split(value, split_size_or_sections=(third, third, third), dim=0)
            fused_name = "attention.self.qkv_linear.weight"
            converted[key.replace(fused_name, "attention.self.query.weight")] = q
            converted[key.replace(fused_name, "attention.self.key.weight")] = k
            converted[key.replace(fused_name, "attention.self.value.weight")] = v
        elif key == "bert.encoder.rel_pos_bias.weight":
            converted["bert.rel_pos_bias.weight"] = value
        else:
            converted[key] = value
    del state_dict
    return converted
# Dispatch table: model-type string -> checkpoint-conversion function.
state_dict_convert = {
    'tnlrv3': load_model,
}
| 3,162 | 40.077922 | 109 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/s2s_loader.py | import numpy as np
from random import randint
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
    """Return a token drawn uniformly at random from *vocab_words*."""
    last_index = len(vocab_words) - 1
    return vocab_words[randint(0, last_index)]
def batch_list_to_batch_tensors(batch):
    """Transpose a list of per-example tuples into per-field batch tensors.

    Fields that are already tensors are stacked; plain Python numbers/lists
    become long tensors; a field whose first entry is None stays None.
    """
    tensors = []
    for field in zip(*batch):
        first = field[0]
        if first is None:
            tensors.append(None)
        elif isinstance(first, torch.Tensor):
            tensors.append(torch.stack(field))
        else:
            tensors.append(torch.tensor(field, dtype=torch.long))
    return tensors
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
    """Base class for callable pre-processing pipelines.

    Holds shared configuration slots (all unset by default); subclasses must
    implement __call__.
    """

    def __init__(self):
        super().__init__()
        # Skip-gram masking configuration.
        self.skipgram_prb = None
        self.skipgram_size = None
        self.skipgram_size_geo_list = None
        self.span_same_mask = False
        # Whole-word handling and subsampling.
        self.pre_whole_word = None
        self.mask_whole_word = None
        self.word_subsample_prb = None
        self.sp_prob = None
        # Piece vocabulary.
        self.pieces_dir = None
        self.vocab_words = None
        self.pieces_threshold = 10
        # Runtime state.
        self.call_count = 0
        self.offline_mode = False

    def __call__(self, instance):
        raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
    """ Pre-processing steps for pretraining transformer """

    def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
                 mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
                 cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]'):
        super().__init__()
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # NOTE(review): duplicate assignment; max_len was already set above.
        self.max_len = max_len
        # Lower-triangular ones matrix reused to build causal attention masks.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.task_idx = 3   # relax projection layer for different tasks
        assert mode in ("s2s", "l2r")
        self.mode = mode
        self.max_tgt_length = max_tgt_length
        self.pos_shift = pos_shift
        # Number of special tokens around the source: [CLS] (+ [SEP] unless
        # pos_shift is on).
        self.delta = 1 if pos_shift else 2
        self.cls_token = cls_token
        self.sep_token = sep_token
        self.pad_token = pad_token
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        self.cc = 0  # call counter, used only to limit debug logging below

    def __call__(self, instance):
        """Turn (tokens_a, max_a_len) into decoder inputs: token ids, segment
        ids, position ids, a 2-D attention mask, mask_qkv, and task index."""
        tokens_a, max_a_len = instance

        # Add special tokens and right-pad the source to max_a_len + delta.
        padded_tokens_a = [self.cls_token] + tokens_a
        if not self.pos_shift:
            padded_tokens_a = padded_tokens_a + [self.sep_token]
        assert len(padded_tokens_a) <= max_a_len + self.delta
        if max_a_len + self.delta > len(padded_tokens_a):
            padded_tokens_a += [self.pad_token] * \
                (max_a_len + self.delta - len(padded_tokens_a))
        assert len(padded_tokens_a) == max_a_len + self.delta

        max_len_in_batch = min(self.max_tgt_length +
                               max_a_len + self.delta, self.max_len)
        tokens = padded_tokens_a
        # Source positions get source_type_id; target slots target_type_id.
        segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
            + [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))

        mask_qkv = None

        # Position ids: contiguous over real source tokens, 0 for source
        # padding, then continued values for the future target positions.
        position_ids = []
        for i in range(len(tokens_a) + self.delta):
            position_ids.append(i)
        for i in range(len(tokens_a) + self.delta, max_a_len + self.delta):
            position_ids.append(0)
        for i in range(max_a_len + self.delta, max_len_in_batch):
            position_ids.append(
                i - (max_a_len + self.delta) + len(tokens_a) + self.delta)

        # Token Indexing
        input_ids = self.indexer(tokens)

        # Log the first few decoded inputs for debugging.
        self.cc += 1
        if self.cc < 20:
            # print("Vocab size = %d" % len(self.vocab_words))
            # for tk_id in input_ids:
            #     print(u"trans %d -> %s" % (tk_id, self.vocab_words[tk_id]))
            logger.info(u"Input src = %s" % " ".join(
                (self.vocab_words[tk_id]) for tk_id in input_ids))

        # Zero Padding
        # Attention mask: in "s2s" mode every position may attend to the
        # whole source; in "l2r" the source itself is causal.
        input_mask = torch.zeros(
            max_len_in_batch, max_len_in_batch, dtype=torch.long)
        if self.mode == "s2s":
            input_mask[:, :len(tokens_a) + self.delta].fill_(1)
        else:
            st, end = 0, len(tokens_a) + self.delta
            input_mask[st:end, st:end].copy_(
                self._tril_matrix[:end, :end])
            input_mask[end:, :len(tokens_a) + self.delta].fill_(1)
        # Target positions additionally attend causally among themselves.
        second_st, second_end = len(padded_tokens_a), max_len_in_batch
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])

        return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
| 5,318 | 32.878981 | 90 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/modeling.py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, \
BertOutput, BertPredictionHeadTransform, BertPooler
from transformers.file_utils import WEIGHTS_NAME
from tnlrv3.config import TuringNLRv3ForSeq2SeqConfig
from tnlrv3.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
logger = logging.getLogger(__name__)
BertLayerNorm = torch.nn.LayerNorm
TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP = {
}
class TuringNLRv3PreTrainedModel(BertPreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = TuringNLRv3ForSeq2SeqConfig
    supported_convert_pretrained_model_archive_map = {
        "tnlrv3": TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }
    base_model_prefix = "TuringNLRv3_for_seq2seq"
    pretrained_model_archive_map = {
        **TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, reuse_position_embedding=None,
        replace_prefix=None, *model_args, **kwargs,
    ):
        """Load a checkpoint (hub cache, local file, or local directory),
        convert its key names for this model, optionally resize the position
        embedding table, then delegate to the parent from_pretrained."""
        model_type = kwargs.pop('model_type', 'tnlrv3')
        if model_type is not None and "state_dict" not in kwargs:
            if model_type in cls.supported_convert_pretrained_model_archive_map:
                pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[model_type]
                if pretrained_model_name_or_path in pretrained_model_archive_map:
                    # Known hub name: download through the transformers cache.
                    state_dict = get_checkpoint_from_transformer_cache(
                        archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
                        pretrained_model_name_or_path=pretrained_model_name_or_path,
                        pretrained_model_archive_map=pretrained_model_archive_map,
                        cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
                        proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
                    )
                    state_dict = state_dict_convert[model_type](state_dict)
                    kwargs["state_dict"] = state_dict
                    logger.info("Load HF ckpts")
                elif os.path.isfile(pretrained_model_name_or_path):
                    # Direct path to a checkpoint file.
                    state_dict = torch.load(pretrained_model_name_or_path, map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](state_dict)
                    logger.info("Load local ckpts")
                elif os.path.isdir(pretrained_model_name_or_path):
                    # Directory containing a standard weights file.
                    state_dict = torch.load(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](state_dict)
                    logger.info("Load local ckpts")
                else:
                    raise RuntimeError("Not fined the pre-trained checkpoint !")

        if kwargs["state_dict"] is None:
            logger.info("TNLRv3 does't support the model !")
            raise NotImplementedError()

        config = kwargs["config"]
        state_dict = kwargs["state_dict"]
        # initialize new position embeddings (From Microsoft/UniLM)
        _k = 'bert.embeddings.position_embeddings.weight'
        if _k in state_dict:
            if config.max_position_embeddings > state_dict[_k].shape[0]:
                # Growing: tile the old table into the larger one (repeating
                # it when reuse_position_embedding is set, else copying once;
                # the remainder keeps its fresh normal initialization).
                logger.info("Resize > position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
                max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
                shift = 0
                while shift < max_range:
                    delta = min(old_vocab_size, max_range - shift)
                    new_postion_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :]
                    logger.info("  CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta))
                    shift += delta
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding
            elif config.max_position_embeddings < state_dict[_k].shape[0]:
                # Shrinking: keep only the first max_position_embeddings rows.
                logger.info("Resize < position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
                new_postion_embedding.data.copy_(state_dict[_k][:config.max_position_embeddings, :])
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding

        if replace_prefix is not None:
            # Strip a prefix from every checkpoint key (e.g. a wrapper name).
            new_state_dict = {}
            for key in state_dict:
                if key.startswith(replace_prefix):
                    new_state_dict[key[len(replace_prefix):]] = state_dict[key]
                else:
                    new_state_dict[key] = state_dict[key]
            kwargs["state_dict"] = new_state_dict
            del state_dict

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
    """Sum word, position and (optional) token-type embeddings, then apply
    LayerNorm and dropout. Also returns the position ids it used."""

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        if getattr(config, "fix_word_embedding", None):
            # Freeze the token embedding table when requested by the config.
            self.word_embeddings.weight.requires_grad = False
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = (
            nn.Embedding(config.type_vocab_size, config.hidden_size)
            if config.type_vocab_size > 0 else None)
        # self.LayerNorm keeps the TF-style name so TF checkpoints still map.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        seq_length = input_shape[1]

        if position_ids is None:
            # Default: 0..seq_length-1 broadcast over the batch.
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.position_embeddings(position_ids)
        if self.token_type_embeddings:
            embeddings = embeddings + self.token_type_embeddings(token_type_ids)
        embeddings = self.dropout(self.LayerNorm(embeddings))
        return embeddings, position_ids
class BertSelfAttention(nn.Module):
    """Multi-head self-attention with an additive attention mask, optional
    relative-position bias, and an optional `split_lengths` mode in which
    attention is computed segment-by-segment so that each segment attends
    only to itself and earlier segments."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def multi_head_attention(self, query, key, value, attention_mask, rel_pos):
        """Standard scaled dot-product attention over already-projected
        query/key/value; attention_mask and rel_pos are added to the logits."""
        query_layer = self.transpose_for_scores(query)
        key_layer = self.transpose_for_scores(key)
        value_layer = self.transpose_for_scores(value)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        if rel_pos is not None:
            # Additive relative-position bias.
            attention_scores = attention_scores + rel_pos

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return (context_layer, attention_probs) if self.output_attentions else (context_layer,)

    def forward(self, hidden_states, attention_mask=None,
                encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        mixed_query_layer = self.query(hidden_states)
        if split_lengths:
            # Segment mode cannot also return attention maps.
            assert not self.output_attentions

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)

        if split_lengths:
            # Segment-wise attention: the key/value prefix grows with each
            # segment, so segment i's queries see keys of segments 0..i only;
            # the mask/bias are sliced to the matching query/key windows.
            query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
            key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
            value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)

            key = None
            value = None
            outputs = []
            sum_length = 0
            for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
                key = _key if key is None else torch.cat((key, _key), dim=1)
                value = _value if value is None else torch.cat((value, _value), dim=1)
                sum_length += part_length
                outputs.append(self.multi_head_attention(
                    query, key, value, attention_mask[:, :, sum_length - part_length: sum_length, :sum_length],
                    rel_pos=None if rel_pos is None else rel_pos[:, :, sum_length - part_length: sum_length, :sum_length],
                )[0])
            outputs = (torch.cat(outputs, dim=1), )
        else:
            outputs = self.multi_head_attention(
                mixed_query_layer, mixed_key_layer, mixed_value_layer,
                attention_mask, rel_pos=rel_pos)
        return outputs
class BertAttention(nn.Module):
    """Attention sublayer: BertSelfAttention followed by the residual output
    projection (BertSelfOutput)."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        inner = self.self(
            hidden_states, attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            split_lengths=split_lengths, rel_pos=rel_pos)
        projected = self.output(inner[0], hidden_states)
        # Pass through attention probabilities when the self layer emits them.
        return (projected,) + inner[1:]
class BertLayer(nn.Module):
    """One transformer block: attention then feed-forward, each sub-module
    applying its own residual connection and LayerNorm internally."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        attn_outputs = self.attention(
            hidden_states, attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        attn_hidden = attn_outputs[0]
        ffn_hidden = self.intermediate(attn_hidden)
        block_out = self.output(ffn_hidden, attn_hidden)
        # Forward any extra attention outputs unchanged.
        return (block_out,) + attn_outputs[1:]
class BertEncoder(nn.Module):
    """Stack of BertLayer blocks; optionally collects per-layer hidden
    states and attention maps as configured."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        collected_states = ()
        collected_attns = ()
        for block in self.layer:
            if self.output_hidden_states:
                # Record the input to each block.
                collected_states = collected_states + (hidden_states,)
            block_out = block(
                hidden_states, attention_mask,
                split_lengths=split_lengths, rel_pos=rel_pos)
            hidden_states = block_out[0]
            if self.output_attentions:
                collected_attns = collected_attns + (block_out[1],)
        if self.output_hidden_states:
            # Include the final block's output as well.
            collected_states = collected_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (collected_states,)
        if self.output_attentions:
            outputs = outputs + (collected_attns,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """
    Map signed relative positions to bucket ids (T5-style binning).

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    bucket = 0
    if bidirectional:
        # Half the buckets encode direction: positive offsets shift upward.
        num_buckets //= 2
        # mtf.to_int32(mtf.less(n, 0)) * num_buckets
        bucket += (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # now distance is in the range [0, inf)

    # First half of the remaining buckets: one bucket per exact offset.
    max_exact = num_buckets // 2
    is_small = distance < max_exact

    # Second half: logarithmically sized bins up to max_distance.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact) / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(log_bucket, torch.full_like(log_bucket, num_buckets - 1))

    bucket += torch.where(is_small, distance, log_bucket)
    return bucket
class TuringNLRv3Model(TuringNLRv3PreTrainedModel):
    r"""
    Bare TuringNLRv3 encoder: embeddings + transformer encoder, with an
    optional pooler and an optional learned relative-position attention bias.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    def __init__(self, config):
        super(TuringNLRv3Model, self).__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # Seq2seq fine-tuning never consumes the [CLS] pooled vector, so the
        # pooler is skipped for seq2seq configs (self.pooler stays None and
        # forward() returns the raw encoder outputs instead).
        if not isinstance(config, TuringNLRv3ForSeq2SeqConfig):
            self.pooler = BertPooler(config)
        else:
            self.pooler = None
        # Linear map from a one-hot relative-position bucket to one scalar
        # bias per attention head (no bias term by design).
        if self.config.rel_pos_bins > 0:
            self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
                position_ids=None, inputs_embeds=None, split_lengths=None):
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default: attend to every position.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        # Provided a padding mask of dimensions [batch_size, seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]
        # NOTE(review): if attention_mask has a rank other than 2 or 3,
        # extended_attention_mask is never assigned and the line below raises
        # UnboundLocalError -- confirm callers only ever pass 2-D/3-D masks.
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output, position_ids = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        # Relative positions -> buckets -> one-hot -> learned per-head bias,
        # permuted to (batch, heads, seq, seq) for the encoder layers.
        if self.config.rel_pos_bins > 0:
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
            rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
            rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        else:
            rel_pos = None
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        sequence_output = encoder_outputs[0]
        outputs = (sequence_output, ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        if self.pooler is None:
            return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
        else:
            pooled_output = self.pooler(sequence_output)
            return (sequence_output, pooled_output) + encoder_outputs[1:]
class LabelSmoothingLoss(_Loss):
    """
    KL-divergence loss against a label-smoothed target distribution.

    The smoothed target gives ``1 - label_smoothing`` to the gold token and
    spreads ``label_smoothing`` uniformly over the rest of the vocabulary
    (two slots -- the gold token and the ignored index -- are excluded from
    the uniform mass). Positions whose target equals ``ignore_index``
    contribute zero loss.
    """
    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)
        assert label_smoothing > 0
        assert tgt_vocab_size > 0
        # Uniform probability mass for each non-gold, non-ignored token.
        uniform_mass = label_smoothing / (tgt_vocab_size - 2)
        template = torch.full((tgt_vocab_size,), uniform_mass)
        template[self.ignore_index] = 0
        # Buffer keeps the template on the right device and in state_dict.
        self.register_buffer('one_hot', template.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size
    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes (log-probabilities)
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_logp = output.view(-1, self.tgt_vocab_size)
        flat_target = target.view(-1)
        # Materialize one smoothed distribution per position.
        smoothed = self.one_hot.float().repeat(flat_target.size(0), 1)
        smoothed.scatter_(1, flat_target.unsqueeze(1), self.confidence)
        # Ignored positions get an all-zero target => zero KL contribution.
        smoothed.masked_fill_((flat_target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(flat_logp, smoothed, reduction='none').view(batch_size, num_pos, -1).sum(2)
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform hidden states, then project to the vocabulary.

    The output projection is tied to the input word-embedding matrix passed in
    as ``decoder_weight``; only a per-token output bias is learned here.
    """
    def __init__(self, config, decoder_weight):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder_weight = decoder_weight
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        # (batch, seq, hidden) x (vocab, hidden)^T + bias -> vocabulary logits.
        return F.linear(transformed, weight=self.decoder_weight, bias=self.bias)
class BertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""
    def __init__(self, config, decoder_weight):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_weight)
    def forward(self, sequence_output):
        # Delegate straight to the LM head; returns vocabulary logits.
        return self.predictions(sequence_output)
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
    """Build a padding mask and position ids for variable-length rows.

    num_tokens: per-example valid-token counts, shape (batch,).
    max_len: padded sequence length.
    offset: optional per-example position offset (e.g. the source length when
        numbering target positions).
    Returns (mask, position_ids), both (batch, max_len); positions at padded
    slots are zeroed by the mask.
    """
    positions = torch.arange(
        0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
    # 1 where the slot index is inside the row's valid length, else 0.
    mask = (positions < num_tokens.view(-1, 1)).type_as(num_tokens)
    if offset is not None:
        positions = positions + offset.view(-1, 1)
    return mask, positions * mask
class TuringNLRv3ForSequenceToSequence(TuringNLRv3PreTrainedModel):
    """Common base for seq2seq fine-tuning heads: TuringNLRv3 encoder plus a
    weight-tied masked-LM head and the masked-LM criterion (label-smoothed
    when ``config.label_smoothing > 0``, plain cross-entropy otherwise)."""
    MODEL_NAME = 'basic class'
    def __init__(self, config):
        super(TuringNLRv3ForSequenceToSequence, self).__init__(config)
        self.bert = TuringNLRv3Model(config)
        # LM head shares its projection matrix with the input word embeddings.
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.init_weights()
        # NOTE(review): nn.LogSoftmax() without an explicit dim is deprecated,
        # and this attribute is not used by the forward passes visible in this
        # file -- confirm external usage before changing or removing it.
        self.log_softmax = nn.LogSoftmax()
        # Segment ids used to tag source vs. target tokens.
        self.source_type_id = config.source_type_id
        self.target_type_id = config.target_type_id
        # Exactly one of the two criteria is active, depending on the config.
        if config.label_smoothing > 0:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
            self.crit_mask_lm = None
        else:
            self.crit_mask_lm_smoothed = None
            self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
class TuringNLRv3ForSequenceToSequenceWithPseudoMask(TuringNLRv3ForSequenceToSequence):
    """Seq2seq fine-tuning with the pseudo-mask scheme: the model input is
    [source | gold target | pseudo target]; every pseudo position is scored
    against the gold label while attending only to the allowed prefix, so all
    target positions are trained in a single forward pass."""
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceWithPseudoMask"
    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        """Build the (batch, from, to) 0/1 attention mask over the
        concatenated [source | target | pseudo] sequence.

        Per-position weights are 0 for source, +span_id for gold target
        tokens, and -span_id for their pseudo copies; the comparisons below
        let gold positions see source plus earlier gold tokens, and pseudo
        positions see source, strictly-earlier gold tokens, and themselves.
        """
        weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)
        # Keys that are valid (non-padding) source/gold-target tokens.
        true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
        true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight)
        pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight)
        pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight))
        return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)
    def forward(
            self, source_ids, target_ids, label_ids, pseudo_ids,
            num_source_tokens, num_target_tokens, target_span_ids=None, target_no_offset=None):
        """Return the scalar pseudo-masked LM loss for one batch."""
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        pseudo_len = pseudo_ids.size(1)
        assert target_len == pseudo_len
        assert source_len > 0 and target_len > 0
        split_lengths = (source_len, target_len, pseudo_len)
        # Concatenate [source | gold target | pseudo target] along seq axis.
        input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)
        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id,
             torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)
        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=None if target_no_offset else num_source_tokens)
        # Pseudo tokens reuse the gold target's positions.
        position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1)
        if target_span_ids is None:
            target_span_ids = target_position_ids
        attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids)
        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)
        sequence_output = outputs[0]
        # Only the pseudo-token slice is scored by the LM head.
        pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]
        def loss_mask_and_normalize(loss, mask):
            # Zero padding positions and normalize by the valid-token count.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        prediction_scores_masked = self.cls(pseudo_sequence_output)
        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), label_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), label_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), target_mask)
        return pseudo_lm_loss
class TuringNLRv3ForSequenceToSequenceUniLMV1(TuringNLRv3ForSequenceToSequence):
    """Seq2seq fine-tuning in the UniLM-v1 style: the target is corrupted at
    sampled positions (see the dataset's masking) and only those positions are
    predicted, under a prefix-LM attention mask over [source | target]."""
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceUniLMV1"
    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        """Prefix-LM mask over [source | target]: queries may attend to valid
        tokens whose weight (0 for source, span id for target) is <= their
        own -- i.e. the whole source plus target tokens up to their position."""
        weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)
        true_tokens = torch.cat((source_mask, target_mask), dim=1).unsqueeze(1)
        return ((true_tokens == 1) & (to_weight <= from_weight)).type_as(source_mask)
    def forward(self, source_ids, target_ids, masked_ids, masked_pos, masked_weight, num_source_tokens, num_target_tokens):
        """Return the masked-LM loss over the sampled masked target positions."""
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        split_lengths = (source_len, target_len)
        input_ids = torch.cat((source_ids, target_ids), dim=1)
        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id), dim=1)
        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=num_source_tokens)
        position_ids = torch.cat((source_position_ids, target_position_ids), dim=1)
        attention_mask = self.create_attention_mask(
            source_mask, target_mask, source_position_ids, target_position_ids)
        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)
        def gather_seq_out_by_pos(seq, pos):
            # Select the hidden states at the masked positions.
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
        sequence_output = outputs[0]
        target_sequence_output = sequence_output[:, source_len:, ]
        masked_sequence_output = gather_seq_out_by_pos(target_sequence_output, masked_pos)
        def loss_mask_and_normalize(loss, mask):
            # Zero unused mask slots and normalize by the prediction count.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        prediction_scores_masked = self.cls(masked_sequence_output)
        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weight)
        return pseudo_lm_loss
class TuringNLRv3ForSequenceClassification(TuringNLRv3PreTrainedModel):
    """TuringNLRv3 encoder with a dropout + linear classification head on the
    pooled output (regression when ``config.num_labels == 1``)."""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = TuringNLRv3Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
                Classification (or regression if config.num_labels==1) loss.
            logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
                Classification (or regression if config.num_labels==1) scores (before SoftMax).
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            from transformers import BertTokenizer, BertForSequenceClassification
            import torch
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            # head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1] is the pooled [CLS] representation from the pooler.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # NOTE(review): unlike stock HF BERT (which prepends to outputs[2:]),
        # the full encoder tuple is kept here -- this matches the return-value
        # comment at the bottom of this method.
        outputs = (logits,) + outputs[:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = nn.MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, last_hidden_state, pooled_output, (hidden_states), (attentions)
| 37,949 | 46.319202 | 146 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/utils.py | from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import torch
import tqdm
import array
import collections
import torch.utils.data
from transformers.file_utils import WEIGHTS_NAME
# lmdb is optional: it is only required when features are cached in an LMDB
# database (see DocDB / load_and_cache_examples below). Catch only
# ImportError so that KeyboardInterrupt/SystemExit and genuine errors from a
# broken installation are not silently swallowed (the original bare `except:`
# caught everything).
try:
    import lmdb
except ImportError:
    pass
OPTIM_NAME = "optimizer.bin"
logger = logging.getLogger(__name__)
class TrainingExample(object):
    """One seq2seq training example: tokenized source and target id lists
    plus the example's index in the dataset."""
    def __init__(self, source_ids, target_ids, example_id):
        # Token id sequences for the encoder input and decoder output.
        self.source_ids = source_ids
        self.target_ids = target_ids
        # Position within the dataset (None when re-loaded from LMDB).
        self.example_id = example_id
class Seq2seqDatasetForTuringNLRv3(torch.utils.data.Dataset):
    """Map-style dataset producing seq2seq fine-tuning instances.

    Each item is built from a TrainingExample by truncating/padding source and
    target, then applying one of three target-corruption schemes selected by
    ``finetuning_method``:
      * 'v0' -- UniLM-style: every target position is a prediction site
        (labels are the next tokens), with random corruption of the inputs.
      * 'v1' -- mask a random, bounded subset of target positions.
      * 'v2' -- pseudo-mask: emit a fully corrupted pseudo copy of the target
        alongside the (optionally corrupted) real target and the gold labels.
    ``num_training_instances`` fixes the epoch length; indexing wraps around
    the feature list starting at ``offset``.
    """
    def __init__(
            self, features, max_source_len, max_target_len,
            vocab_size, cls_id, sep_id, pad_id, mask_id,
            random_prob, keep_prob, offset, num_training_instances,
            finetuning_method='v1', target_mask_prob=-1.0, num_max_mask_token=0,
            source_mask_prob=-1.0,
    ):
        self.features = features
        self.max_source_len = max_source_len
        self.max_target_len = max_target_len
        self.offset = offset
        if offset > 0:
            logger.info(
                " **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
        # Special-token ids and corruption hyper-parameters.
        self.cls_id = cls_id
        self.sep_id = sep_id
        self.pad_id = pad_id
        self.random_prob = random_prob
        self.keep_prob = keep_prob
        self.mask_id = mask_id
        self.vocab_size = vocab_size
        self.num_training_instances = num_training_instances
        self.target_mask_prob = target_mask_prob
        # v0 predicts every target position, so reserve one mask slot each.
        if finetuning_method == 'v0':
            num_max_mask_token = self.max_target_len
            logger.info("Mask way v0: set num_max_mask_token = %d" %
                        num_max_mask_token)
        self.num_max_mask_token = num_max_mask_token
        self.finetuning_method = finetuning_method
        assert finetuning_method in ('v0', 'v1', 'v2')
        self.source_mask_prob = source_mask_prob
    def __len__(self):
        # Epoch length is configured explicitly, not len(self.features).
        return self.num_training_instances
    def __trunk(self, ids, max_len, append_sep=True):
        """Truncate ``ids`` to ``max_len``; when ``append_sep`` the [SEP]
        token is appended and its slot reserved inside the budget."""
        if append_sep:
            max_len -= 1
        if len(ids) > max_len:
            ids = ids[:max_len]
        if append_sep:
            ids = ids + [self.sep_id]
        return ids
    def __pad(self, ids, max_len):
        """Right-pad ``ids`` with [PAD] up to exactly ``max_len``."""
        if len(ids) < max_len:
            return ids + [self.pad_id] * (max_len - len(ids))
        else:
            assert len(ids) == max_len
            return ids
    def get_masked_token(self, tk_id):
        """BERT-style corruption of one token: keep it with ``keep_prob``,
        replace by a random vocab id with ``random_prob``, else [MASK]."""
        p = random.random()
        if p < self.keep_prob:
            return tk_id
        elif p < self.keep_prob + self.random_prob:
            return random.randint(0, self.vocab_size - 1)
        else:
            return self.mask_id
    def __getitem__(self, _idx):
        """Build one training instance; the tuple layout depends on
        ``finetuning_method`` (see class docstring)."""
        idx = (self.offset + _idx) % len(self.features)
        # print("%d get %d" % (_idx, idx))
        feature = self.features[idx]
        # v0 carries [SEP] inside the target instead of appending it here.
        source_ids = self.__trunk([self.cls_id] + feature.source_ids,
                                  self.max_source_len, append_sep=self.finetuning_method != 'v0')
        target_ids = feature.target_ids
        if self.finetuning_method == 'v0':
            target_ids = [self.sep_id] + target_ids
        target_ids = self.__trunk(
            target_ids, self.max_target_len, append_sep=self.finetuning_method != 'v0')
        num_source_tokens = len(source_ids)
        num_target_tokens = len(target_ids)
        # Optionally corrupt source tokens too (but never [CLS]/[SEP]).
        if self.source_mask_prob > 0:
            for i in range(num_source_tokens):
                tk_id = source_ids[i]
                if tk_id != self.cls_id and tk_id != self.sep_id:
                    r = random.random()
                    if r < self.source_mask_prob:
                        source_ids[i] = self.get_masked_token(tk_id)
        source_ids = self.__pad(source_ids, self.max_source_len)
        target_ids = self.__pad(target_ids, self.max_target_len)
        if self.finetuning_method == 'v0':
            # v0: label at each position is the next token ([SEP] at the end);
            # positions after the first may be corrupted in place.
            masked_pos = []
            masked_ids = []
            masked_weights = []
            for pos in range(num_target_tokens):
                if pos + 1 != num_target_tokens:
                    masked_ids.append(target_ids[pos + 1])
                else:
                    masked_ids.append(self.sep_id)
                masked_pos.append(pos)
                masked_weights.append(1)
                r = random.random()
                if r < self.target_mask_prob and pos > 0:
                    target_ids[pos] = self.get_masked_token(target_ids[pos])
            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)
            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v1':
            # v1: sample a bounded random subset of positions to corrupt.
            masked_pos = list(range(num_target_tokens))
            random.shuffle(masked_pos)
            num_masked_token = \
                min(self.num_max_mask_token, int(
                    self.target_mask_prob * num_target_tokens))
            if num_masked_token <= 0:
                num_masked_token = 1
            masked_pos = masked_pos[:num_masked_token]
            masked_ids = []
            masked_weights = []
            for pos in masked_pos:
                masked_ids.append(target_ids[pos])
                target_ids[pos] = self.get_masked_token(target_ids[pos])
                masked_weights.append(1)
            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)
            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v2':
            # v2: a fully corrupted pseudo copy of the target plus gold
            # labels; the real target itself is corrupted with
            # probability target_mask_prob per position.
            pseudo_ids = []
            label_ids = []
            for pos in range(num_target_tokens):
                tk_id = target_ids[pos]
                masked_tk_id = self.get_masked_token(tk_id)
                pseudo_ids.append(masked_tk_id)
                label_ids.append(tk_id)
                r = random.random()
                if r < self.target_mask_prob:
                    target_ids[pos] = masked_tk_id
            label_ids = self.__pad(label_ids, self.max_target_len)
            pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)
            return source_ids, target_ids, label_ids, pseudo_ids, num_source_tokens, num_target_tokens
def batch_list_to_batch_tensors(batch):
    """Collate a list of examples (tuples of fields) into batched tensors.

    Tensor-valued fields are stacked along a new batch dimension; any other
    field (ints, lists of ints) is wrapped into a new LongTensor.
    """
    def _collate(field):
        if isinstance(field[0], torch.Tensor):
            return torch.stack(field)
        return torch.tensor(field, dtype=torch.long)

    return [_collate(field) for field in zip(*batch)]
def get_max_epoch_model(output_dir):
    """Return the largest checkpoint step having BOTH model and optimizer
    files under ``output_dir/ckpt-<step>/``, or None when no complete
    checkpoint exists."""
    model_files = glob.glob(os.path.join(
        output_dir, "ckpt-*/%s" % WEIGHTS_NAME))
    optim_files = glob.glob(os.path.join(
        output_dir, "ckpt-*/%s" % OPTIM_NAME))
    if (not model_files) or (not optim_files):
        return None

    def _steps(paths):
        # "ckpt-<step>" directory name -> integer step.
        return set(int(os.path.dirname(p).split('-')[-1]) for p in paths)

    complete = _steps(model_files) & _steps(optim_files)
    return max(complete) if complete else None
def get_checkpoint_state_dict(output_dir, ckpt):
    """Load model + optimizer state for checkpoint ``ckpt`` onto the CPU.

    Returns the optimizer checkpoint dict with the model's state_dict added
    under the 'model' key.
    """
    ckpt_dir = os.path.join(output_dir, "ckpt-%d" % ckpt)
    model_path = os.path.join(ckpt_dir, WEIGHTS_NAME)
    logger.info(" ** Recover model checkpoint in %s ** ", model_path)
    model_state_dict = torch.load(model_path, map_location='cpu')
    state = torch.load(os.path.join(ckpt_dir, OPTIM_NAME), map_location='cpu')
    state['model'] = model_state_dict
    return state
def report_length(length_counter, total_count):
    """Log a histogram of sequence lengths in buckets of 16, with a
    cumulative percentage.

    Parameters:
        length_counter: mapping length -> count (e.g. a defaultdict(int)).
        total_count: total number of examples, used for the cumulative %.
    """
    max_len = max(length_counter.keys())
    bucket_start = 0
    cumulative = 0
    while bucket_start < max_len:
        # .get() so plain dicts work (no KeyError) and defaultdicts are not
        # polluted with zero entries for lengths that never occurred.
        bucket_count = sum(
            length_counter.get(bucket_start + i, 0) for i in range(16))
        cumulative += bucket_count
        if bucket_count > 0:
            logger.info("%d ~ %d = %d, %.2f%%" %
                        (bucket_start, bucket_start + 16, bucket_count,
                         (cumulative * 100.0) / total_count))
        bucket_start += 16
def serialize_str(x):
    """Encode the string form of ``x`` as ASCII bytes (used for LMDB values)."""
    return str(x).encode('ascii')
def serialize_array(x, dtype):
    """Pack a list of numbers into raw bytes using array typecode ``dtype``
    (e.g. 'h' for signed 16-bit)."""
    return array.array(dtype, x).tobytes()
def write_to_lmdb(db, key, value):
    """Put ``key``/``value`` into the LMDB environment, growing the map on
    demand.

    lmdb aborts a write transaction with MapFullError when the memory map is
    exhausted; in that case the map size is doubled and the put retried until
    it succeeds.
    """
    success = False
    while not success:
        txn = db.begin(write=True)
        try:
            txn.put(key, value)
            txn.commit()
            success = True
        except lmdb.MapFullError:
            txn.abort()
            # double the map_size
            curr_limit = db.info()['map_size']
            new_limit = curr_limit*2
            print('>>> Doubling LMDB map size to %sMB ...' %
                  (new_limit >> 20,))
            db.set_mapsize(new_limit)  # double it
def deserialize_str(x):
    """Inverse of serialize_str: decode ASCII bytes back into a str."""
    return str(x, 'ascii')
class DocDB(object):
    """Read-only view over an LMDB feature cache written by
    load_and_cache_examples; indexable like a list of TrainingExample."""
    def __init__(self, db_path):
        self.db_path = db_path
        self.env = lmdb.open(db_path, readonly=True,
                             lock=False, readahead=False, meminit=False)
        # Metadata keys written by the cache builder: start index, item
        # count, and the array typecode used to serialize the id lists.
        with self.env.begin(write=False) as txn:
            self.start_key_index = int(deserialize_str(txn.get(b'__start__')))
            self.size = int(deserialize_str(txn.get(b'__size__')))
            self.dtype = deserialize_str(txn.get(b'__dtype__'))
    def _deserialize_array(self, x):
        # Inverse of serialize_array for this DB's typecode.
        data = array.array(self.dtype)
        data.frombytes(x)
        return data.tolist()
    def __getitem__(self, doc_id):
        with self.env.begin(write=False) as txn:
            # example = {
            #     "source_ids": self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)),
            #     "target_ids": self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)),
            # }
            example = TrainingExample(
                source_ids=self._deserialize_array(
                    txn.get(b"src_ids_%d" % doc_id)),
                target_ids=self._deserialize_array(
                    txn.get(b"tgt_ids_%d" % doc_id)),
                example_id=None,
            )
        return example
    def __len__(self):
        return self.size
def load_and_cache_examples(
        example_file, tokenizer, local_rank, cached_features_file, shuffle=True,
        lmdb_cache=None, lmdb_dtype='h', eval_mode=False):
    """Load, tokenize, and cache seq2seq training features.

    Lookup order: (1) an existing torch-serialized cache file, (2) an existing
    LMDB cache directory, (3) parse ``example_file`` (one JSON object per line
    with "src"/"tgt" fields, given either as raw strings or pre-tokenized
    lists) and optionally write the cache. Returns a list of TrainingExample,
    a DocDB, or the LMDB cache path (when this rank just wrote the LMDB).
    In ``eval_mode`` targets are left empty. In distributed runs only rank 0
    builds the cache while other ranks wait at the barrier.
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    if cached_features_file is not None and os.path.isfile(cached_features_file):
        logger.info("Loading features from cached file %s",
                    cached_features_file)
        features = torch.load(cached_features_file)
    elif cached_features_file is not None and os.path.isdir(cached_features_file) \
            and os.path.exists(os.path.join(cached_features_file, 'lock.mdb')):
        logger.info("Loading features from cached LMDB %s",
                    cached_features_file)
        features = DocDB(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", example_file)
        examples = []
        with open(example_file, mode="r", encoding="utf-8") as reader:
            for line in reader:
                examples.append(json.loads(line))
        features = []
        # Source/target length histograms for the report below.
        slc = collections.defaultdict(int)
        tlc = collections.defaultdict(int)
        for example in tqdm.tqdm(examples):
            # "src"/"tgt" may be pre-tokenized lists or raw strings.
            if isinstance(example["src"], list):
                source_tokens = example["src"]
                target_tokens = [] if eval_mode else example["tgt"]
            else:
                source_tokens = tokenizer.tokenize(example["src"])
                target_tokens = [] if eval_mode else tokenizer.tokenize(
                    example["tgt"])
            source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
            target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
            slc[len(source_ids)] += 1
            tlc[len(target_ids)] += 1
            features.append(
                TrainingExample(
                    source_ids=source_ids,
                    target_ids=target_ids,
                    example_id=len(features),
                )
            )
        if shuffle:
            random.shuffle(features)
            logger.info("Shuffle the features !")
        logger.info("Source length:")
        report_length(slc, total_count=len(examples))
        logger.info("Target length:")
        report_length(tlc, total_count=len(examples))
        if local_rank in [-1, 0] and cached_features_file is not None:
            if lmdb_cache:
                # Persist per-example id arrays plus metadata keys into LMDB.
                db = lmdb.open(cached_features_file,
                               readonly=False, map_async=True)
                for idx, feature in enumerate(features):
                    write_to_lmdb(
                        db, b"src_ids_%d" % idx,
                        serialize_array(feature.source_ids, dtype=lmdb_dtype))
                    write_to_lmdb(
                        db, b"tgt_ids_%d" % idx,
                        serialize_array(feature.target_ids, dtype=lmdb_dtype))
                write_to_lmdb(db, b"__start__", serialize_str(0))
                write_to_lmdb(db, b"__size__", serialize_str(len(features)))
                write_to_lmdb(db, b"__dtype__", serialize_str(lmdb_dtype))
                db.sync()
                db.close()
                logger.info("db_key_idx = %d" % len(features))
                del features
                # Hand back the path; callers reopen it via DocDB.
                features = cached_features_file
                logger.info("Saving features into cached lmdb dir %s",
                            cached_features_file)
            else:
                logger.info("Saving features into cached file %s",
                            cached_features_file)
                torch.save(features, cached_features_file)
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()
    return features
| 14,533 | 35.888325 | 119 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/modeling_decoding.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.

    The smoothed target gives ``1 - label_smoothing`` to the gold token and
    spreads ``label_smoothing`` uniformly over the remaining vocabulary
    (excluding the ignored index); ignored positions contribute zero loss.
    """
    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
                 reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)
        assert label_smoothing > 0
        assert tgt_vocab_size > 0
        # Uniform mass per non-gold token; -2 reserves gold + ignored slots.
        smoothing_value = label_smoothing / (tgt_vocab_size - 2)
        one_hot = torch.full((tgt_vocab_size,), smoothing_value)
        one_hot[self.ignore_index] = 0
        self.register_buffer('one_hot', one_hot.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size
    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes (log-probabilities)
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        output = output.view(-1, self.tgt_vocab_size)
        target = target.view(-1)
        # NOTE(review): the buffer is repeated without an explicit .float()
        # cast (unlike the training-side variant of this class) -- confirm
        # this is intended if the module is ever cast to half precision.
        model_prob = self.one_hot.repeat(target.size(0), 1)
        model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
        model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
logger = logging.getLogger(__name__)
from transformers import WEIGHTS_NAME
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # Gaussian CDF evaluated at x, times x.
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x) (also known as SiLU)."""
    return torch.sigmoid(x) * x
# Dispatch table: activation-name string (as stored in configs) -> callable.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.

    Can be built from an explicit vocabulary size plus keyword overrides, from
    a dict (``from_dict``), or from a JSON file (``from_json_file`` or by
    passing the path as the first constructor argument).
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 relax_projection=0,
                 new_pos_ids=False,
                 initializer_range=0.02,
                 task_idx=None,
                 fp32_embedding=False,
                 ffn_type=0,
                 label_smoothing=None,
                 num_qkv=0,
                 seg_emb=False,
                 source_type_id=0,
                 target_type_id=1,
                 rel_pos_bins=0,
                 max_rel_pos=0, **kwargs):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
                or the path of a JSON config file to load every field from.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Path to a JSON config file: adopt every stored field verbatim.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.relax_projection = relax_projection
            self.new_pos_ids = new_pos_ids
            self.initializer_range = initializer_range
            self.task_idx = task_idx
            self.fp32_embedding = fp32_embedding
            self.ffn_type = ffn_type
            self.label_smoothing = label_smoothing
            self.num_qkv = num_qkv
            self.seg_emb = seg_emb
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
            self.max_rel_pos = max_rel_pos
            self.rel_pos_bins = rel_pos_bins
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters.

        Uses ``cls`` (rather than hard-coding BertConfig, as before) so that
        subclasses round-trip through from_dict/from_json_file as their own
        type.
        """
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")

    class BertLayerNorm(nn.Module):
        """TF-style layer normalization (epsilon inside the square root)."""

        def __init__(self, hidden_size, eps=1e-5):
            super(BertLayerNorm, self).__init__()
            # gain/offset named weight/bias so TF checkpoints map cleanly
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            mean = x.mean(-1, keepdim=True)
            var = (x - mean).pow(2).mean(-1, keepdim=True)
            normed = (x - mean) / torch.sqrt(var + self.variance_epsilon)
            return self.weight * normed + self.bias
class PositionalEmbedding(nn.Module):
    """Sinusoidal (Transformer-XL style) positional embedding."""

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # frequencies 1/10000^(2i/d); a buffer so it follows .to()/.cuda()
        freqs = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', freqs)

    def forward(self, pos_seq, bsz=None):
        # outer product -> (positions, demb/2) phase angles
        angles = torch.ger(pos_seq, self.inv_freq)
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)
        emb = emb[:, None, :]
        # broadcast over an explicit batch dimension when requested
        return emb.expand(-1, bsz, -1) if bsz is not None else emb
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Sums word, position and (optional) token-type embeddings, then applies
    layer norm and dropout.  When ``config.new_pos_ids`` is truthy, four
    position-embedding tables are packed side by side and one is selected
    per example via ``task_idx``.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size)
        # type_vocab_size == 0 disables segment embeddings entirely
        if config.type_vocab_size == 0:
            self.token_type_embeddings = None
        else:
            self.token_type_embeddings = nn.Embedding(
                config.type_vocab_size, config.hidden_size)
        if hasattr(config, 'fp32_embedding'):
            self.fp32_embedding = config.fp32_embedding
        else:
            self.fp32_embedding = False
        # new_pos_ids packs 4 task-specific position tables into one Embedding
        if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
            self.num_pos_emb = 4
        else:
            self.num_pos_emb = 1
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
        # position_ids default to 0..seq_len-1 for every example in the batch
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(
                seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        if self.num_pos_emb > 1:
            # select the per-example task table:
            # (batch, pos, num_pos_emb*hid) -> (batch, pos, num_pos_emb, hid) -> (batch, pos, hid)
            num_batch = position_embeddings.size(0)
            num_pos = position_embeddings.size(1)
            position_embeddings = position_embeddings.view(
                num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        embeddings = words_embeddings + position_embeddings
        if self.token_type_embeddings is not None:
            embeddings = embeddings + self.token_type_embeddings(token_type_ids)
        # NOTE(review): despite the flag's name, fp32_embedding casts the sum
        # to HALF precision here -- presumably the embedding tables are kept
        # fp32 while the rest of the model runs fp16; confirm against the
        # training code before relying on this.
        if self.fp32_embedding:
            embeddings = embeddings.half()
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head self-attention with UniLM extensions.

    Extensions over vanilla BERT attention:
      * ``num_qkv > 1``: several parallel Q/K/V projections, one selected
        per token via ``mask_qkv``.
      * incremental decoding: ``history_states`` / ``key_cache`` /
        ``value_cache`` / ``key_history`` / ``value_history`` let callers
        reuse keys and values from earlier decoding steps.
      * optional segment-aware score bias (``seg_emb`` / ``b_q_s``) and an
        additive relative-position bias (``rel_pos``).
    """
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
            self.num_qkv = config.num_qkv
        else:
            self.num_qkv = 1
        # projections are widened by num_qkv so each token can pick one set
        self.query = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.key = nn.Linear(config.hidden_size,
                             self.all_head_size * self.num_qkv)
        self.value = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # debug hook: accumulate averaged attention maps when env var is set
        self.uni_debug_flag = True if os.getenv(
            'UNI_DEBUG_FLAG', '') else False
        if self.uni_debug_flag:
            self.register_buffer('debug_attention_probs',
                                 torch.zeros((512, 512)))
        if hasattr(config, 'seg_emb') and config.seg_emb:
            # learned per-head query bias plus segment embedding used by the
            # segment-aware attention term in forward()
            self.b_q_s = nn.Parameter(torch.zeros(
                1, self.num_attention_heads, 1, self.attention_head_size))
            self.seg_emb = nn.Embedding(
                config.type_vocab_size, self.all_head_size)
        else:
            self.b_q_s = None
            self.seg_emb = None
    def transpose_for_scores(self, x, mask_qkv=None):
        """Reshape (batch, pos, hid) to (batch, head, pos, head_hid),
        selecting one of the num_qkv projections when applicable."""
        if self.num_qkv > 1:
            # NOTE(review): the shape comment below says the view target is
            # (batch, pos, num_qkv, head, head_hid), which would require
            # self.attention_head_size in the tuple instead of
            # self.all_head_size; verify before using num_qkv > 1 with more
            # than one attention head.
            sz = x.size()[:-1] + (self.num_qkv,
                                  self.num_attention_heads, self.all_head_size)
            # (batch, pos, num_qkv, head, head_hid)
            x = x.view(*sz)
            if mask_qkv is None:
                x = x[:, :, 0, :, :]
            elif isinstance(mask_qkv, int):
                x = x[:, :, mask_qkv, :, :]
            else:
                # mask_qkv: (batch, pos)
                if mask_qkv.size(1) > sz[1]:
                    mask_qkv = mask_qkv[:, :sz[1]]
                # -> x: (batch, pos, head, head_hid)
                x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
                    sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
        else:
            sz = x.size()[:-1] + (self.num_attention_heads,
                                  self.attention_head_size)
            # (batch, pos, head, head_hid)
            x = x.view(*sz)
        # (batch, head, pos, head_hid)
        return x.permute(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
                key_cache=None, value_cache=None, rel_pos=None,
                ):
        if history_states is None:
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(hidden_states, self.key.weight)
            mixed_value_layer = self.value(hidden_states)
        else:
            # incremental decoding: K/V span history + current step,
            # queries only the current step
            x_states = torch.cat((history_states, hidden_states), dim=1)
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(x_states, self.key.weight)
            mixed_value_layer = self.value(x_states)
        # external list caches: append this step's projections, then attend
        # over the concatenation of all cached steps
        if key_cache is not None and isinstance(key_cache, list):
            key_cache.append(mixed_key_layer)
            mixed_key_layer = torch.cat(key_cache, dim=1)
        if value_cache is not None and isinstance(value_cache, list):
            value_cache.append(mixed_value_layer)
            mixed_value_layer = torch.cat(value_cache, dim=1)
        query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
        key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
        value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)
        # a non-list key_history is a tensor of past keys/values to prepend
        if key_history is not None and not isinstance(key_history, list):
            key_layer = torch.cat((key_history, key_layer), dim=-2)
            value_layer = torch.cat((value_history, value_layer), dim=-2)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch, head, pos, pos); query is pre-scaled by 1/sqrt(head_hid)
        attention_scores = torch.matmul(
            query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        if rel_pos is not None:
            attention_scores = attention_scores + rel_pos
        if self.seg_emb is not None:
            # segment-aware bias: (q + b_q_s) . seg_rep added to the scores
            seg_rep = self.seg_emb(seg_ids)
            # (batch, pos, head, head_hid)
            seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
                1), self.num_attention_heads, self.attention_head_size)
            qs = torch.einsum('bnih,bjnh->bnij',
                              query_layer + self.b_q_s, seg_rep)
            attention_scores = attention_scores + qs
        # attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if self.uni_debug_flag:
            # keep a head-averaged copy of the first example's attention map
            _pos = attention_probs.size(-1)
            self.debug_attention_probs[:_pos, :_pos].copy_(
                attention_probs[0].mean(0).view(_pos, _pos))
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # back to (batch, pos, hid)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        # list-style histories: record this step's K/V for later reuse
        if isinstance(key_history, list):
            key_history.append(key_layer)
        if isinstance(value_history, list):
            value_history.append(value_layer)
        return context_layer
class BertSelfOutput(nn.Module):
    """Projects attention output back to hidden size, then applies dropout,
    a residual connection and layer normalization."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # residual connection followed by layer norm
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sub-block: `BertSelfAttention` followed by its output
    projection/residual (`BertSelfOutput`)."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        context = self.self(
            input_tensor, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        return self.output(context, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a key into ACT2FN or already a callable
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction back to hidden size, with dropout,
    residual connection and layer normalization."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class TransformerFFN(nn.Module):
    """Gated feed-forward alternative to the standard BERT FFN.

    ffn_type 1: out = W_o(W_0(x) * x)
    ffn_type 2: out = W_o(W_0(x) * W_1(x))
    Either result goes through dropout, a residual connection and layer norm.
    """

    def __init__(self, config):
        super(TransformerFFN, self).__init__()
        self.ffn_type = config.ffn_type
        assert self.ffn_type in (1, 2)
        if self.ffn_type in (1, 2):
            self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (2,):
            self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (1, 2):
            self.output = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, x):
        if self.ffn_type in (1, 2):
            gate = self.wx0(x)
            # type 1 gates the raw input; type 2 gates a second projection
            other = x if self.ffn_type == 1 else self.wx1(x)
            out = self.output(gate * other)
        out = self.dropout(out)
        return self.LayerNorm(out + x)
class BertLayer(nn.Module):
    """One transformer block: attention, then either the gated
    `TransformerFFN` (when config.ffn_type is truthy) or the standard
    intermediate/output feed-forward pair."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.ffn_type = config.ffn_type
        if self.ffn_type:
            self.ffn = TransformerFFN(config)
        else:
            self.intermediate = BertIntermediate(config)
            self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        attention_output = self.attention(
            hidden_states, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        if self.ffn_type:
            return self.ffn(attention_output)
        return self.output(self.intermediate(attention_output), attention_output)
class BertEncoder(nn.Module):
    """Stack of identical `BertLayer`s with optional incremental decoding.

    Two modes in forward():
      * history mode (prev_embedding/prev_encoded_layers given): layer i
        attends over the previously cached states of layer i-1.
      * normal mode, optionally threading per-layer key/value history lists
        through the stack: while a history list is shorter than the stack
        it is passed down to be filled; once full, entry i feeds layer i.
    """
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer)
                                    for _ in range(config.num_hidden_layers)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None,
                seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        # history embedding and encoded layers must be simultaneously given
        assert (prev_embedding is None) == (prev_encoded_layers is None)
        all_encoder_layers = []
        if (prev_embedding is not None) and (prev_encoded_layers is not None):
            history_states = prev_embedding
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(
                    hidden_states, attention_mask, history_states=history_states,
                    mask_qkv=mask_qkv, seg_ids=seg_ids, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
                if prev_encoded_layers is not None:
                    history_states = prev_encoded_layers[i]
        else:
            for i, layer_module in enumerate(self.layer):
                set_key = None
                if isinstance(key_history, list):
                    set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
                set_value = None
                if isinstance(value_history, list):
                    # BUGFIX: this previously tested len(key_history) (copy-paste
                    # error), which mis-gated the value history and raised a
                    # TypeError when only value_history was a list.
                    set_value = value_history if len(value_history) < len(self.layer) else value_history[i]
                hidden_states = layer_module(
                    hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
                    key_history=set_key, value_history=set_value, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pools the sequence by passing the first token's hidden state
    through a dense layer with tanh activation."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + layer norm applied before the LM decoder.

    When ``config.relax_projection > 1`` the dense layer emits that many
    stacked projections (hidden_size * relax_projection); one of them is
    selected later per task.
    """

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        # hidden_act may be a key into ACT2FN or already a callable
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        out_size = config.hidden_size
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            out_size *= config.relax_projection
        self.dense = nn.Linear(config.hidden_size, out_size)
        self.LayerNorm = BertLayerNorm(out_size, eps=1e-5)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        return self.LayerNorm(hidden_states)
class BertLMPredictionHead(nn.Module):
    """LM output head whose decoder weight is tied to the input embeddings.

    Applies `BertPredictionHeadTransform`, optionally selects one of
    ``relax_projection`` parallel projections per ``task_idx``, then
    decodes to vocabulary logits with a separate output bias.
    """
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(
            bert_model_embedding_weights.size(0)))
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            self.relax_projection = config.relax_projection
        else:
            self.relax_projection = 0
        self.fp32_embedding = config.fp32_embedding
        # closure used to cast activations when fp32_embedding is enabled.
        # NOTE(review): it casts to HALF precision despite the fp32 name,
        # mirroring BertEmbeddings -- confirm intent against training code.
        def convert_to_type(tensor):
            if self.fp32_embedding:
                return tensor.half()
            else:
                return tensor
        self.type_converter = convert_to_type
        self.converted = False
    def forward(self, hidden_states, task_idx=None):
        # lazily cast the transform sub-module once, on the first call
        if not self.converted:
            self.converted = True
            if self.fp32_embedding:
                self.transform.half()
        hidden_states = self.transform(self.type_converter(hidden_states))
        if self.relax_projection > 1:
            num_batch = hidden_states.size(0)
            num_pos = hidden_states.size(1)
            # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
            hidden_states = hidden_states.view(
                num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        if self.fp32_embedding:
            # decode with explicitly cast weight/bias to keep dtypes aligned
            hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
                self.decoder.weight), self.type_converter(self.bias))
        else:
            hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class BertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence classifier)."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head only: a binary classifier over the
    pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Combined pre-training heads: masked-LM predictions plus a
    sentence-relationship classifier over the pooled output."""

    def __init__(self, config, bert_model_embedding_weights, num_labels=2):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, num_labels)

    def forward(self, sequence_output, pooled_output, task_idx=None):
        prediction_scores = self.predictions(sequence_output, task_idx)
        # pooled_output may be None when the caller skips the sentence-level head
        seq_relationship_score = (
            None if pooled_output is None
            else self.seq_relationship(pooled_output))
        return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedBertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            # module.weight.data.copy_(torch.Tensor(
            #     truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
        elif isinstance(module, BertLayerNorm):
            # layer norm starts as identity: zero shift, unit gain
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-base-multilingual`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        logger.info("Model config {}".format(config))
        # clean the arguments in kwargs: these are config-building options that
        # the concrete model constructors do not accept
        for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
                          'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
                          'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
                          'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
            if arg_clean in kwargs:
                del kwargs[arg_clean]
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            # pretrained_model_name is treated as a directory containing the weights
            weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
            state_dict = torch.load(weights_path)
        # rename TF-style parameters (gamma/beta) to PyTorch names (weight/bias)
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        # recursively load parameters module-by-module so key prefixes line up
        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(
                prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # checkpoints saved from a bare BertModel carry no 'bert.' prefix
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        model.missing_keys = missing_keys
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            logger.info('\n'.join(error_msgs))
        return model
class BertModel(PreTrainedBertModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.config = config
        self.apply(self.init_bert_weights)
    def rescale_some_parameters(self):
        # divide each layer's residual output projections by sqrt(2*(layer_id+1))
        # so deeper layers start with proportionally smaller contributions
        for layer_id, layer in enumerate(self.encoder.layer):
            layer.attention.output.dense.weight.data.div_(
                math.sqrt(2.0 * (layer_id + 1)))
            layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))
    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Convert a 2D/3D 0-1 attention mask into the broadcastable additive
        form (0 for keep, -10000 for masked) used by the attention layers."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
                mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
        encoded_layers = self.encoder(embedding_output, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      mask_qkv=mask_qkv, seg_ids=token_type_ids,
                                      key_history=key_history, value_history=value_history)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # return just the final layer's tensor instead of the list
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertModelIncr(BertModel):
    """`BertModel` variant for incremental (step-by-step) decoding.

    Adds an optional learned relative-position bias and threads previously
    computed embeddings/encoder states through the encoder.  Also returns
    the embedding output so callers can cache it between steps.
    """

    def __init__(self, config):
        super(BertModelIncr, self).__init__(config)
        if self.config.rel_pos_bins > 0:
            self.rel_pos_bias = nn.Linear(
                self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None, rel_pos=None):
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids, task_idx=task_idx)
        if self.rel_pos_bias is None:
            rel_pos = None
        else:
            # bucket ids -> one-hot -> per-head bias, shaped (batch, head, qpos, kpos)
            one_hot = F.one_hot(
                rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
            rel_pos = self.rel_pos_bias(one_hot).permute(0, 3, 1, 2)
        encoded_layers = self.encoder(
            embedding_output, extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers,
            mask_qkv=mask_qkv, seg_ids=token_type_ids, rel_pos=rel_pos)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return embedding_output, encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
    """BERT with both pre-training heads: masked LM and next-sentence.

    Params:
        config: a BertConfig instance describing the model to build.
    Inputs:
        `input_ids`: LongTensor [batch_size, sequence_length] of WordPiece ids.
        `token_type_ids`: optional LongTensor [batch_size, sequence_length] of
            segment ids in [0, 1] (sentence A / sentence B).
        `attention_mask`: optional LongTensor [batch_size, sequence_length];
            1 marks real tokens, 0 marks padding.
        `masked_lm_labels`: optional LongTensor [batch_size, sequence_length]
            in [-1, 0, ..., vocab_size]; entries of -1 are ignored by the loss.
        `next_sentence_label`: optional LongTensor [batch_size] in [0, 1];
            0 => next sentence is the continuation, 1 => a random sentence.
    Outputs:
        When both label tensors are given: the summed MLM + NSP loss.
        Otherwise: the tuple (masked-LM logits [batch, seq, vocab],
        next-sentence logits [batch, 2]).

    Example usage:
    ```python
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # LM decoder weight is tied to the input word-embedding matrix
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, mask_qkv=None, task_idx=None):
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask,
            output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx)
        prediction_scores, seq_relationship_score = self.cls(
            sequence_output, pooled_output)
        # without both label tensors, return raw logits for the caller
        if masked_lm_labels is None or next_sentence_label is None:
            return prediction_scores, seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(
            prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(
            seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        return masked_lm_loss + next_sentence_loss
class BertPreTrainingPairTransform(nn.Module):
    """Project the concatenation of two hidden vectors back to ``hidden_size``.

    Applies a dense layer over ``cat([x, y])`` followed by the configured
    activation; the LayerNorm step is intentionally disabled (kept as a
    commented-out line, as in the original).
    """

    def __init__(self, config):
        super(BertPreTrainingPairTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
        # Activation may be given either by name (looked up in ACT2FN)
        # or directly as a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        # self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)

    def forward(self, pair_x, pair_y):
        paired = torch.cat([pair_x, pair_y], dim=-1)
        paired = self.dense(paired)
        paired = self.transform_act_fn(paired)
        # paired = self.LayerNorm(paired)
        return paired
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """
    Map signed relative positions to bucket indices.

    Small distances get one bucket each; larger distances (up to
    ``max_distance``) share logarithmically-spaced buckets. In
    bidirectional mode the bucket range is split between the two signs.

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    bucket = 0
    if bidirectional:
        # Half of the buckets for each direction; positive offsets are
        # shifted into the upper half.
        num_buckets //= 2
        bucket += (relative_position > 0).long() * num_buckets
        dist = torch.abs(relative_position)
    else:
        dist = torch.max(-relative_position, torch.zeros_like(relative_position))
    # dist is now in [0, inf): exact buckets below max_exact, log-spaced above.
    max_exact = num_buckets // 2
    is_small = dist < max_exact
    log_bucket = max_exact + (
        torch.log(dist.float() / max_exact) / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(
        log_bucket, torch.full_like(log_bucket, num_buckets - 1))
    bucket += torch.where(is_small, dist, log_bucket)
    return bucket
class BertForSeq2SeqDecoder(PreTrainedBertModel):
    """Incremental BERT decoder for sequence-to-sequence generation.

    Wraps ``BertModelIncr`` (a BERT encoder with cached previous states) and
    the pre-training heads. ``forward`` performs greedy token-by-token
    decoding; ``beam_search`` performs beam decoding with optional duplicate
    n-gram blocking and a length penalty.
    """

    def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
                 search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
                 forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
                 pos_shift=False):
        super(BertForSeq2SeqDecoder, self).__init__(config)
        self.bert = BertModelIncr(config)
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
        self.apply(self.init_bert_weights)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
        # Token id fed at the position currently being predicted.
        self.mask_word_id = mask_word_id
        self.num_labels = num_labels
        # Beam width; 1 means greedy decoding in forward().
        self.search_beam_size = search_beam_size
        self.length_penalty = length_penalty
        self.eos_id = eos_id
        self.sos_id = sos_id
        # n-gram blocking configuration for beam search.
        self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
        self.forbid_ignore_set = forbid_ignore_set
        self.ngram_size = ngram_size
        self.min_len = min_len
        assert mode in ("s2s", "l2r")
        self.mode = mode
        # When True, use the shifted-position decoding variant (no [MASK]
        # probe token is appended each step).
        self.pos_shift = pos_shift

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
        """Greedy decoding; dispatches to beam_search when beam size > 1.

        Returns a LongTensor of generated token ids, shape
        [batch_size, output_length - input_length].
        """
        if self.search_beam_size > 1:
            return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
                                    task_idx=task_idx, mask_qkv=mask_qkv)

        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        # Cached embedding/encoder states from previous decoding steps.
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)

        # Precompute relative-position buckets once for the full sequence.
        if self.bert.rel_pos_bias is not None:
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
        else:
            rel_pos = None

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]

            # Build the incremental input slice for this step.
            if self.pos_shift:
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                start_pos = next_pos - curr_length
                # Append a [MASK] probe at the position to be predicted.
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:,
                                                 start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            if rel_pos is not None:
                cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
            else:
                cur_rel_pos = None
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)

            # Score only the last (probe) position and take the argmax token.
            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            _, max_ids = torch.max(prediction_scores, dim=-1)
            output_ids.append(max_ids)

            # Extend the cached states. In pos_shift mode all new positions
            # are kept; otherwise the probe ([MASK]) position is dropped.
            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = new_embedding
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
            else:
                if prev_embedding is None:
                    prev_embedding = new_embedding[:, :-1, :]
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x[:, :-1, :]
                                           for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
            curr_ids = max_ids
            next_pos += 1

        return torch.cat(output_ids, dim=1)

    def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
        """Beam-search decoding.

        Returns a dict of traces ('pred_seq', 'scores', 'wids', 'ptrs'),
        each padded to output_length and moved to input_ids' device so the
        result is DataParallel-friendly.
        """
        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)

        K = self.search_beam_size

        # Per-step bookkeeping for back-tracking the best hypothesis.
        total_scores = []
        beam_masks = []
        step_ids = []
        step_back_ptrs = []
        partial_seqs = []
        forbid_word_mask = None
        buf_matrix = None

        if self.bert.rel_pos_bias is not None:
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
        else:
            rel_pos = None
        # print("Rel pos size = %s" % str(rel_pos.size()))

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]

            if self.pos_shift:
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                start_pos = next_pos - curr_length
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            if rel_pos is not None:
                cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
            else:
                cur_rel_pos = None
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            log_scores = torch.nn.functional.log_softmax(
                prediction_scores, dim=-1)
            # Apply n-gram blocking / minimum-length constraints as large
            # negative additive penalties.
            if forbid_word_mask is not None:
                log_scores += (forbid_word_mask * -10000.0)
            if self.min_len and (next_pos - input_length + 1 <= self.min_len):
                log_scores[:, :, self.eos_id].fill_(-10000.0)
            kk_scores, kk_ids = torch.topk(log_scores, k=K)
            if len(total_scores) == 0:
                # First step: one hypothesis per batch item so far.
                k_ids = torch.reshape(kk_ids, [batch_size, K])
                back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
                k_scores = torch.reshape(kk_scores, [batch_size, K])
            else:
                # Combine K*K candidates; finished beams (last_eos) are
                # suppressed so they are not extended.
                last_eos = torch.reshape(
                    beam_masks[-1], [batch_size * K, 1, 1])
                last_seq_scores = torch.reshape(
                    total_scores[-1], [batch_size * K, 1, 1])
                kk_scores += last_eos * (-10000.0) + last_seq_scores
                kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
                k_scores, k_ids = torch.topk(kk_scores, k=K)
                # NOTE(review): torch.div on integer tensors truncates only on
                # older PyTorch; newer versions need rounding_mode='floor'
                # (or k_ids // K) — confirm the pinned torch version.
                back_ptrs = torch.div(k_ids, K)
                kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
                k_ids = torch.gather(kk_ids, 1, k_ids)
            step_back_ptrs.append(back_ptrs)
            step_ids.append(k_ids)
            beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
            total_scores.append(k_scores)

            def first_expand(x):
                # Tile x K times along a new beam axis and fold it into the
                # batch dimension: [B, ...] -> [B*K, ...].
                input_shape = list(x.size())
                expanded_shape = input_shape[:1] + [1] + input_shape[1:]
                x = torch.reshape(x, expanded_shape)
                repeat_count = [1, K] + [1] * (len(input_shape) - 1)
                x = x.repeat(*repeat_count)
                x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
                return x

            def select_beam_items(x, ids):
                # Re-order the beam axis of x according to back-pointers ids.
                id_shape = list(ids.size())
                id_rank = len(id_shape)
                assert len(id_shape) == 2
                x_shape = list(x.size())
                x = torch.reshape(x, [batch_size, K] + x_shape[1:])
                x_rank = len(x_shape) + 1
                assert x_rank >= 2
                if id_rank < x_rank:
                    ids = torch.reshape(
                        ids, id_shape + [1] * (x_rank - id_rank))
                    ids = ids.expand(id_shape + x_shape[1:])
                y = torch.gather(x, 1, ids)
                y = torch.reshape(y, x_shape)
                return y

            is_first = (prev_embedding is None)

            # Extend and re-order cached states for the surviving beams
            # (probe position dropped unless pos_shift is on).
            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = first_expand(new_embedding)
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                    prev_embedding = select_beam_items(
                        prev_embedding, back_ptrs)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [first_expand(
                        x) for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
                    prev_encoded_layers = [select_beam_items(
                        x, back_ptrs) for x in prev_encoded_layers]
            else:
                if prev_embedding is None:
                    prev_embedding = first_expand(new_embedding[:, :-1, :])
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                    prev_embedding = select_beam_items(
                        prev_embedding, back_ptrs)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [first_expand(
                        x[:, :-1, :]) for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
                    prev_encoded_layers = [select_beam_items(
                        x, back_ptrs) for x in prev_encoded_layers]

            curr_ids = torch.reshape(k_ids, [batch_size * K, 1])

            if is_first:
                # After the first step every per-position tensor must be
                # tiled to the beam-expanded batch size.
                token_type_ids = first_expand(token_type_ids)
                position_ids = first_expand(position_ids)
                attention_mask = first_expand(attention_mask)
                if rel_pos is not None:
                    rel_pos = first_expand(rel_pos)
                mask_ids = first_expand(mask_ids)
                if mask_qkv is not None:
                    mask_qkv = first_expand(mask_qkv)

            if self.forbid_duplicate_ngrams:
                wids = step_ids[-1].tolist()
                ptrs = step_back_ptrs[-1].tolist()
                if is_first:
                    partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            partial_seqs.append([wids[b][k]])
                else:
                    new_partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            new_partial_seqs.append(
                                partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
                    partial_seqs = new_partial_seqs

                def get_dup_ngram_candidates(seq, n):
                    # Tokens that would complete an n-gram already present
                    # in seq (ignoring n-grams touching forbid_ignore_set).
                    cands = set()
                    if len(seq) < n:
                        return []
                    tail = seq[-(n - 1):]
                    if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
                        return []
                    for i in range(len(seq) - (n - 1)):
                        mismatch = False
                        for j in range(n - 1):
                            if tail[j] != seq[i + j]:
                                mismatch = True
                                break
                        if (not mismatch) and not (
                                self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
                            cands.add(seq[i + n - 1])
                    return list(sorted(cands))

                if len(partial_seqs[0]) >= self.ngram_size:
                    dup_cands = []
                    for seq in partial_seqs:
                        dup_cands.append(
                            get_dup_ngram_candidates(seq, self.ngram_size))
                    if max(len(x) for x in dup_cands) > 0:
                        if buf_matrix is None:
                            vocab_size = list(log_scores.size())[-1]
                            buf_matrix = np.zeros(
                                (batch_size * K, vocab_size), dtype=float)
                        else:
                            buf_matrix.fill(0)
                        for bk, cands in enumerate(dup_cands):
                            for i, wid in enumerate(cands):
                                buf_matrix[bk, wid] = 1.0
                        forbid_word_mask = torch.tensor(
                            buf_matrix, dtype=log_scores.dtype)
                        # NOTE(review): .cuda() is hard-coded here — this
                        # path presumably always runs on GPU; confirm.
                        forbid_word_mask = torch.reshape(
                            forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
                    else:
                        forbid_word_mask = None
            next_pos += 1

        # [(batch, beam)]
        total_scores = [x.tolist() for x in total_scores]
        step_ids = [x.tolist() for x in step_ids]
        step_back_ptrs = [x.tolist() for x in step_back_ptrs]
        # back tracking
        traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
        for b in range(batch_size):
            # [(beam,)]
            scores = [x[b] for x in total_scores]
            wids_list = [x[b] for x in step_ids]
            ptrs = [x[b] for x in step_back_ptrs]
            traces['scores'].append(scores)
            traces['wids'].append(wids_list)
            traces['ptrs'].append(ptrs)
            # first we need to find the eos frame where all symbols are eos
            # any frames after the eos frame are invalid
            last_frame_id = len(scores) - 1
            for i, wids in enumerate(wids_list):
                if all(wid == self.eos_id for wid in wids):
                    last_frame_id = i
                    break
            # Pick the best-scoring finished hypothesis, applying the
            # length penalty (GNMT-style (5+len)/6 normalisation).
            max_score = -math.inf
            frame_id = -1
            pos_in_frame = -1
            for fid in range(last_frame_id + 1):
                for i, wid in enumerate(wids_list[fid]):
                    if wid == self.eos_id or fid == last_frame_id:
                        s = scores[fid][i]
                        if self.length_penalty > 0:
                            s /= math.pow((5 + fid + 1) / 6.0,
                                          self.length_penalty)
                        if s > max_score:
                            max_score = s
                            frame_id = fid
                            pos_in_frame = i
            if frame_id == -1:
                traces['pred_seq'].append([0])
            else:
                # Walk the back-pointers from the chosen frame to step 0.
                seq = [wids_list[frame_id][pos_in_frame]]
                for fid in range(frame_id, 0, -1):
                    pos_in_frame = ptrs[fid][pos_in_frame]
                    seq.append(wids_list[fid - 1][pos_in_frame])
                seq.reverse()
                traces['pred_seq'].append(seq)

        def _pad_sequence(sequences, max_len, padding_value=0):
            # Right-pad a list of 1-D (or higher) tensors to max_len rows.
            trailing_dims = sequences[0].size()[1:]
            out_dims = (len(sequences), max_len) + trailing_dims

            out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
            for i, tensor in enumerate(sequences):
                length = tensor.size(0)
                # use index notation to prevent duplicate references to the tensor
                out_tensor[i, :length, ...] = tensor
            return out_tensor

        # convert to tensors for DataParallel
        for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
            ts_list = traces[k]
            if not isinstance(ts_list[0], torch.Tensor):
                dt = torch.float if k == 'scores' else torch.long
                ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
            traces[k] = _pad_sequence(
                ts_list, output_length, padding_value=0).to(input_ids.device)

        return traces
| 67,538 | 44.944898 | 139 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/dataloader.py | import sys
import traceback
import logging
import random
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import torch
from torch.utils.data import IterableDataset
from streaming import StreamSampler
class DataLoaderTrain(IterableDataset):
    """Streaming training dataloader for news-recommendation impressions.

    Reads tab-separated impression logs through ``StreamSampler``, converts
    news ids to row indices into ``news_combined``, and yields ready-made
    training batches. When ``enable_prefetch`` is on, batches are built in a
    background thread and handed over via a bounded queue.
    """

    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_combined,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        self.data_dir = data_dir
        self.filename_pat = filename_pat

        # Number of sampled negatives per positive (npratio) and the
        # fixed click-history length.
        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size

        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx

        self.sampler = None

        self.shuffle_buffer_size = args.shuffle_buffer_size
        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        # Incremented per pass; also used as the shuffle seed.
        self.epoch = -1

        self.news_combined = news_combined
        self.news_index = news_index
        self.word_dict = word_dict

    def start(self):
        """Synchronous start: create the stream sampler for a new epoch."""
        self.epoch += 1
        self.sampler = StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_buffer_size=self.shuffle_buffer_size,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )
        self.sampler.__iter__()

    def trans_to_nindex(self, nids):
        """Map news-id strings to row indices; unknown ids map to row 0."""
        return [self.news_index[i] if i in self.news_index else 0 for i in nids]

    def pad_to_fix_len(self, x, fix_length, padding_front=True, padding_value=0):
        """Pad/truncate *x* to *fix_length*; returns (padded, 0/1 mask)."""
        if padding_front:
            pad_x = [padding_value] * (fix_length - len(x)) + x[-fix_length:]
            mask = [0] * (fix_length - len(x)) + [1] * min(fix_length, len(x))
        else:
            pad_x = x[-fix_length:] + [padding_value] * (fix_length - len(x))
            mask = [1] * min(fix_length, len(x)) + [0] * (fix_length - len(x))
        return pad_x, mask

    def _produce(self):
        """Background producer: stream batches into self.outputs."""
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.epoch += 1
            self.sampler = StreamSampler(
                data_dir=self.data_dir,
                filename_pat=self.filename_pat,
                batch_size=self.batch_size,
                worker_rank=self.worker_rank,
                world_size=self.world_size,
                enable_shuffle=self.enable_shuffle,
                shuffle_seed=self.epoch,  # epoch id as shuffle random seed
            )
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
        except:
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise

    def start_async(self):
        """Start the prefetch thread with a bounded (size 10) output queue."""
        self.aval_count = 0
        self.stopped = False
        self.outputs = Queue(10)
        self.pool = ThreadPoolExecutor(1)
        self.pool.submit(self._produce)

    def _process(self, batch):
        """Decode one raw byte batch into model-ready tensors.

        Each line: ... \t click_docs \t positives \t negatives. The positive
        is inserted at a random slot among npratio negatives; that slot
        index is the classification label.
        """
        batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
        user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []

        for line in batch:
            click_docs = line[3].split()
            sess_pos = line[4].split()
            sess_neg = line[5].split()

            click_docs, log_mask = self.pad_to_fix_len(
                self.trans_to_nindex(click_docs), self.user_log_length)
            user_feature = self.news_combined[click_docs]

            pos = self.trans_to_nindex(sess_pos)
            neg = self.trans_to_nindex(sess_neg)
            label = random.randint(0, self.npratio)
            sample_news = neg[:label] + pos + neg[label:]
            news_feature = self.news_combined[sample_news]

            user_feature_batch.append(user_feature)
            log_mask_batch.append(log_mask)
            news_feature_batch.append(news_feature)
            label_batch.append(label)

        if self.enable_gpu:
            user_feature_batch = torch.LongTensor(user_feature_batch).cuda()
            log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
            news_feature_batch = torch.LongTensor(news_feature_batch).cuda()
            label_batch = torch.LongTensor(label_batch).cuda()
        else:
            user_feature_batch = torch.LongTensor(user_feature_batch)
            log_mask_batch = torch.FloatTensor(log_mask_batch)
            news_feature_batch = torch.LongTensor(news_feature_batch)
            label_batch = torch.LongTensor(label_batch)

        return user_feature_batch, log_mask_batch, news_feature_batch, label_batch

    def __iter__(self):
        """Implement IterableDataset method to provide data iterator."""
        logging.info("DataLoader __iter__()")
        if self.enable_prefetch:
            self.join()
            self.start_async()
        else:
            self.start()
        return self

    def __next__(self):
        if self.sampler and self.sampler.reach_end() and self.aval_count == 0:
            raise StopIteration
        if self.enable_prefetch:
            next_batch = self.outputs.get()
            self.outputs.task_done()
            self.aval_count -= 1
        else:
            next_batch = self._process(self.sampler.__next__())
        return next_batch

    def join(self):
        """Stop the producer, drain the queue and shut down the thread pool."""
        self.stopped = True
        if self.sampler:
            if self.enable_prefetch:
                while self.outputs.qsize() > 0:
                    self.outputs.get()
                    self.outputs.task_done()
                self.outputs.join()
                self.pool.shutdown(wait=True)
                logging.info("shut down pool.")
            self.sampler = None
class DataLoaderTest(DataLoaderTrain):
    """Evaluation dataloader.

    Differs from DataLoaderTrain in that news are represented by
    precomputed embedding vectors (``news_scoring``) rather than token
    ids, and labels come from the impression's '-0'/'-1' suffixes
    instead of being sampled.
    """

    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_scoring,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        self.data_dir = data_dir
        self.filename_pat = filename_pat

        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size

        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx

        self.sampler = None

        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        self.epoch = -1

        # Precomputed news embedding matrix (row per news index).
        self.news_scoring = news_scoring
        self.news_index = news_index
        self.word_dict = word_dict

    def start(self):
        """Synchronous start: create the stream sampler for a new pass."""
        self.epoch += 1
        self.sampler = StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )
        self.sampler.__iter__()

    def _produce(self):
        """Background producer: stream evaluation batches into self.outputs."""
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.epoch += 1
            self.sampler = StreamSampler(
                data_dir=self.data_dir,
                filename_pat=self.filename_pat,
                batch_size=self.batch_size,
                worker_rank=self.worker_rank,
                world_size=self.world_size,
                enable_shuffle=self.enable_shuffle,
                shuffle_seed=self.epoch,  # epoch id as shuffle random seed
            )
            # t0 = time.time()
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
                # logging.info(f"_produce cost:{time.time()-t0}")
                # t0 = time.time()
        except:
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise

    def _process(self, batch):
        """Decode one raw byte batch into evaluation tensors.

        Each impression item is '<news_id>-<label>'; news ids are replaced
        by their precomputed embedding rows. News features and labels stay
        as Python lists of per-impression arrays (variable length).
        """
        batch_size = len(batch)  # NOTE(review): unused — kept for parity.
        batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
        user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []

        for line in batch:
            click_docs = line[3].split()
            click_docs, log_mask = self.pad_to_fix_len(
                self.trans_to_nindex(click_docs), self.user_log_length)
            user_feature = self.news_scoring[click_docs]

            sample_news = self.trans_to_nindex(
                [i.split('-')[0] for i in line[4].split()])
            labels = [int(i.split('-')[1]) for i in line[4].split()]
            news_feature = self.news_scoring[sample_news]

            user_feature_batch.append(user_feature)
            log_mask_batch.append(log_mask)
            news_feature_batch.append(news_feature)
            label_batch.append(np.array(labels))

        if self.enable_gpu:
            user_feature_batch = torch.FloatTensor(user_feature_batch).cuda()
            log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
        else:
            user_feature_batch = torch.FloatTensor(user_feature_batch)
            log_mask_batch = torch.FloatTensor(log_mask_batch)

        return user_feature_batch, log_mask_batch, news_feature_batch, label_batch
| 10,287 | 34.84669 | 92 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/utils.py | import logging
import os
import sys
import torch
import numpy as np
import argparse
import re
from tnlrv3.modeling import TuringNLRv3ForSequenceClassification
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
from tnlrv3.tokenization_tnlrv3 import TuringNLRv3Tokenizer
from transformers import BertTokenizer, BertConfig, BertModel
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
# Registry mapping a model-type key to its
# (config class, model class, tokenizer class) triple.
MODEL_CLASSES = {
    'tnlrv3': (TuringNLRv3Config, TuringNLRv3ForSequenceClassification, TuringNLRv3Tokenizer),
    'bert': (BertConfig, BertModel, BertTokenizer),
    'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)
}
def word_tokenize(sent):
    """Lowercase *sent* and split it into word and punctuation tokens.

    Any non-string input (None, NaN, numbers) yields an empty list.
    """
    if not isinstance(sent, str):
        return []
    pat = re.compile(r'[\w]+|[.,!?;|]')
    return pat.findall(sent.lower())
def str2bool(v):
    """Parse common textual truth values into a bool (argparse type helper).

    Raises argparse.ArgumentTypeError for unrecognised strings.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def init_hvd_cuda(enable_hvd=True, enable_gpu=True):
    """Optionally initialise Horovod and pin this process to its local GPU.

    Returns (world_size, rank, local_rank); all fall back to the
    single-process values (1, 0, 0) when Horovod is disabled.
    """
    hvd = None
    if enable_hvd:
        import horovod.torch as hvd

        hvd.init()
        logging.info(
            f"hvd_size:{hvd.size()}, hvd_rank:{hvd.rank()}, hvd_local_rank:{hvd.local_rank()}"
        )

    if enable_hvd:
        hvd_size, hvd_rank, hvd_local_rank = hvd.size(), hvd.rank(), hvd.local_rank()
    else:
        hvd_size, hvd_rank, hvd_local_rank = 1, 0, 0

    if enable_gpu:
        torch.cuda.set_device(hvd_local_rank)

    return hvd_size, hvd_rank, hvd_local_rank
def setuplogger():
    """Route INFO-level log records to stdout with a timestamped format."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    stream = logging.StreamHandler(sys.stdout)
    stream.setLevel(logging.INFO)
    stream.setFormatter(
        logging.Formatter("[%(levelname)s %(asctime)s] %(message)s"))
    root_logger.addHandler(stream)
def dump_args(args):
    """Log every public attribute of *args* for experiment reproducibility."""
    for arg in dir(args):
        if arg.startswith("_"):
            continue
        logging.info(f"args[{arg}]={getattr(args, arg)}")
def acc(y_true, y_hat):
    """Fraction of rows where argmax(*y_hat*) matches *y_true* (float tensor)."""
    predictions = torch.argmax(y_hat, dim=-1)
    total = y_true.shape[0]
    correct = torch.sum(y_true == predictions)
    return correct.data.float() * 1.0 / total
def dcg_score(y_true, y_score, k=10):
    """Discounted cumulative gain of the top-*k* items ranked by *y_score*."""
    top_k = np.argsort(y_score)[::-1][:k]
    relevance = np.take(y_true, top_k)
    gains = 2 ** relevance - 1
    # Positions are discounted by log2(rank + 1), rank starting at 1.
    discounts = np.log2(np.arange(len(relevance)) + 2)
    return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
    """DCG of the predicted ranking normalised by the ideal ranking's DCG."""
    ideal = dcg_score(y_true, y_true, k)
    achieved = dcg_score(y_true, y_score, k)
    return achieved / ideal
def mrr_score(y_true, y_score):
    """Mean reciprocal rank of the positives when ranked by *y_score*."""
    ranking = np.argsort(y_score)[::-1]
    ordered = np.take(y_true, ranking)
    reciprocal = ordered / (np.arange(len(ordered)) + 1)
    return np.sum(reciprocal) / np.sum(y_true)
def load_matrix(embedding_file_path, word_dict, word_embedding_dim):
    """Build an embedding matrix for *word_dict* from a GloVe-style file.

    Row 0 is implicitly reserved (matrix has ``len(word_dict) + 1`` rows);
    words absent from the file keep all-zero rows. With a ``None`` path an
    all-zero matrix is returned.

    Returns (embedding_matrix, list_of_words_found_in_file).
    """
    embedding_matrix = np.zeros(shape=(len(word_dict) + 1,
                                       word_embedding_dim))
    have_word = []
    if embedding_file_path is not None:
        with open(embedding_file_path, 'rb') as f:
            for raw in f:
                fields = raw.split()
                word = fields[0].decode()
                if word in word_dict:
                    row = word_dict[word]
                    embedding_matrix[row] = np.array(
                        [float(x) for x in fields[1:]])
                    have_word.append(word)
    return embedding_matrix, have_word
def latest_checkpoint(directory):
    """Return the path of the newest ``...-<N>.<ext>`` checkpoint in *directory*.

    The original implementation parsed every directory entry with
    ``int(x.split('.')[-2].split('-')[-1])`` and therefore raised
    ValueError/IndexError as soon as any non-checkpoint file was present
    (e.g. ``README`` or ``.ipynb_checkpoints``). Names that do not end in a
    number followed by an extension are now silently skipped.

    Returns None when the directory does not exist or contains no
    recognizable checkpoint.
    """
    if not os.path.exists(directory):
        return None
    all_checkpoints = {}
    for name in os.listdir(directory):
        # Match the trailing "<number>.<ext>" part, e.g. "epoch-3.pt" -> 3.
        match = re.search(r'(\d+)\.[^.]*$', name)
        if match is None:
            continue
        all_checkpoints[int(match.group(1))] = name
    if not all_checkpoints:
        return None
    return os.path.join(directory,
                        all_checkpoints[max(all_checkpoints.keys())])
def get_checkpoint(directory, ckpt_name):
    """Return the full path of *ckpt_name* inside *directory*, or None if absent."""
    candidate = os.path.join(directory, ckpt_name)
    return candidate if os.path.exists(candidate) else None
| 4,173 | 27.589041 | 94 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/run.py | import numpy as np
import torch
import logging
from tqdm.auto import tqdm
import torch.optim as optim
import utils
import os
from pathlib import Path
import random
from dataloader import DataLoaderTrain, DataLoaderTest
from torch.utils.data import Dataset, DataLoader
from streaming import get_stat, get_worker_files
from parameters import parse_args
from preprocess import read_news_bert, get_doc_input_bert
from model_bert import ModelBert
def train(args):
    """Train the PLM-based news recommender, optionally under Horovod.

    Loads news text, builds token-id features, optionally warm-starts the
    news encoder from a distilled 'student' checkpoint, freezes all PLM
    layers except ``args.bert_trainable_layer``, then runs the epoch loop
    and saves one checkpoint per epoch on rank 0.
    """
    if args.enable_hvd:
        import horovod.torch as hvd

    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)

    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)

    # Count the samples assigned to this worker (for logging only).
    stat = get_stat(args.train_data_dir, args.filename_pat)
    print(stat)
    data_paths = get_worker_files(args.train_data_dir,
                                  hvd_rank, hvd_size, args.filename_pat, args.enable_shuffle, 0
                                  )
    sample_num = 0
    for file in data_paths:
        sample_num += stat[file]
    logging.info("[{}] contains {} samples {} steps".format(
        hvd_rank, sample_num, sample_num // args.batch_size))

    news, news_index, category_dict, subcategory_dict = read_news_bert(
        os.path.join(args.train_data_dir, 'news.tsv'), args, mode='train'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # One row per news: [title token ids | title attention mask].
    news_combined = np.concatenate([news_title, news_title_attmask], axis=-1)

    model = ModelBert(args)

    if args.use_pretrain_model:
        # Warm-start from a distillation checkpoint: copy only parameters
        # whose names start with 'student'; the rest keep random init.
        ckpt = torch.load(args.pretrain_model_path, map_location='cpu')
        pretrained_dict = ckpt["model_state_dict"]
        model_dict = model.state_dict()
        remain_key = list(model_dict.keys())
        pretrained_key = []
        for k, v in pretrained_dict.items():
            if not k.startswith('student'):
                continue
            key = k
            model_dict[key].copy_(v)
            pretrained_key.append(key)
            remain_key.remove(key)
        model.load_state_dict(model_dict)
        if hvd_rank == 0:
            logging.info(f"loaded pretrain model: {args.pretrain_model_path}")
            print(f'{len(pretrained_key)} loaded pretrained parameters:')
            for k in pretrained_key:
                print(f'\t{k}')
            print(f'{len(remain_key)} randomly initialized parameters:')
            for k in remain_key:
                print(f'\t{k}')
        del ckpt
        torch.cuda.empty_cache()

        # Freeze the whole PLM, then unfreeze only the selected blocks.
        for param in model.news_encoder.bert_model.parameters():
            param.requires_grad = False
        for index, layer in enumerate(model.news_encoder.bert_model.bert.encoder.layer):
            if index in args.bert_trainable_layer:
                logging.info(f"finetune block {index}")
                for param in layer.parameters():
                    param.requires_grad = True

        if args.enable_gpu:
            model = model.cuda()

        # Two learning rates: a small one for warm-started parameters,
        # the regular one for everything else.
        pretrained_param = []
        rest_param = []
        for name, param in model.named_parameters():
            if name in pretrained_key:
                pretrained_param.append(param)
            else:
                rest_param.append(param)
        optimizer = torch.optim.Adam([
            {'params': pretrained_param, 'lr': args.pretrain_lr},
            {'params': rest_param, 'lr': args.lr}], amsgrad=True)
    else:
        # No warm start: freeze the PLM except the selected blocks.
        # tnlrv3 nests the encoder one level deeper than HF BERT/RoBERTa.
        if args.model_type == 'tnlrv3':
            for param in model.news_encoder.bert_model.parameters():
                param.requires_grad = False
            for index, layer in enumerate(model.news_encoder.bert_model.bert.encoder.layer):
                if index in args.bert_trainable_layer:
                    logging.info(f"finetune block {index}")
                    for param in layer.parameters():
                        param.requires_grad = True
        else:
            for param in model.news_encoder.bert_model.parameters():
                param.requires_grad = False
            for index, layer in enumerate(model.news_encoder.bert_model.encoder.layer):
                if index in args.bert_trainable_layer:
                    logging.info(f"finetune block {index}")
                    for param in layer.parameters():
                        param.requires_grad = True

        if args.enable_gpu:
            model = model.cuda()

        optimizer = optim.Adam(model.parameters(), lr=args.lr, amsgrad=True)

    word_dict = None

    if args.load_ckpt_name is not None:
        # Resume full model state from an explicit checkpoint.
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
        checkpoint = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        logging.info(f"Model loaded from {ckpt_path}")

    if hvd_rank == 0:
        print(model)
        for name, param in model.named_parameters():
            print(name, param.requires_grad)

    if args.enable_hvd:
        # Synchronise initial state across workers and wrap the optimizer
        # for averaged distributed gradient updates.
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
        compression = hvd.Compression.none
        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            compression=compression,
            op=hvd.Average)

    dataloader = DataLoaderTrain(
        news_index=news_index,
        news_combined=news_combined,
        word_dict=word_dict,
        data_dir=args.train_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=True,
        enable_gpu=args.enable_gpu,
    )

    logging.info('Training...')
    for ep in range(args.start_epoch, args.epochs):
        loss = 0.0
        accuary = 0.0
        for cnt, (log_ids, log_mask, input_ids, targets) in enumerate(dataloader):
            if cnt > args.max_steps_per_epoch:
                break
            bz_loss, y_hat = model(log_ids, log_mask, input_ids, targets)
            loss += bz_loss.data.float()
            accuary += utils.acc(targets, y_hat)
            optimizer.zero_grad()
            bz_loss.backward()
            optimizer.step()

            # NOTE(review): at cnt == 0 this logs loss/cnt with cnt = 0 —
            # division by zero on the very first step; confirm intended.
            if cnt % args.log_steps == 0:
                logging.info(
                    '[{}] Ed: {}, train_loss: {:.5f}, acc: {:.5f}'.format(
                        hvd_rank, cnt * args.batch_size, loss.data / cnt,
                        accuary / cnt))

        loss /= cnt
        print(ep + 1, loss)

        # save model last of epoch
        if hvd_rank == 0:
            ckpt_path = os.path.join(args.model_dir, f'epoch-{ep+1}.pt')
            torch.save(
                {
                    'model_state_dict': model.state_dict(),
                    'category_dict': category_dict,
                    'word_dict': word_dict,
                    'subcategory_dict': subcategory_dict
                }, ckpt_path)
            logging.info(f"Model saved to {ckpt_path}")

    dataloader.join()
def test(args):
    """Evaluate a trained ModelBert checkpoint on the test split.

    Loads the requested (or latest) checkpoint, pre-computes an embedding
    for every news article, logs an average cosine similarity over random
    article pairs, then streams user impressions and accumulates
    AUC / MRR / nDCG@5 / nDCG@10, all-reduced across Horovod workers.
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    # NOTE(review): hvd_* ranks are used below even when enable_hvd is
    # False — presumably utils.init_hvd_cuda returns sane single-process
    # defaults in that case; confirm. The hvd module itself, however,
    # would be unbound at the allreduce calls near the end.
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    # Resolve which checkpoint to evaluate: an explicitly named one, or
    # the most recent file in the model directory.
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    assert ckpt_path is not None, 'No ckpt found'
    checkpoint = torch.load(ckpt_path)
    # Vocabulary/category mappings are stored inside the checkpoint so
    # evaluation uses exactly the dictionaries seen during training.
    subcategory_dict = checkpoint['subcategory_dict']
    category_dict = checkpoint['category_dict']
    word_dict = checkpoint['word_dict']
    model = ModelBert(args)
    if args.enable_gpu:
        model.cuda()
    model.load_state_dict(checkpoint['model_state_dict'])
    logging.info(f"Model loaded from {ckpt_path}")
    if args.enable_hvd:
        # Keep all workers' weights in sync with rank 0.
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    model.eval()
    torch.set_grad_enabled(False)
    news, news_index = read_news_bert(
        os.path.join(args.test_data_dir, 'news.tsv'), args, mode='test'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # Token ids and attention mask are concatenated along dim 1; the news
    # encoder splits them apart again inside its forward pass.
    news_combined = np.concatenate([news_title, news_title_attmask], axis=1)

    class NewsDataset(Dataset):
        # Thin Dataset wrapper over the pre-tokenized news matrix.
        def __init__(self, data):
            self.data = data

        def __getitem__(self, idx):
            return self.data[idx]

        def __len__(self):
            return self.data.shape[0]

    def news_collate_fn(arr):
        # Convert the numpy batch to the LongTensor the encoder expects.
        arr = torch.LongTensor(arr)
        return arr

    news_dataset = NewsDataset(news_combined)
    news_dataloader = DataLoader(news_dataset,
                                 batch_size=args.batch_size * 4,
                                 num_workers=args.num_workers,
                                 collate_fn=news_collate_fn)
    # Pre-compute one embedding per article so scoring impressions later
    # only needs dot products.
    news_scoring = []
    with torch.no_grad():
        for input_ids in tqdm(news_dataloader):
            input_ids = input_ids.cuda()
            news_vec = model.news_encoder(input_ids)
            news_vec = news_vec.to(torch.device("cpu")).detach().numpy()
            news_scoring.extend(news_vec)
    news_scoring = np.array(news_scoring)
    logging.info("news scoring num: {}".format(news_scoring.shape[0]))
    # Diagnostic: mean cosine similarity of random article pairs.
    # NOTE(review): randrange(1, n) never samples index 0, and pairs with
    # i == j are skipped while the divisor stays 1e6 — the reported value
    # is therefore a slight underestimate; presumably intentional enough
    # for a diagnostic, but confirm.
    doc_sim = 0
    for _ in tqdm(range(1000000)):
        i = random.randrange(1, len(news_scoring))
        j = random.randrange(1, len(news_scoring))
        if i != j:
            doc_sim += np.dot(news_scoring[i], news_scoring[j]) / (
                np.linalg.norm(news_scoring[i]) * np.linalg.norm(news_scoring[j]))
    print(f'=== doc-sim: {doc_sim / 1000000} ===')
    # Impression-level loader; given the join() below it presumably
    # prefetches on background workers — confirm in its implementation.
    dataloader = DataLoaderTest(
        news_index=news_index,
        news_scoring=news_scoring,
        word_dict=word_dict,
        data_dir=args.test_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=False,
        enable_gpu=args.enable_gpu,
    )
    from metrics import roc_auc_score, ndcg_score, mrr_score
    AUC = []
    MRR = []
    nDCG5 = []
    nDCG10 = []

    def print_metrics(hvd_local_rank, cnt, x):
        # Log metrics as tab-separated percentages for one worker.
        logging.info("[{}] Ed: {}: {}".format(hvd_local_rank, cnt,
                                              '\t'.join(["{:0.2f}".format(i * 100) for i in x])))

    def get_mean(arr):
        return [np.array(i).mean() for i in arr]

    def get_sum(arr):
        return [np.array(i).sum() for i in arr]

    local_sample_num = 0
    for cnt, (log_vecs, log_mask, news_vecs, labels) in enumerate(dataloader):
        local_sample_num += log_vecs.shape[0]
        user_vecs = model.user_encoder(log_vecs, log_mask).to(
            torch.device("cpu")).detach().numpy()
        for user_vec, news_vec, label in zip(user_vecs, news_vecs, labels):
            # Skip impressions whose labels are all 0 or all 1: ranking
            # metrics such as AUC are undefined for a single class.
            if label.mean() == 0 or label.mean() == 1:
                continue
            score = np.dot(news_vec, user_vec)
            auc = roc_auc_score(label, score)
            mrr = mrr_score(label, score)
            ndcg5 = ndcg_score(label, score, k=5)
            ndcg10 = ndcg_score(label, score, k=10)
            AUC.append(auc)
            MRR.append(mrr)
            nDCG5.append(ndcg5)
            nDCG10.append(ndcg10)
        if cnt % args.log_steps == 0:
            print_metrics(hvd_rank, local_sample_num,
                          get_mean([AUC, MRR, nDCG5, nDCG10]))
    # stop scoring: shut down the loader's background workers
    dataloader.join()
    logging.info('[{}] local_sample_num: {}'.format(
        hvd_rank, local_sample_num))
    # Aggregate sample counts and metric sums across all workers, then
    # report the global mean from rank 0 only.
    total_sample_num = hvd.allreduce(
        torch.tensor(local_sample_num), op=hvd.Sum)
    local_metrics_sum = get_sum([AUC, MRR, nDCG5, nDCG10])
    total_metrics_sum = hvd.allreduce(torch.tensor(
        local_metrics_sum, dtype=float), op=hvd.Sum)
    if hvd_rank == 0:
        print_metrics(hvd_rank, total_sample_num,
                      total_metrics_sum / total_sample_num)
if __name__ == "__main__":
    utils.setuplogger()
    args = parse_args()
    # Make sure the checkpoint directory exists before either phase runs.
    Path(args.model_dir).mkdir(parents=True, exist_ok=True)
    # Substring tests rather than equality — presumably so a combined mode
    # string could trigger both phases back to back; confirm against the
    # choices declared in parse_args.
    if 'train' in args.mode:
        train(args)
    if 'test' in args.mode:
        test(args)
| 12,540 | 32.265252 | 97 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/parameters.py | import argparse
import utils
import logging
def parse_args():
    """Build, parse, and log the command-line configuration for PLM-NR.

    Returns:
        argparse.Namespace: the parsed arguments (also written to the log).
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # Run mode and data locations.
    add("--mode", type=str, default="train", choices=['train', 'test'])
    add("--train_data_dir", type=str, default="../MIND/MINDlarge_train")
    add("--test_data_dir", type=str, default="../MIND/MINDlarge_test")
    add("--filename_pat", type=str, default="behaviors_np4_*.tsv")
    add("--model_dir", type=str, default='./model')

    # Batching, sampling, and distributed-execution switches.
    add("--batch_size", type=int, default=32)
    add("--npratio", type=int, default=4)
    add("--enable_gpu", type=utils.str2bool, default=True)
    add("--enable_hvd", type=utils.str2bool, default=True)
    add("--enable_shuffle", type=utils.str2bool, default=True)
    add("--shuffle_buffer_size", type=int, default=10000)
    add("--num_workers", type=int, default=4)
    add("--filter_num", type=int, default=3)
    add("--log_steps", type=int, default=100)

    # Model training hyper-parameters.
    add("--epochs", type=int, default=1)
    add("--lr", type=float, default=0.0001)
    add("--num_words_title", type=int, default=20)
    add("--num_words_abstract", type=int, default=50)
    add("--num_words_body", type=int, default=100)
    add("--user_log_length", type=int, default=50)
    add("--word_embedding_dim", type=int, default=300)
    add("--glove_embedding_path", type=str, default='./glove.840B.300d.txt')
    add("--freeze_embedding", type=utils.str2bool, default=False)
    add("--news_dim", type=int, default=64)
    add("--news_query_vector_dim", type=int, default=200)
    add("--user_query_vector_dim", type=int, default=200)
    add("--num_attention_heads", type=int, default=20)
    add("--user_log_mask", type=utils.str2bool, default=True)
    add("--drop_rate", type=float, default=0.2)
    add("--save_steps", type=int, default=1000)
    add("--max_steps_per_epoch", type=int, default=1000000)
    add("--load_ckpt_name", type=str, default=None,
        help="choose which ckpt to load and test")

    # BERT backbone configuration.
    add("--apply_bert", type=utils.str2bool, default=False)
    add("--model_type", default="bert", type=str)
    add("--do_lower_case", type=utils.str2bool, default=True)
    add("--model_name",
        default="../bert-base-uncased/pytorch_model.bin", type=str)
    add("--config_name",
        default="../bert-base-uncased/config.json", type=str)
    add("--tokenizer_name",
        default="../bert-base-uncased/vocab.txt", type=str)
    add("--num_hidden_layers", type=int, default=8)
    add("--bert_trainable_layer", type=int, nargs='+',
        default=[], choices=list(range(12)))

    # Architecture / fine-tuning options.
    add("--model", type=str, default=None)
    add("--pooling", type=str, default='att')
    add("--start_epoch", type=int, default=0)
    add("--use_pretrain_model", type=utils.str2bool, default=False)
    add("--pretrain_model_path", type=str, default=None)
    add("--pretrain_lr", type=float, default=0.00001)

    args = parser.parse_args()
    logging.info(args)
    return args
if __name__ == "__main__":
    # CLI smoke-test: parse_args both parses and logs the full namespace.
    args = parse_args()
| 4,252 | 32.753968 | 83 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/model_bert.py | import numpy as np
import torch
from torch import nn
from utils import MODEL_CLASSES
class AttentionPooling(nn.Module):
    """Additive attention pooling over a sequence of vectors.

    Each position is scored by a two-layer MLP (Linear -> Tanh -> Linear);
    the scores are exponentiated, optionally masked, renormalized, and
    used as weights for a weighted sum over the sequence dimension.
    """

    def __init__(self, emb_size, hidden_size):
        """
        Args:
            emb_size: dimensionality of the input vectors
            hidden_size: width of the hidden scoring layer
        """
        super(AttentionPooling, self).__init__()
        self.att_fc1 = nn.Linear(emb_size, hidden_size)
        self.att_fc2 = nn.Linear(hidden_size, 1)

    def forward(self, x, attn_mask=None):
        """
        Args:
            x: batch_size, candidate_size, emb_dim
            attn_mask: batch_size, candidate_size (1 = keep, 0 = ignore)
        Returns:
            (shape) batch_size, emb_dim
        """
        e = self.att_fc1(x)
        e = nn.Tanh()(e)
        alpha = self.att_fc2(e)
        # Manual masked softmax: exponentiate, zero masked positions, then
        # renormalize; the epsilon guards an all-masked (all-zero) row.
        alpha = torch.exp(alpha)
        if attn_mask is not None:
            alpha = alpha * attn_mask.unsqueeze(2)
        alpha = alpha / (torch.sum(alpha, dim=1, keepdim=True) + 1e-8)
        # Weighted sum over the sequence dimension.
        x = torch.bmm(x.permute(0, 2, 1), alpha).squeeze(dim=-1)
        return x
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with an optional multiplicative key mask."""

    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k

    def forward(self, Q, K, V, attn_mask=None):
        '''
        Q: batch_size, n_head, candidate_num, d_k
        K: batch_size, n_head, candidate_num, d_k
        V: batch_size, n_head, candidate_num, d_v
        attn_mask: batch_size, n_head, candidate_num
        Return: batch_size, n_head, candidate_num, d_v
        '''
        # Raw similarities, scaled by sqrt(d_k).
        raw = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        # Manual masked softmax over the key dimension.
        weights = torch.exp(raw)
        if attn_mask is not None:
            weights = weights * attn_mask.unsqueeze(dim=-2)
        weights = weights / (torch.sum(weights, dim=-1, keepdim=True) + 1e-8)
        return torch.matmul(weights, V)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention with independent Q/K/V linear projections."""

    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v

        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)

        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier-uniform init for every linear projection.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight, gain=1)

    def forward(self, Q, K, V, mask=None):
        '''
        Q: batch_size, candidate_num, d_model
        K: batch_size, candidate_num, d_model
        V: batch_size, candidate_num, d_model
        mask: batch_size, candidate_num
        '''
        bsz = Q.shape[0]
        if mask is not None:
            # Broadcast the key mask across all heads.
            mask = mask.unsqueeze(dim=1).expand(-1, self.n_heads, -1)

        def split_heads(proj, width):
            # (bsz, seq, heads*width) -> (bsz, heads, seq, width)
            return proj.view(bsz, -1, self.n_heads, width).transpose(1, 2)

        q_heads = split_heads(self.W_Q(Q), self.d_k)
        k_heads = split_heads(self.W_K(K), self.d_k)
        v_heads = split_heads(self.W_V(V), self.d_v)

        context = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, mask)
        # Merge heads back: (bsz, heads, seq, d_v) -> (bsz, seq, heads*d_v)
        return context.transpose(1, 2).contiguous().view(
            bsz, -1, self.n_heads * self.d_v)
class NewsEncoder(nn.Module):
    """Encode a tokenized news title into a dense news vector using a
    (possibly truncated) pretrained transformer plus a pooling layer and
    a final linear projection to ``args.news_dim``."""

    def __init__(self, args):
        super(NewsEncoder, self).__init__()
        # Pooling strategy: 'cls', 'att', or anything else -> mean pooling.
        self.pooling = args.pooling
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        # Index of the hidden-states tuple in the model's output — TNLRv3
        # returns it one position later than standard BERT. NOTE(review):
        # this depends on the exact transformers version's output layout;
        # confirm when upgrading the library.
        self.output_index = 3 if args.model_type == 'tnlrv3' else 2
        self.bert_config = config_class.from_pretrained(args.config_name,
                                                        output_hidden_states=True,
                                                        num_hidden_layers=args.num_hidden_layers)
        self.bert_model = model_class.from_pretrained(args.model_name, config=self.bert_config)
        if args.pooling == 'att':
            self.attn = AttentionPooling(self.bert_config.hidden_size, args.news_query_vector_dim)
        self.dense = nn.Linear(self.bert_config.hidden_size, args.news_dim)

    def forward(self, x):
        '''
        x: batch_size, word_num * 2
        mask: batch_size, word_num
        '''
        # x packs token ids and the attention mask side by side along dim 1.
        batch_size, num_words = x.shape
        num_words = num_words // 2
        text_ids = torch.narrow(x, 1, 0, num_words)
        text_attmask = torch.narrow(x, 1, num_words, num_words)
        # Select the hidden states of the last kept transformer layer.
        word_vecs = self.bert_model(
            text_ids, text_attmask)[self.output_index][self.bert_config.num_hidden_layers]
        if self.pooling == 'cls':
            # First token's vector (presumably the [CLS] position).
            news_vec = torch.narrow(word_vecs, 1, 0, 1).squeeze(dim=1)
        elif self.pooling == 'att':
            news_vec = self.attn(word_vecs)
        else:
            news_vec = torch.mean(word_vecs, dim=1)
        news_vec = self.dense(news_vec)
        return news_vec
class UserEncoder(nn.Module):
    """Aggregate a user's clicked-news vectors into a single user vector.

    With ``args.model == 'NRMS'`` the history first passes through
    multi-head self-attention; either way an attention pooling layer
    produces the final user representation.
    """

    def __init__(self, args):
        super(UserEncoder, self).__init__()
        self.args = args
        if args.model == 'NRMS':
            self.multi_head_self_attn = MultiHeadSelfAttention(args.news_dim,
                                                               args.num_attention_heads, 16, 16)
            self.attn = AttentionPooling(args.num_attention_heads * 16, args.user_query_vector_dim)
        else:
            self.attn = AttentionPooling(args.news_dim, args.user_query_vector_dim)
        # Learnable(?) padding vector substituted for empty history slots.
        # NOTE(review): .type(torch.FloatTensor) returns a plain Tensor, so
        # pad_doc is NOT registered as an nn.Parameter (it won't appear in
        # state_dict or be trained). Fixing this would change checkpoint
        # layout, so it is flagged rather than changed — confirm intent.
        self.pad_doc = nn.Parameter(torch.empty(1,
                                                args.news_dim).uniform_(-1,
                                                                        1)).type(torch.FloatTensor)

    def forward(self, news_vecs, log_mask=None):
        '''
        news_vecs: batch_size, history_num, news_dim
        log_mask: batch_size, history_num (1 = real click, 0 = padding)
        '''
        bz = news_vecs.shape[0]
        if self.args.user_log_mask:
            # Masked variant: padding positions are excluded via log_mask.
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs, log_mask)
                user_vec = self.attn(news_vecs, log_mask)
            else:
                user_vec = self.attn(news_vecs, log_mask)
        else:
            # Unmasked variant: padding slots are replaced by pad_doc so
            # every position contributes a well-defined vector.
            padding_doc = self.pad_doc.unsqueeze(dim=0).expand(bz, self.args.user_log_length, -1)
            news_vecs = news_vecs * \
                log_mask.unsqueeze(dim=-1) + padding_doc * \
                (1 - log_mask.unsqueeze(dim=-1))
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs)
                user_vec = self.attn(news_vecs)
            else:
                user_vec = self.attn(news_vecs)
        return user_vec
class ModelBert(torch.nn.Module):
    """End-to-end news recommendation model: a BERT-based news encoder,
    an attention-based user encoder, and a cross-entropy loss over one
    positive plus K sampled negative candidates."""

    def __init__(self, args):
        super(ModelBert, self).__init__()
        self.args = args
        self.news_encoder = NewsEncoder(args)
        self.user_encoder = UserEncoder(args)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, history, history_mask, candidate, label):
        '''
        history: batch_size, history_length, num_word_title * 2
        history_mask: batch_size, history_length
        candidate: batch_size, 1+K, num_word_title * 2
        label: batch_size, 1+K
        '''
        bsz = history.shape[0]
        token_width = history.shape[-1]

        # Encode all candidates at once: flatten, encode, restore batch shape.
        cand_vecs = self.news_encoder(candidate.reshape(-1, token_width))
        cand_vecs = cand_vecs.reshape(bsz, -1, self.args.news_dim)

        # Encode the clicked-news history the same way.
        hist_vecs = self.news_encoder(history.reshape(-1, token_width))
        hist_vecs = hist_vecs.reshape(-1, self.args.user_log_length,
                                      self.args.news_dim)

        user_vec = self.user_encoder(hist_vecs, history_mask)

        # Dot-product score between each candidate and the user vector.
        score = torch.bmm(cand_vecs, user_vec.unsqueeze(dim=-1)).squeeze(dim=-1)
        return self.loss_fn(score, label), score
| 8,077 | 37.836538 | 99 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/convert_state_dict.py | import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
        archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
        cache_dir, force_download, proxies, resume_download,
):
    """Download (or locate in the transformers cache) a checkpoint archive
    and load it onto CPU, raising EnvironmentError with a descriptive
    message when the archive cannot be resolved."""
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
                                            proxies=proxies, resume_download=resume_download)
    except EnvironmentError:
        # Distinguish "known model, server unreachable" from "unknown
        # name/path" to give the caller an actionable message.
        if pretrained_model_name_or_path in pretrained_model_archive_map:
            msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
                archive_file)
        else:
            msg = "Model name '{}' was not found in model name list ({}). " \
                  "We assumed '{}' was a path or url to model weight files named one of {} but " \
                  "couldn't find any such file at this path or url.".format(
                      pretrained_model_name_or_path,
                      ', '.join(pretrained_model_archive_map.keys()),
                      archive_file,
                      [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
        raise EnvironmentError(msg)
    # cached_path returns the input unchanged for local files; a different
    # path means the archive was served from the download cache.
    if resolved_archive_file == archive_file:
        logger.info("loading weights file {}".format(archive_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved_archive_file))
    # Always materialize on CPU; moving to GPU is the caller's decision.
    return torch.load(resolved_archive_file, map_location='cpu')
def load_model(state_dict):
    """Rewrite a fused TNLRv3 checkpoint into split query/key/value keys.

    Splits the fused QKV projection weight into separate query/key/value
    weights, renames the q/v bias entries, synthesizes a zero key bias
    (the fused checkpoint stores none), and relocates the relative
    position bias. Every other entry passes through unchanged.
    """
    converted = {}
    for key, value in state_dict.items():
        if key.endswith("attention.self.q_bias"):
            converted[key.replace(
                "attention.self.q_bias", "attention.self.query.bias")] = value.view(-1)
        elif key.endswith("attention.self.v_bias"):
            flat = value.view(-1)
            converted[key.replace(
                "attention.self.v_bias", "attention.self.value.bias")] = flat
            # The fused checkpoint has no key bias: materialize zeros.
            converted[key.replace(
                "attention.self.v_bias", "attention.self.key.bias")] = torch.zeros_like(flat)
        elif key.endswith("attention.self.qkv_linear.weight"):
            rows = value.size(0)
            assert rows % 3 == 0
            # Fused weight stacks Q, K, V row blocks in that order.
            q_w, k_w, v_w = torch.split(value, rows // 3, dim=0)
            prefix = key[:-len("qkv_linear.weight")]
            converted[prefix + "query.weight"] = q_w
            converted[prefix + "key.weight"] = k_w
            converted[prefix + "value.weight"] = v_w
        elif key == "bert.encoder.rel_pos_bias.weight":
            converted["bert.rel_pos_bias.weight"] = value
        else:
            converted[key] = value
    return converted
# Dispatch table: model_type string -> checkpoint-conversion function
# applied to raw state dicts before from_pretrained consumes them.
state_dict_convert = {
    'tnlrv3': load_model,
}
| 3,162 | 40.077922 | 109 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/s2s_loader.py | import numpy as np
from random import randint
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
    """Return a uniformly sampled token from *vocab_words*."""
    return vocab_words[randint(0, len(vocab_words) - 1)]
def batch_list_to_batch_tensors(batch):
    """Collate a list of per-example tuples into per-field batch tensors.

    Fields that are already tensors are stacked; ``None`` fields stay
    ``None``; everything else is wrapped in a long tensor.
    """
    def _collate(field):
        first = field[0]
        if first is None:
            return None
        if isinstance(first, torch.Tensor):
            return torch.stack(field)
        return torch.tensor(field, dtype=torch.long)

    return [_collate(field) for field in zip(*batch)]
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
    """ Pre-process Pipeline Class : callable """

    def __init__(self):
        super().__init__()
        # Sub-word / skip-gram masking knobs; concrete pipelines fill
        # these in after construction.
        self.skipgram_prb = self.skipgram_size = None
        self.pre_whole_word = self.mask_whole_word = None
        self.word_subsample_prb = self.sp_prob = None
        self.pieces_dir = self.vocab_words = None
        self.pieces_threshold = 10
        self.call_count = 0
        self.offline_mode = False
        self.skipgram_size_geo_list = None
        self.span_same_mask = False

    def __call__(self, instance):
        # Subclasses implement the actual per-instance preprocessing.
        raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
    """ Pre-processing steps for pretraining transformer """

    def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
                 mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
                 cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]'):
        """
        Args:
            vocab_words: id -> token list, used only for debug logging.
            indexer: function mapping a token list to token ids.
            max_len: hard cap on the total (source + target) length.
            max_tgt_length: maximum number of positions reserved for the
                generated target.
            mode: 's2s' (full source visibility) or 'l2r' (left-to-right).
            pos_shift: when True, no trailing [SEP] is appended to the
                source, so only one special token ([CLS]) is added.
        """
        super().__init__()
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self.max_len = max_len
        # Lower-triangular template used to build causal attention masks.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.task_idx = 3   # relax projection layer for different tasks
        assert mode in ("s2s", "l2r")
        self.mode = mode
        self.max_tgt_length = max_tgt_length
        self.pos_shift = pos_shift
        # Number of special tokens around the source: [CLS] alone (1) or
        # [CLS] ... [SEP] (2).
        self.delta = 1 if pos_shift else 2
        self.cls_token = cls_token
        self.sep_token = sep_token
        self.pad_token = pad_token
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        # Call counter; the first few calls log their inputs for debugging.
        self.cc = 0

    def __call__(self, instance):
        """Turn (tokens_a, max_a_len) into decoder-ready model inputs."""
        tokens_a, max_a_len = instance

        # Wrap the source with special tokens, then pad it out to the
        # batch-wide source width (max_a_len + delta).
        padded_tokens_a = [self.cls_token] + tokens_a
        if not self.pos_shift:
            padded_tokens_a = padded_tokens_a + [self.sep_token]
        assert len(padded_tokens_a) <= max_a_len + self.delta
        if max_a_len + self.delta > len(padded_tokens_a):
            padded_tokens_a += [self.pad_token] * \
                (max_a_len + self.delta - len(padded_tokens_a))
        assert len(padded_tokens_a) == max_a_len + self.delta

        # Reserve room for the target, clipped to the model maximum.
        max_len_in_batch = min(self.max_tgt_length +
                               max_a_len + self.delta, self.max_len)
        tokens = padded_tokens_a
        # Segment ids: source type for the source span, target type for
        # everything the decoder will generate.
        segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
            + [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))

        mask_qkv = None

        # Position ids: contiguous over real source tokens, 0 over the
        # source padding, then continuing from the real source length over
        # the target region — so padding never shifts target positions.
        position_ids = []
        for i in range(len(tokens_a) + self.delta):
            position_ids.append(i)
        for i in range(len(tokens_a) + self.delta, max_a_len + self.delta):
            position_ids.append(0)
        for i in range(max_a_len + self.delta, max_len_in_batch):
            position_ids.append(
                i - (max_a_len + self.delta) + len(tokens_a) + self.delta)

        # Token Indexing
        input_ids = self.indexer(tokens)
        self.cc += 1
        if self.cc < 20:
            # Log the first few examples for manual inspection.
            # print("Vocab size = %d" % len(self.vocab_words))
            # for tk_id in input_ids:
            #     print(u"trans %d -> %s" % (tk_id, self.vocab_words[tk_id]))
            logger.info(u"Input src = %s" % " ".join(
                (self.vocab_words[tk_id]) for tk_id in input_ids))

        # Zero Padding
        # Build the (query, key) visibility matrix: in 's2s' every position
        # sees the whole real source; in 'l2r' the source itself is causal.
        input_mask = torch.zeros(
            max_len_in_batch, max_len_in_batch, dtype=torch.long)
        if self.mode == "s2s":
            input_mask[:, :len(tokens_a) + self.delta].fill_(1)
        else:
            st, end = 0, len(tokens_a) + self.delta
            input_mask[st:end, st:end].copy_(
                self._tril_matrix[:end, :end])
            input_mask[end:, :len(tokens_a) + self.delta].fill_(1)
        # Target region attends causally to itself (lower-triangular).
        second_st, second_end = len(padded_tokens_a), max_len_in_batch
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])

        return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
| 5,318 | 32.878981 | 90 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/modeling.py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, \
BertOutput, BertPredictionHeadTransform, BertPooler
from transformers.file_utils import WEIGHTS_NAME
from tnlrv3.config import TuringNLRv3ForSeq2SeqConfig
from tnlrv3.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
logger = logging.getLogger(__name__)
# TNLRv3 reuses PyTorch's stock LayerNorm implementation.
BertLayerNorm = torch.nn.LayerNorm

# No remotely hosted checkpoints are registered here, so weights must be
# supplied as a local file or directory path (see from_pretrained below).
TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP = {
}
class TuringNLRv3PreTrainedModel(BertPreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    config_class = TuringNLRv3ForSeq2SeqConfig
    # Converters registered per model_type (see state_dict_convert).
    supported_convert_pretrained_model_archive_map = {
        "tnlrv3": TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }
    base_model_prefix = "TuringNLRv3_for_seq2seq"
    pretrained_model_archive_map = {
        **TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(
                mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, reuse_position_embedding=None,
        replace_prefix=None, *model_args, **kwargs,
    ):
        """Load pretrained weights, converting fused-QKV checkpoints and
        resizing position embeddings to match the requested config.

        Args:
            reuse_position_embedding: when growing the position table,
                tile the old embeddings across the whole new range instead
                of only the first old_size rows.
            replace_prefix: strip this prefix from matching state-dict
                keys before loading.
        """
        model_type = kwargs.pop('model_type', 'tnlrv3')
        if model_type is not None and "state_dict" not in kwargs:
            if model_type in cls.supported_convert_pretrained_model_archive_map:
                pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[
                    model_type]
                # Resolve the checkpoint from, in order: the registered
                # archive map, a local file, or a local directory.
                if pretrained_model_name_or_path in pretrained_model_archive_map:
                    state_dict = get_checkpoint_from_transformer_cache(
                        archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
                        pretrained_model_name_or_path=pretrained_model_name_or_path,
                        pretrained_model_archive_map=pretrained_model_archive_map,
                        cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
                        proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
                    )
                    state_dict = state_dict_convert[model_type](state_dict)
                    kwargs["state_dict"] = state_dict
                    logger.info("Load HF ckpts")
                elif os.path.isfile(pretrained_model_name_or_path):
                    state_dict = torch.load(
                        pretrained_model_name_or_path, map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](
                        state_dict)
                    logger.info("Load local ckpts")
                elif os.path.isdir(pretrained_model_name_or_path):
                    state_dict = torch.load(os.path.join(
                        pretrained_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](
                        state_dict)
                    logger.info("Load local ckpts")
                else:
                    raise RuntimeError(
                        "Not fined the pre-trained checkpoint !")

        if kwargs["state_dict"] is None:
            logger.info("TNLRv3 does't support the model !")
            raise NotImplementedError()

        config = kwargs["config"]
        state_dict = kwargs["state_dict"]
        # initialize new position embeddings (From Microsoft/UniLM)
        _k = 'bert.embeddings.position_embeddings.weight'
        if _k in state_dict:
            if config.max_position_embeddings > state_dict[_k].shape[0]:
                # Growing: allocate a bigger table and tile the old rows
                # into it (once, or repeatedly when reuse_position_embedding).
                logger.info("Resize > position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(
                    data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(
                    mean=0.0, std=config.initializer_range)
                max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
                shift = 0
                while shift < max_range:
                    delta = min(old_vocab_size, max_range - shift)
                    new_postion_embedding.data[shift: shift +
                                               delta, :] = state_dict[_k][:delta, :]
                    logger.info(" CP [%d ~ %d] into [%d ~ %d] " %
                                (0, delta, shift, shift + delta))
                    shift += delta
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding
            elif config.max_position_embeddings < state_dict[_k].shape[0]:
                # Shrinking: keep only the first max_position_embeddings rows.
                logger.info("Resize < position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(
                    data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(
                    mean=0.0, std=config.initializer_range)
                new_postion_embedding.data.copy_(
                    state_dict[_k][:config.max_position_embeddings, :])
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding

        # Optionally strip a key prefix (e.g. a wrapper module's name).
        if replace_prefix is not None:
            new_state_dict = {}
            for key in state_dict:
                if key.startswith(replace_prefix):
                    new_state_dict[key[len(replace_prefix):]] = state_dict[key]
                else:
                    new_state_dict[key] = state_dict[key]
            kwargs["state_dict"] = new_state_dict
            del state_dict

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=0)
        if getattr(config, "fix_word_embedding", None):
            # Optionally freeze the token embedding table.
            self.word_embeddings.weight.requires_grad = False
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size)
        # Token-type embeddings are optional (disabled when type_vocab_size <= 0).
        self.token_type_embeddings = (
            nn.Embedding(config.type_vocab_size, config.hidden_size)
            if config.type_vocab_size > 0 else None)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model
        # variable names, so TF checkpoints can still be mapped onto it.
        self.LayerNorm = BertLayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """Sum the embedding components; returns (embeddings, position_ids)."""
        if input_ids is not None:
            shape = input_ids.size()
            device = input_ids.device
        else:
            shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        seq_len = shape[1]

        if position_ids is None:
            position_ids = torch.arange(
                seq_len, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(shape)
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        out = inputs_embeds + self.position_embeddings(position_ids)
        if self.token_type_embeddings:
            out = out + self.token_type_embeddings(token_type_ids)
        out = self.dropout(self.LayerNorm(out))
        return out, position_ids
class BertSelfAttention(nn.Module):
    """Multi-head (self- or cross-) attention with optional additive
    relative-position bias and an incremental ``split_lengths`` mode that
    processes the sequence in segments, each attending only to itself and
    the segments before it."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[
            :-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def multi_head_attention(self, query, key, value, attention_mask, rel_pos):
        """Standard scaled dot-product attention over already-projected
        query/key/value; attention_mask and rel_pos are added to the
        pre-softmax scores."""
        query_layer = self.transpose_for_scores(query)
        key_layer = self.transpose_for_scores(key)
        value_layer = self.transpose_for_scores(value)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(
            query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / \
            math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        if rel_pos is not None:
            # Additive relative-position bias (same shape as the scores).
            attention_scores = attention_scores + rel_pos
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return (context_layer, attention_probs) if self.output_attentions else (context_layer,)

    def forward(self, hidden_states, attention_mask=None,
                encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        mixed_query_layer = self.query(hidden_states)
        if split_lengths:
            # Segment mode cannot also return per-head attention maps.
            assert not self.output_attentions
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        if split_lengths:
            # Process the sequence segment by segment: each segment's
            # queries attend to the keys/values accumulated so far (its
            # own segment plus all earlier ones), with the mask and
            # rel_pos sliced to the matching (query rows, key cols) window.
            query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
            key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
            value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)
            key = None
            value = None
            outputs = []
            sum_length = 0
            for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
                key = _key if key is None else torch.cat((key, _key), dim=1)
                value = _value if value is None else torch.cat(
                    (value, _value), dim=1)
                sum_length += part_length
                outputs.append(self.multi_head_attention(
                    query, key, value, attention_mask[:, :,
                                                      sum_length - part_length: sum_length, :sum_length],
                    rel_pos=None if rel_pos is None else rel_pos[:, :,
                                                                 sum_length - part_length: sum_length, :sum_length],
                )[0])
            outputs = (torch.cat(outputs, dim=1), )
        else:
            outputs = self.multi_head_attention(
                mixed_query_layer, mixed_key_layer, mixed_value_layer,
                attention_mask, rel_pos=rel_pos)
        return outputs
class BertAttention(nn.Module):
    """Attention sub-layer: multi-head self-attention followed by the
    residual-add + LayerNorm output projection (BertSelfOutput)."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        attn_results = self.self(
            hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            split_lengths=split_lengths,
            rel_pos=rel_pos)
        projected = self.output(attn_results[0], hidden_states)
        # Keep any extra outputs (attention probs) behind the projection.
        return (projected,) + attn_results[1:]
class BertLayer(nn.Module):
    """One transformer encoder layer: attention block followed by the
    feed-forward (intermediate + output) block."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        attn_out = self.attention(
            hidden_states, attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        context = attn_out[0]
        ffn_hidden = self.intermediate(context)
        layer_out = self.output(ffn_hidden, context)
        # Propagate any extra attention outputs unchanged.
        return (layer_out,) + attn_out[1:]
class BertEncoder(nn.Module):
    """Stack of BertLayer modules; optionally collects hidden states and attentions."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config)
                                    for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        collected_states = ()
        collected_attentions = ()
        for layer_module in self.layer:
            # Record the layer *input* so the embedding output is included too.
            if self.output_hidden_states:
                collected_states = collected_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask,
                split_lengths=split_lengths, rel_pos=rel_pos)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                collected_attentions = collected_attentions + (layer_outputs[1],)
        # Append the final layer's output.
        if self.output_hidden_states:
            collected_states = collected_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (collected_states,)
        if self.output_attentions:
            outputs = outputs + (collected_attentions,)
        # last-layer hidden state, (all hidden states), (all attentions)
        return outputs
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """
    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    bucket = 0
    if bidirectional:
        # Split buckets in half: one half for positive offsets, one for negative.
        num_buckets //= 2
        bucket += (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        # Unidirectional: only look backwards; clamp forward offsets to zero.
        distance = torch.max(-relative_position,
                             torch.zeros_like(relative_position))
    # now `distance` is in the range [0, inf)

    # First half of the remaining buckets: one bucket per exact offset.
    max_exact = num_buckets // 2
    is_small = distance < max_exact

    # Second half: logarithmically growing bins out to max_distance.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(
        log_bucket, torch.full_like(log_bucket, num_buckets - 1))

    bucket += torch.where(is_small, distance, log_bucket)
    return bucket
class TuringNLRv3Model(TuringNLRv3PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """

    def __init__(self, config):
        super(TuringNLRv3Model, self).__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # Seq2seq fine-tuning configs skip the [CLS] pooler entirely.
        if not isinstance(config, TuringNLRv3ForSeq2SeqConfig):
            self.pooler = BertPooler(config)
        else:
            self.pooler = None
        if self.config.rel_pos_bins > 0:
            # Projects one-hot relative-position buckets to a per-head bias.
            self.rel_pos_bias = nn.Linear(
                self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
                position_ids=None, inputs_embeds=None, split_lengths=None):
        # Exactly one of input_ids / inputs_embeds must be given.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError(
                "You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        # NOTE(review): a mask whose rank is neither 2 nor 3 leaves
        # extended_attention_mask unbound and raises NameError below — confirm
        # callers only ever pass 2-D or 3-D masks.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]

        # Provided a padding mask of dimensions [batch_size, seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output, position_ids = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        if self.config.rel_pos_bins > 0:
            # Pairwise relative offsets -> bucket ids -> per-head additive bias
            # of shape [batch, heads, seq, seq].
            rel_pos_mat = position_ids.unsqueeze(-2) - \
                position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
            rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(
                embedding_output)
            rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        else:
            rel_pos = None
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        sequence_output = encoder_outputs[0]

        # add hidden_states and attentions if they are here
        outputs = (sequence_output, ) + encoder_outputs[1:]
        if self.pooler is None:
            # sequence_output, pooled_output, (hidden_states), (attentions)
            return outputs
        else:
            pooled_output = self.pooler(sequence_output)
            return (sequence_output, pooled_output) + encoder_outputs[1:]
class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)

        assert label_smoothing > 0
        assert tgt_vocab_size > 0

        # Spread the smoothing mass uniformly over every token except the gold
        # token and the ignored (padding) token — hence vocab_size - 2.
        off_value = label_smoothing / (tgt_vocab_size - 2)
        template = torch.full((tgt_vocab_size,), off_value)
        template[self.ignore_index] = 0
        self.register_buffer('one_hot', template.unsqueeze(0))

        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_scores = output.view(-1, self.tgt_vocab_size)
        flat_target = target.view(-1)
        # Build the smoothed target distribution for every position.
        smoothed = self.one_hot.float().repeat(flat_target.size(0), 1)
        smoothed.scatter_(1, flat_target.unsqueeze(1), self.confidence)
        # Zero the full row for ignored positions so they contribute no loss.
        smoothed.masked_fill_((flat_target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(flat_scores, smoothed, reduction='none').view(batch_size, num_pos, -1).sum(2)
class BertLMPredictionHead(nn.Module):
    """Masked-LM prediction head whose decoder weight is tied to the input embeddings."""

    def __init__(self, config, decoder_weight):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output projection reuses the supplied (tied) weight matrix; only
        # the per-token bias is a new parameter of this module.
        self.decoder_weight = decoder_weight
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return F.linear(transformed, weight=self.decoder_weight, bias=self.bias)
class BertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing a single masked-LM prediction head."""

    def __init__(self, config, decoder_weight):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_weight)

    def forward(self, sequence_output):
        # Per-position vocabulary logits.
        return self.predictions(sequence_output)
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
    """Build a padding mask and position ids for length-`num_tokens` sequences.

    num_tokens: per-example count of real tokens; max_len: padded length;
    offset: optional per-example starting position. Padded slots get mask 0
    and position id 0.
    """
    positions = torch.arange(
        0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
    mask = (positions < num_tokens.view(-1, 1)).type_as(num_tokens)
    if offset is not None:
        positions = positions + offset.view(-1, 1)
    return mask, positions * mask
class TuringNLRv3ForSequenceToSequence(TuringNLRv3PreTrainedModel):
    """Base class for seq2seq fine-tuning: encoder + LM head tied to the word embeddings."""
    MODEL_NAME = 'basic class'

    def __init__(self, config):
        super(TuringNLRv3ForSequenceToSequence, self).__init__(config)
        self.bert = TuringNLRv3Model(config)
        # The LM head's decoder weight is tied to the input word embeddings.
        self.cls = BertOnlyMLMHead(
            config, self.bert.embeddings.word_embeddings.weight)

        self.init_weights()
        # NOTE(review): nn.LogSoftmax() without dim= is deprecated and emits a
        # warning at call time; it is not used anywhere in the subclasses
        # visible here — confirm before adding dim or removing.
        self.log_softmax = nn.LogSoftmax()

        self.source_type_id = config.source_type_id
        self.target_type_id = config.target_type_id

        # Exactly one of the two criteria is active, selected by config.
        if config.label_smoothing > 0:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
            self.crit_mask_lm = None
        else:
            self.crit_mask_lm_smoothed = None
            self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
class TuringNLRv3ForSequenceToSequenceWithPseudoMask(TuringNLRv3ForSequenceToSequence):
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceWithPseudoMask"

    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        """Build the [batch, from, to] mask over the [source | target | pseudo] sequence.

        Source tokens get weight 0, real target tokens their (positive) span id,
        pseudo tokens the negated span id; the sign of the weight distinguishes
        real from pseudo positions in the comparisons below.
        """
        weight = torch.cat((torch.zeros_like(source_position_ids),
                            target_span_ids, -target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)

        # Attendable keys: non-padding tokens with non-negative weight
        # (pseudo positions reuse target_mask for padding).
        true_tokens = (0 <= to_weight) & (
            torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
        # Real positions attend to source and to real targets up to their own span.
        true_tokens_mask = (from_weight >= 0) & true_tokens & (
            to_weight <= from_weight)
        # Pseudo positions attend to strictly earlier real tokens...
        pseudo_tokens_mask = (
            from_weight < 0) & true_tokens & (-to_weight > from_weight)
        # ...and to themselves (same negative weight).
        pseudo_tokens_mask = pseudo_tokens_mask | (
            (from_weight < 0) & (to_weight == from_weight))

        return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)

    def forward(
            self, source_ids, target_ids, label_ids, pseudo_ids,
            num_source_tokens, num_target_tokens, target_span_ids=None, target_no_offset=None):
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        pseudo_len = pseudo_ids.size(1)
        # Exactly one pseudo (corrupted) token per real target token.
        assert target_len == pseudo_len
        assert source_len > 0 and target_len > 0
        split_lengths = (source_len, target_len, pseudo_len)

        # Encoder input is the concatenation [source | target | pseudo target].
        input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)

        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id,
             torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)

        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=None if target_no_offset else num_source_tokens)

        # Pseudo tokens share the real target's position ids.
        position_ids = torch.cat(
            (source_position_ids, target_position_ids, target_position_ids), dim=1)
        if target_span_ids is None:
            target_span_ids = target_position_ids
        attention_mask = self.create_attention_mask(
            source_mask, target_mask, source_position_ids, target_span_ids)

        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)

        sequence_output = outputs[0]
        # The LM loss is computed only on the pseudo segment's hidden states.
        pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]

        def loss_mask_and_normalize(loss, mask):
            # Average the per-token loss over the non-padding target positions.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()

        prediction_scores_masked = self.cls(pseudo_sequence_output)

        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), label_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), label_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), target_mask)

        return pseudo_lm_loss
class TuringNLRv3ForSequenceToSequenceUniLMV1(TuringNLRv3ForSequenceToSequence):
    """UniLM-v1 style fine-tuning: full visibility inside the source, causal over the target."""
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceUniLMV1"

    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        """[batch, from, to] mask: source weight is 0, target weight is the span id,
        so each position may attend to any non-padding token with weight <= its own."""
        weight = torch.cat(
            (torch.zeros_like(source_position_ids), target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)

        # Non-padding keys across the concatenated [source | target] sequence.
        true_tokens = torch.cat((source_mask, target_mask), dim=1).unsqueeze(1)
        return ((true_tokens == 1) & (to_weight <= from_weight)).type_as(source_mask)

    def forward(self, source_ids, target_ids, masked_ids, masked_pos, masked_weight, num_source_tokens, num_target_tokens):
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        split_lengths = (source_len, target_len)

        input_ids = torch.cat((source_ids, target_ids), dim=1)

        # Segment ids distinguish source from target tokens.
        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id), dim=1)

        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        # Target positions continue after the source (offset by its length).
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=num_source_tokens)

        position_ids = torch.cat(
            (source_position_ids, target_position_ids), dim=1)
        attention_mask = self.create_attention_mask(
            source_mask, target_mask, source_position_ids, target_position_ids)
        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)

        def gather_seq_out_by_pos(seq, pos):
            # Select the hidden states at the masked target positions.
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))

        sequence_output = outputs[0]
        target_sequence_output = sequence_output[:, source_len:, ]
        masked_sequence_output = gather_seq_out_by_pos(
            target_sequence_output, masked_pos)

        def loss_mask_and_normalize(loss, mask):
            # Weighted average of the per-token loss over the masked positions.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()

        prediction_scores_masked = self.cls(masked_sequence_output)

        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weight)

        return pseudo_lm_loss
class TuringNLRv3ForSequenceClassification(TuringNLRv3PreTrainedModel):
    """Encoder + dropout + linear classifier on the pooled [CLS] representation."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = TuringNLRv3Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import BertTokenizer, BertForSequenceClassification
        import torch

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')

        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)

        loss, logits = outputs[:2]

        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            # head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        # outputs[1] is the pooled [CLS] representation from BertPooler.
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        # add hidden states and attention if they are here
        # NOTE(review): outputs[:] keeps the FULL bert output (including
        # last_hidden_state and pooled_output), unlike the usual HF
        # convention of outputs[2:] — the trailing comment below documents
        # this layout, so it appears intentional; confirm with callers.
        outputs = (logits,) + outputs[:]

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = nn.MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(
                    logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        # (loss), logits, last_hidden_state, pooled_output, (hidden_states), (attentions)
        return outputs
| 38,964 | 44.360885 | 146 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/utils.py | from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import torch
import tqdm
import array
import collections
import torch.utils.data
from transformers.file_utils import WEIGHTS_NAME
try:
import lmdb
except:
pass
OPTIM_NAME = "optimizer.bin"
logger = logging.getLogger(__name__)
class TrainingExample(object):
    """One (source, target) pair of token-id lists plus its index in the corpus."""

    def __init__(self, source_ids, target_ids, example_id):
        self.example_id = example_id
        self.source_ids = source_ids
        self.target_ids = target_ids
class Seq2seqDatasetForTuringNLRv3(torch.utils.data.Dataset):
    """Training dataset that builds masked seq2seq instances on the fly.

    Three fine-tuning methods are supported (see __getitem__):
      v0: shift-by-one LM over the whole [SEP]-prefixed target,
      v1: mask a random subset of target positions,
      v2: pseudo-masked training (one corrupted pseudo token per target token).
    """

    def __init__(
            self, features, max_source_len, max_target_len,
            vocab_size, cls_id, sep_id, pad_id, mask_id,
            random_prob, keep_prob, offset, num_training_instances,
            finetuning_method='v1', target_mask_prob=-1.0, num_max_mask_token=0,
            source_mask_prob=-1.0,
    ):
        self.features = features
        self.max_source_len = max_source_len
        self.max_target_len = max_target_len
        # Starting offset into `features`, e.g. when resuming training.
        self.offset = offset
        if offset > 0:
            logger.info(
                " **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
        self.cls_id = cls_id
        self.sep_id = sep_id
        self.pad_id = pad_id
        # Corruption mix for get_masked_token: keep_prob keep the token,
        # random_prob substitute a random id, remainder use [MASK].
        self.random_prob = random_prob
        self.keep_prob = keep_prob
        self.mask_id = mask_id
        self.vocab_size = vocab_size
        self.num_training_instances = num_training_instances
        self.target_mask_prob = target_mask_prob
        if finetuning_method == 'v0':
            # v0 predicts every target position, so allow masking all of them.
            num_max_mask_token = self.max_target_len
            logger.info("Mask way v0: set num_max_mask_token = %d" %
                        num_max_mask_token)
        self.num_max_mask_token = num_max_mask_token
        self.finetuning_method = finetuning_method
        assert finetuning_method in ('v0', 'v1', 'v2')
        self.source_mask_prob = source_mask_prob

    def __len__(self):
        # Epoch length is decoupled from len(features); indices wrap around.
        return self.num_training_instances

    def __trunk(self, ids, max_len, append_sep=True):
        # Truncate to max_len, optionally reserving the last slot for [SEP].
        if append_sep:
            max_len -= 1
        if len(ids) > max_len:
            ids = ids[:max_len]
        if append_sep:
            ids = ids + [self.sep_id]
        return ids

    def __pad(self, ids, max_len):
        # Right-pad with pad_id up to max_len; ids must not exceed max_len.
        if len(ids) < max_len:
            return ids + [self.pad_id] * (max_len - len(ids))
        else:
            assert len(ids) == max_len
            return ids

    def get_masked_token(self, tk_id):
        # BERT-style corruption: keep / random vocabulary id / [MASK].
        p = random.random()
        if p < self.keep_prob:
            return tk_id
        elif p < self.keep_prob + self.random_prob:
            return random.randint(0, self.vocab_size - 1)
        else:
            return self.mask_id

    def __getitem__(self, _idx):
        # Wrap the (offset) index around the underlying feature list.
        idx = (self.offset + _idx) % len(self.features)
        # print("%d get %d" % (_idx, idx))
        feature = self.features[idx]
        # v0 appends [SEP] to the target instead of the source, so the source
        # only gets a trailing [SEP] for v1/v2.
        source_ids = self.__trunk([self.cls_id] + feature.source_ids,
                                  self.max_source_len, append_sep=self.finetuning_method != 'v0')
        target_ids = feature.target_ids
        if self.finetuning_method == 'v0':
            target_ids = [self.sep_id] + target_ids
        target_ids = self.__trunk(
            target_ids, self.max_target_len, append_sep=self.finetuning_method != 'v0')

        num_source_tokens = len(source_ids)
        num_target_tokens = len(target_ids)

        # Optionally corrupt non-special source tokens as well.
        if self.source_mask_prob > 0:
            for i in range(num_source_tokens):
                tk_id = source_ids[i]
                if tk_id != self.cls_id and tk_id != self.sep_id:
                    r = random.random()
                    if r < self.source_mask_prob:
                        source_ids[i] = self.get_masked_token(tk_id)

        source_ids = self.__pad(source_ids, self.max_source_len)
        target_ids = self.__pad(target_ids, self.max_target_len)

        if self.finetuning_method == 'v0':
            # v0: every position predicts its successor (last one predicts
            # [SEP]); inputs after position 0 may additionally be corrupted.
            masked_pos = []
            masked_ids = []
            masked_weights = []
            for pos in range(num_target_tokens):
                if pos + 1 != num_target_tokens:
                    masked_ids.append(target_ids[pos + 1])
                else:
                    masked_ids.append(self.sep_id)
                masked_pos.append(pos)
                masked_weights.append(1)

                r = random.random()
                if r < self.target_mask_prob and pos > 0:
                    target_ids[pos] = self.get_masked_token(target_ids[pos])

            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)

            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v1':
            # v1: corrupt a random subset of target positions and predict
            # only those (at least one position is always masked).
            masked_pos = list(range(num_target_tokens))
            random.shuffle(masked_pos)

            num_masked_token = \
                min(self.num_max_mask_token, int(
                    self.target_mask_prob * num_target_tokens))
            if num_masked_token <= 0:
                num_masked_token = 1

            masked_pos = masked_pos[:num_masked_token]

            masked_ids = []
            masked_weights = []
            for pos in masked_pos:
                masked_ids.append(target_ids[pos])
                target_ids[pos] = self.get_masked_token(target_ids[pos])
                masked_weights.append(1)

            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)

            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v2':
            # v2: build a full pseudo (corrupted) copy of the target; with
            # probability target_mask_prob the real target token is replaced
            # by the same corrupted token.
            pseudo_ids = []
            label_ids = []
            for pos in range(num_target_tokens):
                tk_id = target_ids[pos]
                masked_tk_id = self.get_masked_token(tk_id)
                pseudo_ids.append(masked_tk_id)
                label_ids.append(tk_id)
                r = random.random()
                if r < self.target_mask_prob:
                    target_ids[pos] = masked_tk_id

            label_ids = self.__pad(label_ids, self.max_target_len)
            pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)

            return source_ids, target_ids, label_ids, pseudo_ids, num_source_tokens, num_target_tokens
def batch_list_to_batch_tensors(batch):
    """Collate a list of examples (tuples of fields) into one tensor per field.

    Tensor fields are stacked; any other field is converted to a long tensor.
    """
    def _collate(field_values):
        if isinstance(field_values[0], torch.Tensor):
            return torch.stack(field_values)
        return torch.tensor(field_values, dtype=torch.long)

    return [_collate(field_values) for field_values in zip(*batch)]
def get_max_epoch_model(output_dir):
    """Return the largest checkpoint index under output_dir/ckpt-<N>/ that has
    BOTH model weights and optimizer state, or None if no complete one exists."""
    def _ckpt_indices(file_name):
        paths = glob.glob(os.path.join(output_dir, "ckpt-*/%s" % file_name))
        return set(int(os.path.dirname(p).split('-')[-1]) for p in paths)

    model_ckpts = _ckpt_indices(WEIGHTS_NAME)
    optim_ckpts = _ckpt_indices(OPTIM_NAME)
    # Only checkpoints present in both sets are recoverable.
    complete = model_ckpts & optim_ckpts
    return max(complete) if complete else None
def get_checkpoint_state_dict(output_dir, ckpt):
    """Load checkpoint `ckpt`: the optimizer state dict with the model weights
    attached under its 'model' key (everything mapped to CPU)."""
    ckpt_dir = os.path.join(output_dir, "ckpt-%d" % ckpt)
    model_path = os.path.join(ckpt_dir, WEIGHTS_NAME)
    logger.info(" ** Recover model checkpoint in %s ** ", model_path)
    model_state = torch.load(model_path, map_location='cpu')
    optim_path = os.path.join(ckpt_dir, OPTIM_NAME)
    state = torch.load(optim_path, map_location='cpu')
    state['model'] = model_state
    return state
def report_length(length_counter, total_count):
    """Log a histogram of sequence lengths in buckets of 16 with cumulative %.

    length_counter: defaultdict(int) mapping length -> count (missing keys are
    read, so a plain dict would raise KeyError).
    """
    longest = max(length_counter.keys())
    bucket_start = 0
    cumulative = 0
    while bucket_start < longest:
        bucket_count = sum(length_counter[bucket_start + i] for i in range(16))
        cumulative += bucket_count
        if bucket_count > 0:
            logger.info("%d ~ %d = %d, %.2f%%" %
                        (bucket_start, bucket_start + 16, bucket_count, (cumulative * 100.0) / total_count))
        bucket_start += 16
def serialize_str(x):
    # Encode a value's text form as ASCII bytes for LMDB storage.
    return "{}".format(x).encode('ascii')
def serialize_array(x, dtype):
    # Pack a list of numbers into raw bytes using the given array typecode.
    buf = array.array(dtype)
    buf.fromlist(x)
    return buf.tobytes()
def write_to_lmdb(db, key, value):
    """Put key/value into an LMDB database, doubling map_size on MapFullError
    and retrying until the write succeeds."""
    while True:
        txn = db.begin(write=True)
        try:
            txn.put(key, value)
            txn.commit()
            return
        except lmdb.MapFullError:
            txn.abort()
            # Grow the map and retry the whole transaction.
            curr_limit = db.info()['map_size']
            new_limit = curr_limit * 2
            print('>>> Doubling LMDB map size to %sMB ...' %
                  (new_limit >> 20,))
            db.set_mapsize(new_limit)
def deserialize_str(x):
    # Inverse of serialize_str: decode ASCII bytes back to text.
    return x.decode('ascii')
class DocDB(object):
    """Read-only view over a preprocessed LMDB corpus of (source, target) id arrays.

    Metadata lives under __start__/__size__/__dtype__; per-document arrays are
    stored under src_ids_<i> / tgt_ids_<i>.
    """

    def __init__(self, db_path):
        self.db_path = db_path
        self.env = lmdb.open(db_path, readonly=True,
                             lock=False, readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.start_key_index = int(deserialize_str(txn.get(b'__start__')))
            self.size = int(deserialize_str(txn.get(b'__size__')))
            self.dtype = deserialize_str(txn.get(b'__dtype__'))

    def _deserialize_array(self, x):
        # Unpack raw bytes back into a Python list using the stored typecode.
        buf = array.array(self.dtype)
        buf.frombytes(x)
        return buf.tolist()

    def __getitem__(self, doc_id):
        with self.env.begin(write=False) as txn:
            src = self._deserialize_array(txn.get(b"src_ids_%d" % doc_id))
            tgt = self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id))
        return TrainingExample(source_ids=src, target_ids=tgt, example_id=None)

    def __len__(self):
        return self.size
def load_and_cache_examples(
        example_file, tokenizer, local_rank, cached_features_file, shuffle=True,
        lmdb_cache=None, lmdb_dtype='h', eval_mode=False):
    """Tokenize a jsonl file of {"src": ..., "tgt": ...} records into
    TrainingExamples, with torch/LMDB caching and distributed coordination.

    Returns one of: a list of TrainingExample, a DocDB over an existing LMDB
    cache, or (after writing a fresh LMDB cache) the cache path itself.
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()

    if cached_features_file is not None and os.path.isfile(cached_features_file):
        logger.info("Loading features from cached file %s",
                    cached_features_file)
        features = torch.load(cached_features_file)
    elif cached_features_file is not None and os.path.isdir(cached_features_file) \
            and os.path.exists(os.path.join(cached_features_file, 'lock.mdb')):
        logger.info("Loading features from cached LMDB %s",
                    cached_features_file)
        features = DocDB(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", example_file)

        examples = []
        with open(example_file, mode="r", encoding="utf-8") as reader:
            for line in reader:
                examples.append(json.loads(line))
        features = []

        # slc/tlc: source/target length histograms for the report below.
        slc = collections.defaultdict(int)
        tlc = collections.defaultdict(int)
        for example in tqdm.tqdm(examples):
            # "src"/"tgt" may be pre-tokenized lists or raw strings.
            if isinstance(example["src"], list):
                source_tokens = example["src"]
                target_tokens = [] if eval_mode else example["tgt"]
            else:
                source_tokens = tokenizer.tokenize(example["src"])
                target_tokens = [] if eval_mode else tokenizer.tokenize(
                    example["tgt"])
            source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
            target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
            slc[len(source_ids)] += 1
            tlc[len(target_ids)] += 1
            features.append(
                TrainingExample(
                    source_ids=source_ids,
                    target_ids=target_ids,
                    example_id=len(features),
                )
            )
        if shuffle:
            random.shuffle(features)
            logger.info("Shuffle the features !")
        logger.info("Source length:")
        report_length(slc, total_count=len(examples))
        logger.info("Target length:")
        report_length(tlc, total_count=len(examples))
        if local_rank in [-1, 0] and cached_features_file is not None:
            if lmdb_cache:
                # Persist the features into an LMDB directory, drop the
                # in-memory list, and hand back the cache path instead.
                db = lmdb.open(cached_features_file,
                               readonly=False, map_async=True)
                for idx, feature in enumerate(features):
                    write_to_lmdb(
                        db, b"src_ids_%d" % idx,
                        serialize_array(feature.source_ids, dtype=lmdb_dtype))
                    write_to_lmdb(
                        db, b"tgt_ids_%d" % idx,
                        serialize_array(feature.target_ids, dtype=lmdb_dtype))
                write_to_lmdb(db, b"__start__", serialize_str(0))
                write_to_lmdb(db, b"__size__", serialize_str(len(features)))
                write_to_lmdb(db, b"__dtype__", serialize_str(lmdb_dtype))
                db.sync()
                db.close()
                logger.info("db_key_idx = %d" % len(features))
                del features
                features = cached_features_file
                logger.info("Saving features into cached lmdb dir %s",
                            cached_features_file)
            else:
                logger.info("Saving features into cached file %s",
                            cached_features_file)
                torch.save(features, cached_features_file)

    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()

    return features
| 14,533 | 35.888325 | 119 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/modeling_decoding.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
                 reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)

        assert label_smoothing > 0
        assert tgt_vocab_size > 0

        # Spread the smoothing mass uniformly over every token except the gold
        # token and the ignored (padding) token — hence vocab_size - 2.
        off_value = label_smoothing / (tgt_vocab_size - 2)
        template = torch.full((tgt_vocab_size,), off_value)
        template[self.ignore_index] = 0
        self.register_buffer('one_hot', template.unsqueeze(0))

        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_scores = output.view(-1, self.tgt_vocab_size)
        flat_target = target.view(-1)
        # Build the smoothed target distribution for every position.
        smoothed = self.one_hot.repeat(flat_target.size(0), 1)
        smoothed.scatter_(1, flat_target.unsqueeze(1), self.confidence)
        # Zero the full row for ignored positions so they contribute no loss.
        smoothed.masked_fill_((flat_target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(flat_scores, smoothed, reduction='none').view(batch_size, num_pos, -1).sum(2)
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)
# Canonical checkpoint filename constant from the `transformers` package.
from transformers import WEIGHTS_NAME
def gelu(x):
    """Exact GELU activation via the Gaussian CDF (erf form).

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation (a.k.a. SiLU): x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# Maps config.hidden_act strings to the corresponding activation callables.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 relax_projection=0,
                 new_pos_ids=False,
                 initializer_range=0.02,
                 task_idx=None,
                 fp32_embedding=False,
                 ffn_type=0,
                 label_smoothing=None,
                 num_qkv=0,
                 seg_emb=False,
                 source_type_id=0,
                 target_type_id=1,
                 rel_pos_bins=0,
                 max_rel_pos=0, **kwargs):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel`, or a path (str) to a JSON config file to load.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Note: any extra keyword arguments (**kwargs) are accepted but ignored.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file verbatim onto the instance.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.relax_projection = relax_projection
            self.new_pos_ids = new_pos_ids
            self.initializer_range = initializer_range
            self.task_idx = task_idx
            self.fp32_embedding = fp32_embedding
            self.ffn_type = ffn_type
            self.label_smoothing = label_smoothing
            self.num_qkv = num_qkv
            self.seg_emb = seg_emb
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
            self.max_rel_pos = max_rel_pos
            self.rel_pos_bins = rel_pos_bins
        else:
            # Fixed missing space between "(int)" and "or" in the message.
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters.

        Uses ``cls`` (not a hard-coded BertConfig) so that subclasses
        round-trip correctly through ``from_dict``/``from_json_file``.
        """
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")

    class BertLayerNorm(nn.Module):
        """TF-style layer normalization (epsilon inside the square root)."""

        def __init__(self, hidden_size, eps=1e-5):
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            # Normalize over the last (hidden) dimension, then affine-transform.
            mean = x.mean(-1, keepdim=True)
            var = (x - mean).pow(2).mean(-1, keepdim=True)
            normed = (x - mean) / torch.sqrt(var + self.variance_epsilon)
            return self.weight * normed + self.bias
class PositionalEmbedding(nn.Module):
    """Transformer-XL style sinusoidal positional embeddings."""

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Geometric progression of inverse frequencies, one per even channel.
        exponents = torch.arange(0.0, demb, 2.0) / demb
        self.register_buffer('inv_freq', 1 / (10000 ** exponents))

    def forward(self, pos_seq, bsz=None):
        # Outer product: (seq_len,) x (demb/2,) -> (seq_len, demb/2) angles.
        angles = torch.ger(pos_seq, self.inv_freq)
        pos_emb = torch.cat([angles.sin(), angles.cos()], dim=-1)
        if bsz is None:
            return pos_emb[:, None, :]
        return pos_emb[:, None, :].expand(-1, bsz, -1)
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Sum of word, position and (optional) token-type embeddings, followed by
    LayerNorm and dropout.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size)
        # type_vocab_size == 0 disables segment (token-type) embeddings entirely.
        if config.type_vocab_size == 0:
            self.token_type_embeddings = None
        else:
            self.token_type_embeddings = nn.Embedding(
                config.type_vocab_size, config.hidden_size)
        if hasattr(config, 'fp32_embedding'):
            self.fp32_embedding = config.fp32_embedding
        else:
            self.fp32_embedding = False
        # new_pos_ids keeps 4 positional tables (one per task), concatenated
        # along the hidden dimension; otherwise a single table is used.
        if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
            self.num_pos_emb = 4
        else:
            self.num_pos_emb = 1
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
        """Embed token ids.

        input_ids: (batch, seq) token ids; token_type_ids / position_ids are
        optional tensors of the same shape (defaults: all zeros / 0..seq-1).
        task_idx selects the positional table when num_pos_emb > 1.
        Returns the (batch, seq, hidden) embedding after LayerNorm + dropout.
        """
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(
                seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        if self.num_pos_emb > 1:
            num_batch = position_embeddings.size(0)
            num_pos = position_embeddings.size(1)
            # Split the concatenated per-task tables and pick the slice for task_idx.
            position_embeddings = position_embeddings.view(
                num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        embeddings = words_embeddings + position_embeddings
        if self.token_type_embeddings is not None:
            embeddings = embeddings + self.token_type_embeddings(token_type_ids)
        # NOTE(review): despite the name, fp32_embedding triggers a cast to
        # half precision here — confirm this naming/behavior is intended.
        if self.fp32_embedding:
            embeddings = embeddings.half()
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head self-attention with optional key/value caching for decoding.

    Extensions over vanilla BERT attention:
      * ``num_qkv`` > 1 keeps several parallel Q/K/V projections, selected
        globally or per token via ``mask_qkv``.
      * ``history_states`` / ``key_cache`` / ``value_cache`` /
        ``key_history`` / ``value_history`` support incremental decoding by
        reusing previously computed keys and values.
      * optional segment-aware score bias (``seg_emb``) and relative-position
        bias (``rel_pos``) are added to the raw attention scores.
    """

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
            self.num_qkv = config.num_qkv
        else:
            self.num_qkv = 1
        # Projections are widened by num_qkv to hold the parallel copies.
        self.query = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.key = nn.Linear(config.hidden_size,
                             self.all_head_size * self.num_qkv)
        self.value = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Debug mode (env var UNI_DEBUG_FLAG): records a head-averaged
        # attention map of the first batch element into a fixed 512x512 buffer.
        self.uni_debug_flag = True if os.getenv(
            'UNI_DEBUG_FLAG', '') else False
        if self.uni_debug_flag:
            self.register_buffer('debug_attention_probs',
                                 torch.zeros((512, 512)))
        if hasattr(config, 'seg_emb') and config.seg_emb:
            # Learned per-segment key vectors plus a query bias for the
            # segment-aware attention term added in forward().
            self.b_q_s = nn.Parameter(torch.zeros(
                1, self.num_attention_heads, 1, self.attention_head_size))
            self.seg_emb = nn.Embedding(
                config.type_vocab_size, self.all_head_size)
        else:
            self.b_q_s = None
            self.seg_emb = None

    def transpose_for_scores(self, x, mask_qkv=None):
        """Reshape (batch, pos, proj_width) into (batch, head, pos, head_hid).

        With num_qkv > 1, mask_qkv picks which of the stacked projections to
        use: None -> copy 0; int -> that copy; tensor (batch, pos) -> per-token
        selection via gather.
        """
        if self.num_qkv > 1:
            # NOTE(review): the last view size here is all_head_size rather
            # than attention_head_size; this view only matches the projection
            # width when num_attention_heads * all_head_size equals it —
            # confirm the num_qkv > 1 path is exercised/correct.
            sz = x.size()[:-1] + (self.num_qkv,
                                  self.num_attention_heads, self.all_head_size)
            # (batch, pos, num_qkv, head, head_hid)
            x = x.view(*sz)
            if mask_qkv is None:
                x = x[:, :, 0, :, :]
            elif isinstance(mask_qkv, int):
                x = x[:, :, mask_qkv, :, :]
            else:
                # mask_qkv: (batch, pos)
                if mask_qkv.size(1) > sz[1]:
                    mask_qkv = mask_qkv[:, :sz[1]]
                # -> x: (batch, pos, head, head_hid)
                x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
                    sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
        else:
            sz = x.size()[:-1] + (self.num_attention_heads,
                                  self.attention_head_size)
            # (batch, pos, head, head_hid)
            x = x.view(*sz)
        # (batch, head, pos, head_hid)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
                key_cache=None, value_cache=None, rel_pos=None,
                ):
        """Compute attention context vectors: (batch, q_pos, all_head_size)."""
        if history_states is None:
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            # NOTE(review): F.linear with only self.key.weight skips the key
            # bias — presumably an apex workaround; confirm it is intended.
            mixed_key_layer = F.linear(hidden_states, self.key.weight)
            mixed_value_layer = self.value(hidden_states)
        else:
            # Incremental decoding: queries come only from the new positions,
            # while keys/values cover history + new positions.
            x_states = torch.cat((history_states, hidden_states), dim=1)
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(x_states, self.key.weight)
            mixed_value_layer = self.value(x_states)

        # Caller-owned caches of *pre-transpose* keys/values; the new step is
        # appended and the full sequence concatenated back.
        if key_cache is not None and isinstance(key_cache, list):
            key_cache.append(mixed_key_layer)
            mixed_key_layer = torch.cat(key_cache, dim=1)

        if value_cache is not None and isinstance(value_cache, list):
            value_cache.append(mixed_value_layer)
            mixed_value_layer = torch.cat(value_cache, dim=1)

        query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
        key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
        value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)

        # Non-list key_history/value_history are *post-transpose* tensors to
        # prepend along the position axis.
        if key_history is not None and not isinstance(key_history, list):
            key_layer = torch.cat((key_history, key_layer), dim=-2)
            value_layer = torch.cat((value_history, value_layer), dim=-2)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch, head, pos, pos); scaling is folded into the query side.
        attention_scores = torch.matmul(
            query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        if rel_pos is not None:
            attention_scores = attention_scores + rel_pos
        if self.seg_emb is not None:
            seg_rep = self.seg_emb(seg_ids)
            # (batch, pos, head, head_hid)
            seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
                1), self.num_attention_heads, self.attention_head_size)
            # Segment-aware score: (query + bias) . segment key, per head.
            qs = torch.einsum('bnih,bjnh->bnij',
                              query_layer + self.b_q_s, seg_rep)
            attention_scores = attention_scores + qs

        # attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if self.uni_debug_flag:
            _pos = attention_probs.size(-1)
            self.debug_attention_probs[:_pos, :_pos].copy_(
                attention_probs[0].mean(0).view(_pos, _pos))

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, head, pos, head_hid) -> (batch, pos, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        # List-typed histories collect this layer's transposed keys/values.
        if isinstance(key_history, list):
            key_history.append(key_layer)
        if isinstance(value_history, list):
            value_history.append(value_layer)

        return context_layer
class BertSelfOutput(nn.Module):
    """Projects the attention output, then applies dropout, residual and LayerNorm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Full attention sub-layer: self-attention followed by its output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        context = self.self(
            input_tensor, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        return self.output(context, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with the configured activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a registry key or a callable passed directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward output projection with dropout, residual connection and LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        out = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(out + input_tensor)
class TransformerFFN(nn.Module):
    """Gated feed-forward variant: output(wx0(x) * gate), plus residual + LayerNorm.

    ffn_type 1 gates with x itself; ffn_type 2 gates with a second projection wx1(x).
    """

    def __init__(self, config):
        super(TransformerFFN, self).__init__()
        self.ffn_type = config.ffn_type
        assert self.ffn_type in (1, 2)
        if self.ffn_type in (1, 2):
            self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (2,):
            self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (1, 2):
            self.output = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, x):
        if self.ffn_type in (1, 2):
            gate = x if self.ffn_type == 1 else self.wx1(x)
            out = self.output(self.wx0(x) * gate)
        out = self.dropout(out)
        return self.LayerNorm(out + x)
class BertLayer(nn.Module):
    """One transformer layer: attention plus either TransformerFFN or the standard MLP."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.ffn_type = config.ffn_type
        if self.ffn_type:
            self.ffn = TransformerFFN(config)
        else:
            self.intermediate = BertIntermediate(config)
            self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        attn_out = self.attention(
            hidden_states, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        if self.ffn_type:
            return self.ffn(attn_out)
        return self.output(self.intermediate(attn_out), attn_out)
class BertEncoder(nn.Module):
    """Stack of BertLayer modules with support for incremental decoding state."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer)
                                    for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None,
                seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        """Run all layers.

        Returns a list of hidden states — one per layer when
        output_all_encoded_layers is True, otherwise just the final layer's.
        """
        # history embedding and encoded layer must be simultaneously given
        assert (prev_embedding is None) == (prev_encoded_layers is None)
        all_encoder_layers = []
        if (prev_embedding is not None) and (prev_encoded_layers is not None):
            # Incremental path: layer 0 attends over the cached embedding
            # output; layer i > 0 over the cached output of layer i-1.
            history_states = prev_embedding
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(
                    hidden_states, attention_mask, history_states=history_states,
                    mask_qkv=mask_qkv, seg_ids=seg_ids, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
                if prev_encoded_layers is not None:
                    history_states = prev_encoded_layers[i]
        else:
            for i, layer_module in enumerate(self.layer):
                # While a history list is still being filled (fewer entries than
                # layers) the list itself is passed so the layer appends to it;
                # afterwards the layer's own cached entry is passed.
                set_key = None
                if isinstance(key_history, list):
                    set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
                set_value = None
                if isinstance(value_history, list):
                    # NOTE(review): this checks len(key_history), not
                    # len(value_history) — presumably both lists grow in
                    # lockstep; confirm.
                    set_value = value_history if len(key_history) < len(self.layer) else value_history[i]
                hidden_states = layer_module(
                    hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
                    key_history=set_key, value_history=set_value, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pools the sequence by transforming the first ([CLS]) token's hidden state."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder.

    With relax_projection > 1 the dense layer emits one projection per task,
    concatenated along the hidden dimension.
    """

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        out_size = config.hidden_size
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            out_size *= config.relax_projection
        self.dense = nn.Linear(config.hidden_size, out_size)
        self.LayerNorm = BertLayerNorm(out_size, eps=1e-5)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """LM output head whose decoder weight is tied to the input embedding matrix.

    Supports relax_projection (one transformed projection per task, selected
    by task_idx) and fp32_embedding (see hedged note below).
    """

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(
            bert_model_embedding_weights.size(0)))
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            self.relax_projection = config.relax_projection
        else:
            self.relax_projection = 0
        self.fp32_embedding = config.fp32_embedding

        def convert_to_type(tensor):
            # NOTE(review): despite the name, fp32_embedding converts tensors
            # to half precision here — confirm this is the intended meaning.
            if self.fp32_embedding:
                return tensor.half()
            else:
                return tensor
        self.type_converter = convert_to_type
        self.converted = False

    def forward(self, hidden_states, task_idx=None):
        # Lazily cast the transform module on the first call when
        # fp32_embedding is set.
        if not self.converted:
            self.converted = True
            if self.fp32_embedding:
                self.transform.half()
        hidden_states = self.transform(self.type_converter(hidden_states))
        if self.relax_projection > 1:
            num_batch = hidden_states.size(0)
            num_pos = hidden_states.size(1)
            # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
            hidden_states = hidden_states.view(
                num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        if self.fp32_embedding:
            # Explicit F.linear so the tied weight and bias can be cast too.
            hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
                self.decoder.weight), self.type_converter(self.bias))
        else:
            hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class BertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence classifier)."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head only: a binary classifier over the pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Both pre-training heads: masked-LM predictions and a sentence-relationship classifier."""

    def __init__(self, config, bert_model_embedding_weights, num_labels=2):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, num_labels)

    def forward(self, sequence_output, pooled_output, task_idx=None):
        prediction_scores = self.predictions(sequence_output, task_idx)
        # pooled_output is None when the caller skips the sentence-level task.
        if pooled_output is None:
            seq_relationship_score = None
        else:
            seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedBertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.

        Linear/Embedding weights: normal(0, initializer_range);
        LayerNorm: weight 1, bias 0; Linear biases: 0.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            # module.weight.data.copy_(torch.Tensor(
            #     truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-base-multilingual`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        logger.info("Model config {}".format(config))
        # clean the arguments in kwargs: these keys belong to config
        # construction and must not reach the model constructor below.
        for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
                          'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
                          'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
                          'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
            if arg_clean in kwargs:
                del kwargs[arg_clean]

        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
            state_dict = torch.load(weights_path)

        # Rename legacy TF-style parameter names: gamma -> weight, beta -> bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load each submodule under its dotted prefix,
            # collecting missing/unexpected keys instead of failing hard.
            local_metadata = {} if metadata is None else metadata.get(
                prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # NOTE(review): presumably checkpoint keys are stored under a 'bert.'
        # prefix; a model without a `.bert` attribute is loaded with that
        # prefix so keys map onto it directly — confirm against checkpoints.
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        model.missing_keys = missing_keys
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            logger.info('\n'.join(error_msgs))
        return model
class BertModel(PreTrainedBertModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Params:
        config: a BertConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.

    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    """

    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.config = config
        self.apply(self.init_bert_weights)

    def rescale_some_parameters(self):
        # Depth-dependent rescaling: divide each residual-branch output
        # projection by sqrt(2 * (layer_id + 1)).
        # NOTE(review): `layer.output` exists only on the standard MLP path
        # (ffn_type == 0) — confirm this is never called with ffn_type set.
        for layer_id, layer in enumerate(self.encoder.layer):
            layer.attention.output.dense.weight.data.div_(
                math.sqrt(2.0 * (layer_id + 1)))
            layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))

    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Broadcast a 2D/3D attention mask to additive (batch, 1, [1,] seq) form."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
                mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
        """Embed, encode and pool; returns (encoded_layers, pooled_output)."""
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)

        embedding_output = self.embeddings(
            input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
        encoded_layers = self.encoder(embedding_output, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      mask_qkv=mask_qkv, seg_ids=token_type_ids,
                                      key_history=key_history, value_history=value_history)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-element list into a tensor.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertModelIncr(BertModel):
    """BertModel variant supporting incremental decoding with cached states
    and an optional learned relative-position bias."""

    def __init__(self, config):
        super(BertModelIncr, self).__init__(config)
        if self.config.rel_pos_bins > 0:
            # Projects one-hot relative-position buckets to one bias value
            # per attention head.
            self.rel_pos_bias = nn.Linear(
                self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None, rel_pos=None):
        """Encode one decoding step, reusing cached states from earlier steps.

        Returns (embedding_output, encoded_layers, pooled_output) so the
        caller can extend its caches with the fresh states.
        """
        ext_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        emb = self.embeddings(
            input_ids, token_type_ids, position_ids, task_idx=task_idx)
        if self.rel_pos_bias is None:
            rel_pos = None
        else:
            # Bucket ids -> one-hot -> per-head bias, laid out as
            # (batch, heads, query_len, key_len) via the permute.
            one_hot = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(emb)
            rel_pos = self.rel_pos_bias(one_hot).permute(0, 3, 1, 2)
        layers = self.encoder(emb,
                              ext_mask,
                              output_all_encoded_layers=output_all_encoded_layers,
                              prev_embedding=prev_embedding,
                              prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
                              seg_ids=token_type_ids, rel_pos=rel_pos)
        pooled = self.pooler(layers[-1])
        if not output_all_encoded_layers:
            layers = layers[-1]
        return emb, layers, pooled
class BertForPreTraining(PreTrainedBertModel):
    """BERT model with pre-training heads.

    Wraps ``BertModel`` with the two pre-training heads:
        - the masked language modeling head, and
        - the next sentence classification head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: torch.LongTensor of shape [batch_size, sequence_length] with word token
            indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py`).
        `token_type_ids`: optional torch.LongTensor of shape [batch_size, sequence_length]
            with segment indices in [0, 1]; 0 corresponds to a `sentence A` token and 1 to a
            `sentence B` token (see BERT paper for details).
        `attention_mask`: optional torch.LongTensor of shape [batch_size, sequence_length]
            with values in [0, 1], masking padded positions when a batch has varying-length
            sentences.
        `masked_lm_labels`: optional masked LM labels of shape [batch_size, sequence_length]
            with indices in [-1, 0, ..., vocab_size]; positions labelled -1 are ignored
            (masked) and the loss is only computed over labels in [0, ..., vocab_size].
        `next_sentence_label`: optional torch.LongTensor of shape [batch_size] with values in
            [0, 1]; 0 => next sentence is the continuation, 1 => a random sentence.

    Outputs:
        If both `masked_lm_labels` and `next_sentence_label` are given, returns the total
        loss: masked LM loss + next sentence classification loss. Otherwise returns the pair
        (masked LM logits of shape [batch_size, sequence_length, vocab_size],
        next sentence logits of shape [batch_size, 2]).

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # The LM head ties its projection to the input word-embedding matrix.
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, mask_qkv=None, task_idx=None):
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask,
            output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx)
        prediction_scores, seq_relationship_score = self.cls(
            sequence_output, pooled_output)

        if masked_lm_labels is None or next_sentence_label is None:
            return prediction_scores, seq_relationship_score

        # Positions labelled -1 contribute nothing to either loss.
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(
            prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(
            seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        return masked_lm_loss + next_sentence_loss
class BertPreTrainingPairTransform(nn.Module):
    """Project the concatenation of two hidden vectors back to hidden_size."""

    def __init__(self, config):
        super(BertPreTrainingPairTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
        # ``hidden_act`` may be the name of an activation or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        # self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)

    def forward(self, pair_x, pair_y):
        combined = torch.cat([pair_x, pair_y], dim=-1)
        projected = self.dense(combined)
        # hidden_states = self.LayerNorm(hidden_states)
        return self.transform_act_fn(projected)
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map relative positions to bucket indices.

    Small distances each get their own bucket; larger distances share
    logarithmically sized buckets up to ``max_distance``, beyond which
    everything falls into the last bucket.

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    bucket = 0
    if bidirectional:
        # Half the buckets serve each direction.
        num_buckets //= 2
        # mtf.to_int32(mtf.less(n, 0)) * num_buckets
        bucket = (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # now distance is in the range [0, inf)

    # half of the buckets are for exact increments in positions
    max_exact = num_buckets // 2
    # The other half of the buckets are for logarithmically bigger bins in
    # positions up to max_distance.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact) / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(
        log_bucket, torch.full_like(log_bucket, num_buckets - 1))
    return bucket + torch.where(distance < max_exact, distance, log_bucket)
class BertForSeq2SeqDecoder(PreTrainedBertModel):
    """refer to BertForPreTraining

    Inference-only seq2seq decoder: greedy decoding (``forward``) or beam
    search (``beam_search``) on top of :class:`BertModelIncr`, reusing cached
    embedding/encoder states between decoding steps.
    """

    def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
                 search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
                 forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
                 pos_shift=False):
        # ``num_rel`` is accepted for interface compatibility but unused here.
        super(BertForSeq2SeqDecoder, self).__init__(config)
        self.bert = BertModelIncr(config)
        # LM head tied to the input word embeddings.
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
        self.apply(self.init_bert_weights)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
        self.mask_word_id = mask_word_id
        self.num_labels = num_labels
        self.search_beam_size = search_beam_size
        self.length_penalty = length_penalty
        self.eos_id = eos_id
        self.sos_id = sos_id
        self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
        self.forbid_ignore_set = forbid_ignore_set
        self.ngram_size = ngram_size
        self.min_len = min_len
        assert mode in ("s2s", "l2r")
        self.mode = mode
        self.pos_shift = pos_shift

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
        """Greedy decoding; delegates to ``beam_search`` when beam size > 1.

        Returns a LongTensor of generated token ids, one column per step.
        """
        if self.search_beam_size > 1:
            return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
                                    task_idx=task_idx, mask_qkv=mask_qkv)

        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)

        if self.bert.rel_pos_bias is not None:
            # Precompute relative-position buckets for the full sequence once;
            # per-step slices are taken inside the loop.
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
        else:
            rel_pos = None

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]

            if self.pos_shift:
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                # Append a [MASK] placeholder whose prediction becomes the
                # next token.
                start_pos = next_pos - curr_length
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:,
                                                 start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            if rel_pos is not None:
                cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
            else:
                cur_rel_pos = None
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            _, max_ids = torch.max(prediction_scores, dim=-1)
            output_ids.append(max_ids)

            # Extend the caches; in non-pos_shift mode the trailing [MASK]
            # position is dropped before caching.
            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = new_embedding
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
            else:
                if prev_embedding is None:
                    prev_embedding = new_embedding[:, :-1, :]
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x[:, :-1, :]
                                           for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
            curr_ids = max_ids
            next_pos += 1

        return torch.cat(output_ids, dim=1)

    def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
        """Beam-search decoding with beam width ``self.search_beam_size``.

        Returns a dict of traces ('pred_seq', 'scores', 'wids', 'ptrs'), each
        padded to ``output_length`` and moved to ``input_ids.device`` so the
        result survives DataParallel gathering.
        """
        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)

        K = self.search_beam_size

        total_scores = []
        beam_masks = []
        step_ids = []
        step_back_ptrs = []
        partial_seqs = []
        forbid_word_mask = None
        buf_matrix = None

        if self.bert.rel_pos_bias is not None:
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
        else:
            rel_pos = None

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]

            if self.pos_shift:
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                start_pos = next_pos - curr_length
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            if rel_pos is not None:
                cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
            else:
                cur_rel_pos = None
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            log_scores = torch.nn.functional.log_softmax(
                prediction_scores, dim=-1)
            if forbid_word_mask is not None:
                log_scores += (forbid_word_mask * -10000.0)
            if self.min_len and (next_pos - input_length + 1 <= self.min_len):
                # Suppress EOS until the minimum output length is reached.
                log_scores[:, :, self.eos_id].fill_(-10000.0)
            kk_scores, kk_ids = torch.topk(log_scores, k=K)
            if len(total_scores) == 0:
                k_ids = torch.reshape(kk_ids, [batch_size, K])
                back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
                k_scores = torch.reshape(kk_scores, [batch_size, K])
            else:
                last_eos = torch.reshape(
                    beam_masks[-1], [batch_size * K, 1, 1])
                last_seq_scores = torch.reshape(
                    total_scores[-1], [batch_size * K, 1, 1])
                # Finished beams (last token was EOS) are pushed out of the
                # running by a large negative penalty.
                kk_scores += last_eos * (-10000.0) + last_seq_scores
                kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
                k_scores, k_ids = torch.topk(kk_scores, k=K)
                # FIX: use integer floor division. ``torch.div(k_ids, K)``
                # performs true division on newer PyTorch, yielding float
                # indices that break the gather below.
                back_ptrs = k_ids // K
                kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
                k_ids = torch.gather(kk_ids, 1, k_ids)
            step_back_ptrs.append(back_ptrs)
            step_ids.append(k_ids)
            beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
            total_scores.append(k_scores)

            def first_expand(x):
                # Tile a per-sample tensor K times along a new beam axis and
                # fold the beam axis into the batch axis.
                input_shape = list(x.size())
                expanded_shape = input_shape[:1] + [1] + input_shape[1:]
                x = torch.reshape(x, expanded_shape)
                repeat_count = [1, K] + [1] * (len(input_shape) - 1)
                x = x.repeat(*repeat_count)
                x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
                return x

            def select_beam_items(x, ids):
                # Reorder the (batch*K, ...) tensor x along the beam axis
                # according to the back-pointers in ids.
                id_shape = list(ids.size())
                id_rank = len(id_shape)
                assert len(id_shape) == 2
                x_shape = list(x.size())
                x = torch.reshape(x, [batch_size, K] + x_shape[1:])
                x_rank = len(x_shape) + 1
                assert x_rank >= 2
                if id_rank < x_rank:
                    ids = torch.reshape(
                        ids, id_shape + [1] * (x_rank - id_rank))
                    ids = ids.expand(id_shape + x_shape[1:])
                y = torch.gather(x, 1, ids)
                y = torch.reshape(y, x_shape)
                return y

            is_first = (prev_embedding is None)

            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = first_expand(new_embedding)
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                    prev_embedding = select_beam_items(
                        prev_embedding, back_ptrs)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [first_expand(
                        x) for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
                    prev_encoded_layers = [select_beam_items(
                        x, back_ptrs) for x in prev_encoded_layers]
            else:
                if prev_embedding is None:
                    prev_embedding = first_expand(new_embedding[:, :-1, :])
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                    prev_embedding = select_beam_items(
                        prev_embedding, back_ptrs)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [first_expand(
                        x[:, :-1, :]) for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
                    prev_encoded_layers = [select_beam_items(
                        x, back_ptrs) for x in prev_encoded_layers]
            curr_ids = torch.reshape(k_ids, [batch_size * K, 1])

            if is_first:
                # After the first step everything is tiled to batch*K.
                token_type_ids = first_expand(token_type_ids)
                position_ids = first_expand(position_ids)
                attention_mask = first_expand(attention_mask)
                if rel_pos is not None:
                    rel_pos = first_expand(rel_pos)
                mask_ids = first_expand(mask_ids)
                if mask_qkv is not None:
                    mask_qkv = first_expand(mask_qkv)

            if self.forbid_duplicate_ngrams:
                wids = step_ids[-1].tolist()
                ptrs = step_back_ptrs[-1].tolist()
                if is_first:
                    partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            partial_seqs.append([wids[b][k]])
                else:
                    new_partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            new_partial_seqs.append(
                                partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
                    partial_seqs = new_partial_seqs

                def get_dup_ngram_candidates(seq, n):
                    # Words that would complete an n-gram already present in seq.
                    cands = set()
                    if len(seq) < n:
                        return []
                    tail = seq[-(n - 1):]
                    if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
                        return []
                    for i in range(len(seq) - (n - 1)):
                        mismatch = False
                        for j in range(n - 1):
                            if tail[j] != seq[i + j]:
                                mismatch = True
                                break
                        if (not mismatch) and not (
                                self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
                            cands.add(seq[i + n - 1])
                    return list(sorted(cands))

                if len(partial_seqs[0]) >= self.ngram_size:
                    dup_cands = []
                    for seq in partial_seqs:
                        dup_cands.append(
                            get_dup_ngram_candidates(seq, self.ngram_size))
                    if max(len(x) for x in dup_cands) > 0:
                        if buf_matrix is None:
                            vocab_size = list(log_scores.size())[-1]
                            buf_matrix = np.zeros(
                                (batch_size * K, vocab_size), dtype=float)
                        else:
                            buf_matrix.fill(0)
                        for bk, cands in enumerate(dup_cands):
                            for i, wid in enumerate(cands):
                                buf_matrix[bk, wid] = 1.0
                        forbid_word_mask = torch.tensor(
                            buf_matrix, dtype=log_scores.dtype)
                        # FIX: follow the active device instead of the
                        # hard-coded ``.cuda()``, which crashed CPU inference.
                        forbid_word_mask = torch.reshape(
                            forbid_word_mask, [batch_size * K, 1, vocab_size]).to(log_scores.device)
                    else:
                        forbid_word_mask = None
            next_pos += 1

        # [(batch, beam)]
        total_scores = [x.tolist() for x in total_scores]
        step_ids = [x.tolist() for x in step_ids]
        step_back_ptrs = [x.tolist() for x in step_back_ptrs]
        # back tracking
        traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
        for b in range(batch_size):
            # [(beam,)]
            scores = [x[b] for x in total_scores]
            wids_list = [x[b] for x in step_ids]
            ptrs = [x[b] for x in step_back_ptrs]
            traces['scores'].append(scores)
            traces['wids'].append(wids_list)
            traces['ptrs'].append(ptrs)
            # first we need to find the eos frame where all symbols are eos
            # any frames after the eos frame are invalid
            last_frame_id = len(scores) - 1
            for i, wids in enumerate(wids_list):
                if all(wid == self.eos_id for wid in wids):
                    last_frame_id = i
                    break
            # Pick the best-scoring finished hypothesis (length-penalized).
            max_score = -math.inf
            frame_id = -1
            pos_in_frame = -1
            for fid in range(last_frame_id + 1):
                for i, wid in enumerate(wids_list[fid]):
                    if wid == self.eos_id or fid == last_frame_id:
                        s = scores[fid][i]
                        if self.length_penalty > 0:
                            s /= math.pow((5 + fid + 1) / 6.0,
                                          self.length_penalty)
                        if s > max_score:
                            max_score = s
                            frame_id = fid
                            pos_in_frame = i
            if frame_id == -1:
                traces['pred_seq'].append([0])
            else:
                # Walk the back-pointers from the chosen frame to step 0.
                seq = [wids_list[frame_id][pos_in_frame]]
                for fid in range(frame_id, 0, -1):
                    pos_in_frame = ptrs[fid][pos_in_frame]
                    seq.append(wids_list[fid - 1][pos_in_frame])
                seq.reverse()
                traces['pred_seq'].append(seq)

        def _pad_sequence(sequences, max_len, padding_value=0):
            trailing_dims = sequences[0].size()[1:]
            out_dims = (len(sequences), max_len) + trailing_dims

            out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
            for i, tensor in enumerate(sequences):
                length = tensor.size(0)
                # use index notation to prevent duplicate references to the tensor
                out_tensor[i, :length, ...] = tensor
            return out_tensor

        # convert to tensors for DataParallel
        for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
            ts_list = traces[k]
            if not isinstance(ts_list[0], torch.Tensor):
                dt = torch.float if k == 'scores' else torch.long
                ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
            traces[k] = _pad_sequence(
                ts_list, output_length, padding_value=0).to(input_ids.device)

        return traces
| 67,538 | 44.944898 | 139 | py |
DDOD | DDOD-main/setup.py | #!/usr/bin/env python
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Return the package long description read from README.md."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
version_file = 'mmdet/version.py'


def get_version():
    """Execute ``mmdet/version.py`` and return its ``__version__`` string."""
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=None):
    """Build a C++/CUDA extension description for setuptools.

    Compiles as a ``CUDAExtension`` when CUDA is available (or when the
    ``FORCE_CUDA=1`` environment variable is set), otherwise falls back to a
    CPU-only ``CppExtension``.

    Args:
        name (str): extension name (last component of the dotted module path).
        module (str): dotted package path containing the source files.
        sources (list[str]): C++ source files relative to ``module``.
        sources_cuda (list[str], optional): extra CUDA sources compiled only
            on the CUDA path. Defaults to an empty list.

    Returns:
        setuptools.Extension: the configured extension object.
    """
    # Avoid the mutable default argument; also copy ``sources`` so the
    # caller's list is never mutated by the CUDA branch below.
    if sources_cuda is None:
        sources_cuda = []
    sources = list(sources)
    define_macros = []
    extra_compile_args = {'cxx': []}

    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        sources += sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension

    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists

    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            yield from parse_require_file(line.split(' ')[1])
            return
        info = {'line': line}
        if line.startswith('-e '):
            # Editable install: package name follows the "#egg=" marker.
            info['package'] = line.split('#egg=')[1]
        elif '@git+' in line:
            # Direct git reference: keep the whole spec as the package.
            info['package'] = line
        else:
            # Split "name<op>version" on the first version operator.
            pieces = [p.strip() for p in re.split('(>=|==|>)', line, maxsplit=1)]
            info['package'] = pieces[0]
            if len(pieces) > 1:
                op, rest = pieces[1:]
                if ';' in rest:
                    # Handle platform specific dependencies
                    # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                    version, platform_deps = map(str.strip, rest.split(';'))
                    info['platform_deps'] = platform_deps
                else:
                    version = rest  # NOQA
                info['version'] = (op, version)
        yield info

    def parse_require_file(fpath):
        with open(fpath, 'r') as f:
            for raw_line in f.readlines():
                stripped = raw_line.strip()
                if stripped and not stripped.startswith('#'):
                    yield from parse_line(stripped)

    def gen_packages_items():
        if not exists(require_fpath):
            return
        for info in parse_require_file(require_fpath):
            parts = [info['package']]
            if with_version and 'version' in info:
                parts.extend(info['version'])
            if not sys.version.startswith('3.4'):
                # apparently package_deps are broken in 3.4
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    parts.append(';' + platform_deps)
            yield ''.join(parts)

    return list(gen_packages_items())
if __name__ == '__main__':
    # Package metadata for pip/setuptools; dependency lists are parsed from
    # the requirements/ files by parse_requirements above.
    setup(
        name='mmdet',
        version=get_version(),
        description='OpenMMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='OpenMMLab',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License 2.0',
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
        },
        # No compiled extensions declared here; CUDA/C++ ops would be added as
        # make_cuda_ext(...) entries in ext_modules.
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 5,899 | 35.196319 | 125 | py |
DDOD | DDOD-main/coco_cfg/atss_r50_1x.py | fp16 = dict(loss_scale=512.)
# model settings
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        # NOTE(review): pretrained weights are loaded from a local path —
        # confirm the file exists before training.
        init_cfg=dict(type='Pretrained', checkpoint='data/pretrain_models/resnet50-0676ba61.pth')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSIoUHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # A single square anchor (ratio 1.0, one scale) per location.
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# Evaluate bbox mAP on the val split every epoch.
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
# "1x" schedule: 12 epochs with lr drops at epochs 8 and 11.
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 4,154 | 29.777778 | 99 | py |
DDOD | DDOD-main/coco_cfg/ddod_r50_1x.py | fp16 = dict(loss_scale=512.)
# model settings
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        # NOTE(review): pretrained weights are loaded from a local path —
        # confirm the file exists before training.
        init_cfg=dict(type='Pretrained', checkpoint='data/pretrain_models/resnet50-0676ba61.pth')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='DDODHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # A single square anchor (ratio 1.0, one scale) per location.
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    # Separate assigners: ``assigner`` for classification and
    # ``reg_assigner`` for the regression branch.
    train_cfg=dict(
        assigner=dict(type='ATSSCostAssigner', topk=9),
        reg_assigner=dict(type='ATSSCostAssigner', topk=9, alpha=0.5),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# Evaluate bbox mAP on the val split every epoch.
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
# "1x" schedule: 12 epochs with lr drops at epochs 8 and 11.
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 4,226 | 30.080882 | 99 | py |
DDOD | DDOD-main/coco_cfg/ddod_r50_1x_fcos.py | fp16 = dict(loss_scale=512.)
# model settings
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        # NOTE(review): pretrained weights are loaded from a local path —
        # confirm the file exists before training.
        init_cfg=dict(type='Pretrained', checkpoint='data/pretrain_models/resnet50-0676ba61.pth')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='DDODFCOSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # A single square anchor (ratio 1.0, one scale) per location.
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        # bbox_coder=dict(
        #     type='DeltaXYWHBBoxCoder',
        #     target_means=[.0, .0, .0, .0],
        #     target_stds=[0.1, 0.1, 0.2, 0.2]),
        # FCOS-style top/bottom/left/right box coding instead of the
        # delta-xywh coder kept commented out above.
        bbox_coder=dict(
            type='TBLRCenterCoder',
            normalizer=1/8.,
            normalize_by_wh=True
        ),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    # Separate assigners: ``assigner`` for classification and
    # ``reg_assigner`` for the regression branch.
    train_cfg=dict(
        assigner=dict(type='ATSSCostAssigner', topk=9),
        reg_assigner=dict(type='ATSSCostAssigner', topk=9, alpha=0.5),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# Evaluate bbox mAP only at epoch 12 (end of the schedule).
evaluation = dict(interval=12, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
# "1x" schedule: 12 epochs with lr drops at epochs 8 and 11.
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 4,374 | 30.028369 | 99 | py |
DDOD | DDOD-main/tools/test.py | import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Parse command-line arguments for the mmdet test/eval script.

    Returns:
        argparse.Namespace: Parsed arguments. The deprecated ``--options``
        value, if given, is folded into ``args.eval_options``.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        # torch.distributed.launch passes --local_rank on the command line;
        # mirror it into the environment for downstream dist utilities.
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        # Backward compatibility: keep honoring the deprecated --options.
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Run inference with a trained detector and optionally save, format,
    evaluate or visualize the results, on one GPU or distributed."""
    args = parse_args()

    # At least one "sink" for the results must be requested.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Pretrained weights are irrelevant at test time: the checkpoint is loaded
    # explicitly below. Also strip RFP backbone pretrained paths (DetectoRS).
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    # Only rank 0 saves/formats/evaluates the collected results.
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)


if __name__ == '__main__':
    main()
| 9,315 | 38.142857 | 79 | py |
DDOD | DDOD-main/tools/train.py | import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
    """Parse command-line arguments for the mmdet training script.

    Returns:
        argparse.Namespace: Parsed arguments. The deprecated ``--options``
        value, if given, is folded into ``args.cfg_options``.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are alternative ways to pick devices; at most one
    # may be given.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        # torch.distributed.launch passes --local_rank on the command line;
        # mirror it into the environment for downstream dist utilities.
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        # Backward compatibility: keep honoring the deprecated --options.
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
    """Build config, logger, dataset(s) and detector, then start training."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        # A two-stage workflow (e.g. [('train', 1), ('val', 1)]) needs a
        # second dataset; it reuses the *train* pipeline so losses can be
        # computed on validation data.
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)


if __name__ == '__main__':
    main()
| 6,914 | 35.587302 | 79 | py |
DDOD | DDOD-main/tools/deployment/mmdet2torchserve.py | from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.

    Args:
        config_file:
            In MMDetection config format.
            The contents vary for each task repository.
        checkpoint_file:
            In MMDetection checkpoint format.
            The contents vary for each task repository.
        output_folder:
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name:
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version:
            Model's version.
        force:
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    with TemporaryDirectory() as tmpdir:
        # Dump the parsed config so it is archived alongside the checkpoint;
        # mmdet_handler.py rebuilds the model from it at serving time.
        config.dump(f'{tmpdir}/config.py')

        # This Namespace mirrors the CLI arguments that the
        # `torch-model-archiver` tool would normally receive.
        args = Namespace(
            **{
                'model_file': f'{tmpdir}/config.py',
                'serialized_file': checkpoint_file,
                'handler': f'{Path(__file__).parent}/mmdet_handler.py',
                'model_name': model_name or Path(checkpoint_file).stem,
                'version': model_version,
                'export_path': output_folder,
                'force': force,
                'requirements_file': None,
                'extra_files': None,
                'runtime': 'python',
                'archive_format': 'default'
            })
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
def parse_args():
    """Parse command-line arguments for the `.mar` conversion script."""
    parser = ArgumentParser(
        description='Convert MMDetection models to TorchServe `.mar` format.')
    parser.add_argument('config', type=str, help='config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    args = parser.parse_args()

    return args
if __name__ == '__main__':
    args = parse_args()

    # `package_model` stays None if the optional torch-model-archiver import
    # failed at module load; fail here with an actionable message.
    if package_model is None:
        raise ImportError('`torch-model-archiver` is required.'
                          'Try: pip install torch-model-archiver')

    mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
                     args.model_name, args.model_version, args.force)
| 3,645 | 32.145455 | 78 | py |
DDOD | DDOD-main/tools/deployment/onnx2tensorrt.py | import argparse
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import torch
from mmcv import Config
from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
from mmdet.core.export import preprocess_example_input
from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
TensorRTDetector)
from mmdet.datasets import DATASETS
def get_GiB(x: int):
    """Return ``x`` gibibytes expressed as a number of bytes."""
    bytes_per_gib = 1 << 30  # 2**30 bytes in one GiB
    return x * bytes_per_gib
def onnx2tensorrt(onnx_file,
                  trt_file,
                  input_config,
                  verify=False,
                  show=False,
                  workspace_size=1,
                  verbose=False):
    """Build a TensorRT engine from an ONNX model and optionally verify it.

    Args:
        onnx_file (str): Path of the input ONNX model.
        trt_file (str): Path where the serialized TensorRT engine is saved.
        input_config (dict): Contains 'min_shape', 'opt_shape', 'max_shape'
            (the dynamic-shape profile for the 'input' binding) plus the keys
            consumed by ``preprocess_example_input`` ('input_shape',
            'input_path', 'normalize_cfg').
        verify (bool): If True, run both the ONNXRuntime and the TensorRT
            models on one example image and compare their outputs.
        show (bool): If True, only display detections in a window; otherwise
            they are also written to 'show-ort.png' / 'show-trt.png'.
        workspace_size (int): Max TensorRT workspace size in GiB.
        verbose (bool): Use verbose TensorRT logging while building.

    Note:
        The verification branch reads the module-level ``CLASSES``, which is
        assigned by the ``__main__`` block of this script.
    """
    import tensorrt as trt
    onnx_model = onnx.load(onnx_file)
    max_shape = input_config['max_shape']
    min_shape = input_config['min_shape']
    opt_shape = input_config['opt_shape']
    fp16_mode = False
    # create trt engine and wrapper
    opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_model,
        opt_shape_dict,
        log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
        fp16_mode=fp16_mode,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')

    if verify:
        # prepare input
        one_img, one_meta = preprocess_example_input(input_config)
        img_list, img_meta_list = [one_img], [[one_meta]]
        img_list = [_.cuda().contiguous() for _ in img_list]

        # wrap ONNX and TensorRT model
        onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
        trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)

        # inference with wrapped model
        with torch.no_grad():
            onnx_results = onnx_model(
                img_list, img_metas=img_meta_list, return_loss=False)[0]
            trt_results = trt_model(
                img_list, img_metas=img_meta_list, return_loss=False)[0]

        if show:
            out_file_ort, out_file_trt = None, None
        else:
            out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
        show_img = one_meta['show_img']
        score_thr = 0.3
        onnx_model.show_result(
            show_img,
            onnx_results,
            score_thr=score_thr,
            show=True,
            win_name='ONNXRuntime',
            out_file=out_file_ort)
        trt_model.show_result(
            show_img,
            trt_results,
            score_thr=score_thr,
            show=True,
            win_name='TensorRT',
            out_file=out_file_trt)
        with_mask = trt_model.with_masks
        # compare a part of result
        if with_mask:
            compare_pairs = list(zip(onnx_results, trt_results))
        else:
            compare_pairs = [(onnx_results, trt_results)]
        # Bug fix: the original messages (copy-pasted from pytorch2onnx.py)
        # claimed the comparison was "between Pytorch and ONNX", but this
        # function compares ONNXRuntime outputs against TensorRT outputs.
        err_msg = 'The numerical values are different between ONNXRuntime' + \
                  ' and TensorRT, but it does not necessarily mean the' + \
                  ' TensorRT engine is problematic.'
        # check the numerical value
        for ort_res, trt_res in compare_pairs:
            for o_res, t_res in zip(ort_res, trt_res):
                np.testing.assert_allclose(
                    o_res, t_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between '
              'ONNXRuntime and TensorRT')
def parse_normalize_cfg(test_pipeline):
    """Extract the single `Normalize` transform config from a test pipeline.

    Scans the pipeline for the first step carrying a 'transforms' list and
    returns the one entry of type 'Normalize' inside it.
    """
    transforms = next(
        (step['transforms'] for step in test_pipeline if 'transforms' in step),
        None)
    assert transforms is not None, 'Failed to find `transforms`'
    normalize_cfgs = [cfg for cfg in transforms if cfg['type'] == 'Normalize']
    assert len(normalize_cfgs) == 1, '`norm_config` should only have one'
    return normalize_cfgs[0]
def parse_args():
    """Parse command-line arguments for the ONNX -> TensorRT conversion."""
    parser = argparse.ArgumentParser(
        description='Convert MMDetection models from ONNX to TensorRT')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('model', help='Filename of input ONNX model')
    parser.add_argument(
        '--trt-file',
        type=str,
        default='tmp.trt',
        help='Filename of output TensorRT engine')
    parser.add_argument(
        '--input-img', type=str, default='', help='Image for test')
    parser.add_argument(
        '--show', action='store_true', help='Whether to show output results')
    parser.add_argument(
        '--dataset',
        type=str,
        default='coco',
        help='Dataset name. This argument is deprecated and will be \
            removed in future releases.')
    parser.add_argument(
        '--verify',
        action='store_true',
        help='Verify the outputs of ONNXRuntime and TensorRT')
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='Whether to verbose logging messages while creating \
            TensorRT engine. Defaults to False.')
    parser.add_argument(
        '--to-rgb',
        action='store_false',
        help='Feed model with RGB or BGR image. Default is RGB. This \
            argument is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[400, 600],
        help='Input size of the model')
    parser.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='Mean value used for preprocess input data. This argument \
            is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='Variance value used for preprocess input data. \
            This argument is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--min-shape',
        type=int,
        nargs='+',
        default=None,
        help='Minimum input size of the model in TensorRT')
    parser.add_argument(
        '--max-shape',
        type=int,
        nargs='+',
        default=None,
        help='Maximum input size of the model in TensorRT')
    parser.add_argument(
        '--workspace-size',
        type=int,
        default=1,
        help='Max workspace size in GiB')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
    args = parse_args()
    warnings.warn(
        'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \
        parsed directly from config file and are deprecated and will be \
        removed in future releases.')
    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')

    cfg = Config.fromfile(args.config)

    def parse_shape(shape):
        """Expand a CLI shape ([s] or [h, w]) into an NCHW tuple (1, 3, ., .)."""
        if len(shape) == 1:
            shape = (1, 3, shape[0], shape[0])
        elif len(shape) == 2:
            # Bug fix: this previously tested ``len(args.shape)``, so the
            # branch taken for --min-shape/--max-shape depended on the length
            # of --shape instead of the argument actually being parsed.
            shape = (1, 3) + tuple(shape)
        else:
            raise ValueError('invalid input shape')
        return shape

    if args.shape:
        input_shape = parse_shape(args.shape)
    else:
        # Fall back to the test-pipeline scale from the config.
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])

    if not args.max_shape:
        max_shape = input_shape
    else:
        max_shape = parse_shape(args.max_shape)

    if not args.min_shape:
        min_shape = input_shape
    else:
        min_shape = parse_shape(args.min_shape)

    dataset = DATASETS.get(cfg.data.test['type'])
    assert (dataset is not None)
    # Module-level CLASSES is read by onnx2tensorrt() during verification.
    CLASSES = dataset.CLASSES
    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
    input_config = {
        'min_shape': min_shape,
        'opt_shape': input_shape,
        'max_shape': max_shape,
        'input_shape': input_shape,
        'input_path': args.input_img,
        'normalize_cfg': normalize_cfg
    }
    # Create TensorRT engine
    onnx2tensorrt(
        args.model,
        args.trt_file,
        input_config,
        verify=args.verify,
        show=args.show,
        workspace_size=args.workspace_size,
        verbose=args.verbose)
| 8,467 | 32.338583 | 78 | py |
DDOD | DDOD-main/tools/deployment/mmdet_handler.py | import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
    """TorchServe handler that runs inference with an MMDetection detector."""

    # Minimum score for a detection to be included in the response.
    threshold = 0.5

    def initialize(self, context):
        """Build the detector from the archived config + checkpoint files."""
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(self.map_location + ':' +
                                   str(properties.get('gpu_id')) if torch.cuda.
                                   is_available() else self.map_location)
        self.manifest = context.manifest

        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        # config.py was packed into the archive by mmdet2torchserve.py.
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_detector(self.config_file, checkpoint, self.device)
        self.initialized = True

    def preprocess(self, data):
        """Decode each request payload (raw or base64-encoded bytes) into an
        image array."""
        images = []

        for row in data:
            image = row.get('data') or row.get('body')
            if isinstance(image, str):
                # TorchServe may hand the payload over as a base64 string.
                image = base64.b64decode(image)
            image = mmcv.imfrombytes(image)
            images.append(image)

        return images

    def inference(self, data, *args, **kwargs):
        """Run the detector on the batch of decoded images."""
        results = inference_detector(self.model, data)
        return results

    def postprocess(self, data):
        """Convert raw per-class bbox arrays into a JSON-friendly list of
        {class_name: coords, 'score': score} dicts, dropping detections below
        ``self.threshold``."""
        # Format output following the example ObjectDetectionHandler format
        output = []
        for image_index, image_result in enumerate(data):
            output.append([])
            if isinstance(image_result, tuple):
                bbox_result, segm_result = image_result
                if isinstance(segm_result, tuple):
                    segm_result = segm_result[0]  # ms rcnn
            else:
                bbox_result, segm_result = image_result, None
            for class_index, class_result in enumerate(bbox_result):
                class_name = self.model.CLASSES[class_index]
                for bbox in class_result:
                    # Each row is [x1, y1, x2, y2, score].
                    bbox_coords = bbox[:-1].tolist()
                    score = float(bbox[-1])
                    if score >= self.threshold:
                        output[image_index].append({
                            class_name: bbox_coords,
                            'score': score
                        })

        return output
| 2,462 | 34.185714 | 79 | py |
DDOD | DDOD-main/tools/deployment/pytorch2onnx.py | import argparse
import os.path as osp
import warnings
from functools import partial
import numpy as np
import onnx
import torch
from mmcv import Config, DictAction
from mmdet.core.export import build_model_from_cfg, preprocess_example_input
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
def pytorch2onnx(model,
                 input_img,
                 input_shape,
                 normalize_cfg,
                 opset_version=11,
                 show=False,
                 output_file='tmp.onnx',
                 verify=False,
                 test_img=None,
                 do_simplify=False,
                 dynamic_export=None):
    """Export an MMDetection detector to ONNX, optionally simplify and verify.

    Args:
        model: Detector to export (e.g. built by ``build_model_from_cfg``).
        input_img (str): Path of the example image used to trace the model.
        input_shape (tuple): NCHW shape of the export input.
        normalize_cfg (dict): `Normalize` transform config (mean/std).
        opset_version (int): ONNX opset to target.
        show (bool): Verbose export / show results in windows.
        output_file (str): Where to write the exported ONNX model.
        verify (bool): Compare ONNXRuntime outputs against PyTorch outputs.
        test_img (str | None): Optional different image for verification.
        do_simplify (bool): Run onnx-simplifier on the exported model.
        dynamic_export (bool | None): Export with dynamic batch/height/width
            axes.
    """
    input_config = {
        'input_shape': input_shape,
        'input_path': input_img,
        'normalize_cfg': normalize_cfg
    }
    # prepare input
    one_img, one_meta = preprocess_example_input(input_config)
    img_list, img_meta_list = [one_img], [[one_meta]]

    # replace original forward function so tracing sees a plain
    # tensor-in/tensor-out call; restored after export.
    origin_forward = model.forward
    model.forward = partial(
        model.forward,
        img_metas=img_meta_list,
        return_loss=False,
        rescale=False)

    output_names = ['dets', 'labels']
    if model.with_mask:
        output_names.append('masks')
    input_name = 'input'
    dynamic_axes = None
    if dynamic_export:
        dynamic_axes = {
            input_name: {
                0: 'batch',
                2: 'width',
                3: 'height'
            },
            'dets': {
                0: 'batch',
                1: 'num_dets',
            },
            'labels': {
                0: 'batch',
                1: 'num_dets',
            },
        }
        if model.with_mask:
            dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'}

    torch.onnx.export(
        model,
        img_list,
        output_file,
        input_names=[input_name],
        output_names=output_names,
        export_params=True,
        keep_initializers_as_inputs=True,
        do_constant_folding=True,
        verbose=show,
        opset_version=opset_version,
        dynamic_axes=dynamic_axes)

    model.forward = origin_forward

    # get the custom op path
    ort_custom_op_path = ''
    try:
        from mmcv.ops import get_onnxruntime_op_path
        ort_custom_op_path = get_onnxruntime_op_path()
    except (ImportError, ModuleNotFoundError):
        warnings.warn('If input model has custom op from mmcv, \
            you may have to build mmcv with ONNXRuntime from source.')

    if do_simplify:
        import onnxsim
        from mmdet import digit_version
        min_required_version = '0.3.0'
        assert digit_version(onnxsim.__version__) >= digit_version(
            min_required_version
        ), f'Requires to install onnx-simplify>={min_required_version}'

        input_dic = {'input': img_list[0].detach().cpu().numpy()}
        onnxsim.simplify(
            output_file, input_data=input_dic, custom_lib=ort_custom_op_path)
    print(f'Successfully exported ONNX model: {output_file}')

    if verify:
        # check by onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)

        # wrap onnx model
        onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0)
        if dynamic_export:
            # scale up to test dynamic shape
            h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]]
            h, w = min(1344, h), min(1344, w)
            input_config['input_shape'] = (1, 3, h, w)
        # NOTE(review): `test_img` is never read here — when it is None the
        # input path is reset to `input_img` (already its value), and a
        # non-None --test-img leaves input_path unchanged. Confirm intent.
        if test_img is None:
            input_config['input_path'] = input_img

        # prepare input once again
        one_img, one_meta = preprocess_example_input(input_config)
        img_list, img_meta_list = [one_img], [[one_meta]]

        # get pytorch output
        pytorch_results = model(
            img_list, img_metas=img_meta_list, return_loss=False,
            rescale=True)[0]

        img_list = [_.cuda().contiguous() for _ in img_list]
        if dynamic_export:
            # Duplicate (flipped) inputs to exercise the dynamic batch axis.
            img_list = img_list + [_.flip(-1).contiguous() for _ in img_list]
            img_meta_list = img_meta_list * 2
        # get onnx output
        onnx_results = onnx_model(
            img_list, img_metas=img_meta_list, return_loss=False)[0]

        # visualize predictions
        score_thr = 0.3
        if show:
            out_file_ort, out_file_pt = None, None
        else:
            out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png'
        show_img = one_meta['show_img']
        model.show_result(
            show_img,
            pytorch_results,
            score_thr=score_thr,
            show=True,
            win_name='PyTorch',
            out_file=out_file_pt)
        onnx_model.show_result(
            show_img,
            onnx_results,
            score_thr=score_thr,
            show=True,
            win_name='ONNXRuntime',
            out_file=out_file_ort)

        # compare a part of result
        if model.with_mask:
            compare_pairs = list(zip(onnx_results, pytorch_results))
        else:
            compare_pairs = [(onnx_results, pytorch_results)]
        err_msg = 'The numerical values are different between Pytorch' + \
                  ' and ONNX, but it does not necessarily mean the' + \
                  ' exported ONNX model is problematic.'
        # check the numerical value
        for onnx_res, pytorch_res in compare_pairs:
            for o_res, p_res in zip(onnx_res, pytorch_res):
                np.testing.assert_allclose(
                    o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
    """Extract the single `Normalize` transform config from a test pipeline.

    Scans the pipeline for the first step carrying a 'transforms' list and
    returns the one entry of type 'Normalize' inside it.
    """
    transforms = next(
        (step['transforms'] for step in test_pipeline if 'transforms' in step),
        None)
    assert transforms is not None, 'Failed to find `transforms`'
    normalize_cfgs = [cfg for cfg in transforms if cfg['type'] == 'Normalize']
    assert len(normalize_cfgs) == 1, '`norm_config` should only have one'
    return normalize_cfgs[0]
def parse_args():
    """Parse command-line arguments for the PyTorch -> ONNX export script."""
    parser = argparse.ArgumentParser(
        description='Convert MMDetection models to ONNX')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--input-img', type=str, help='Images for input')
    parser.add_argument(
        '--show',
        action='store_true',
        help='Show onnx graph and detection outputs')
    parser.add_argument('--output-file', type=str, default='tmp.onnx')
    parser.add_argument('--opset-version', type=int, default=11)
    parser.add_argument(
        '--test-img', type=str, default=None, help='Images for test')
    parser.add_argument(
        '--dataset',
        type=str,
        default='coco',
        help='Dataset name. This argument is deprecated and will be removed \
            in future releases.')
    parser.add_argument(
        '--verify',
        action='store_true',
        help='verify the onnx model output against pytorch output')
    parser.add_argument(
        '--simplify',
        action='store_true',
        help='Whether to simplify onnx model.')
    # NOTE(review): because --shape defaults to [800, 1216], the
    # `args.shape is None` branch in the __main__ block is unreachable
    # unless this default is removed.
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[800, 1216],
        help='input image size')
    parser.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='mean value used for preprocess input data.This argument \
            is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='variance value used for preprocess input data. '
        'This argument is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='Override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--dynamic-export',
        action='store_true',
        help='Whether to export onnx with dynamic axis.')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()
    warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \
        parsed directly from config file and are deprecated and \
        will be removed in future releases.')
    assert args.opset_version == 11, 'MMDet only support opset 11 now'

    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError:
        raise NotImplementedError('please update mmcv to version>=v1.0.4')
    register_extra_symbolics(args.opset_version)

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Resolve the NCHW export shape: config default, square, or (h, w).
    if args.shape is None:
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])
    elif len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    # build the model and load checkpoint
    model = build_model_from_cfg(args.config, args.checkpoint,
                                 args.cfg_options)

    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')

    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)

    # convert model to onnx file
    pytorch2onnx(
        model,
        args.input_img,
        input_shape,
        normalize_cfg,
        opset_version=args.opset_version,
        show=args.show,
        output_file=args.output_file,
        verify=args.verify,
        test_img=args.test_img,
        do_simplify=args.simplify,
        dynamic_export=args.dynamic_export)
| 10,374 | 32.905229 | 79 | py |
DDOD | DDOD-main/tools/model_converters/selfsup2mmdet.py | import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
    """Convert keys in pycls pretrained moco models to mmdet style.

    Args:
        src (str): Path of the MoCo checkpoint to read.
        dst (str): Path to write the converted checkpoint to.
    """
    # Load the MoCo checkpoint and pull out its raw state dict.
    source_state = torch.load(src)['state_dict']
    # Only the query-encoder weights are kept; strip their prefix so the
    # keys match mmdet's backbone naming.
    prefix = 'module.encoder_q.'
    converted_state = OrderedDict()
    for full_name, tensor in source_state.items():
        if not full_name.startswith(prefix):
            continue
        stripped_name = full_name.replace(prefix, '')
        converted_state[stripped_name] = tensor
        print(full_name, '->', stripped_name)
    # Wrap in the mmdet checkpoint layout and write it out.
    torch.save({'state_dict': converted_state}, dst)
def main():
    """Command-line entry point: dispatch to the requested converter."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument(
        '--selfsup', type=str, choices=['moco', 'swav'], help='save path')
    cli = parser.parse_args()
    # SwAV checkpoints are already in the expected layout; only MoCo
    # checkpoints need their keys renamed.
    if cli.selfsup == 'swav':
        print('SWAV does not need to convert the keys')
    elif cli.selfsup == 'moco':
        moco_convert(cli.src, cli.dst)
if __name__ == '__main__':
    main()
| 1,195 | 27.47619 | 74 | py |
DDOD | DDOD-main/tools/model_converters/publish_model.py | import argparse
import re
import subprocess

import torch
def parse_args():
    """Parse the two positional checkpoint paths from the command line."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer state, saves the slimmed checkpoint, and renames
    the output to ``<out_file minus .pth>-<sha256 prefix>.pth`` so the hash
    becomes part of the published filename.

    Args:
        in_file (str): Path of the checkpoint to process.
        out_file (str): Path the slimmed checkpoint is first written to.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # Use the legacy (non-zipfile) format on torch >= 1.6 so the published
    # file stays loadable by older torch versions. Compare parsed version
    # numbers: the original string comparison wrongly treated '1.10' < '1.6'.
    torch_version = tuple(
        int(part) for part in re.findall(r'\d+', torch.__version__)[:2])
    if torch_version >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    # Block until the rename completes and fail loudly on error; the
    # original fire-and-forget Popen could return before the file existed
    # under its final name.
    subprocess.check_call(['mv', out_file, final_file])
def main():
    """Entry point: publish the checkpoint named on the command line."""
    cli = parse_args()
    process_checkpoint(cli.in_file, cli.out_file)
if __name__ == '__main__':
    main()
| 1,253 | 28.162791 | 78 | py |
DDOD | DDOD-main/tools/model_converters/regnet2mmdet.py | import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls stem parameter to mmdet style and record it."""
    renamed = model_key
    # pycls 'stem.conv'/'stem.bn' correspond to 'conv1'/'bn1' in mmdet.
    for old, new in (('stem.conv', 'conv1'), ('stem.bn', 'bn1')):
        renamed = renamed.replace(old, new)
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Rename the pycls classifier head key ('head.fc') to mmdet's 'fc'."""
    renamed = model_key.replace('head.fc', 'fc')
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls residual-stage parameter to mmdet ResNet style.

    pycls keys look like ``s<stage>.b<block>.<module>...`` and are mapped
    onto mmdet's ``layer<stage>.<block - 1>.<submodule>`` naming.

    Args:
        model_key (str): Source key from the pycls state dict.
        model_weight: Parameter stored under ``model_key``.
        state_dict (dict): Destination state dict, updated in place.
        converted_names (set): Keys already handled, updated in place.

    Raises:
        ValueError: If the key does not match any known pycls layout.
    """
    split_keys = model_key.split('.')
    layer, block, module = split_keys[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(layer[1:])}'
    block_name = f'{block_id - 1}'
    # Mapping from pycls bottleneck sub-module names to mmdet names.
    f_module_map = {
        'a_bn': 'bn1',
        'b_bn': 'bn2',
        'c_bn': 'bn3',
        'a': 'conv1',
        'b': 'conv2',
        'c': 'conv3',
    }
    if block_id == 1 and module == 'bn':
        # Downsample-branch batch norm of the first block in a stage.
        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
    elif block_id == 1 and module == 'proj':
        # Downsample-branch projection conv of the first block in a stage.
        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
    elif module == 'f':
        sub_module = split_keys[3]
        if sub_module not in f_module_map:
            # Previously an unknown sub-module left `module_name` unbound
            # and raised an opaque NameError; fail explicitly instead.
            raise ValueError(f'Unsupported conversion of key {model_key}')
        module_name = f_module_map[sub_module]
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style.

    Args:
        src (str): Path of the pycls checkpoint to read.
        dst (str): Path to write the converted checkpoint to.
    """
    # Load the pycls checkpoint and grab its parameter blobs.
    blobs = torch.load(src)['model_state']
    converted_state = OrderedDict()
    handled_keys = set()
    # Dispatch each key to the converter matching its network section.
    for blob_key, blob_weight in blobs.items():
        if 'stem' in blob_key:
            convert_stem(blob_key, blob_weight, converted_state, handled_keys)
        elif 'head' in blob_key:
            convert_head(blob_key, blob_weight, converted_state, handled_keys)
        elif blob_key.startswith('s'):
            convert_reslayer(blob_key, blob_weight, converted_state,
                             handled_keys)
    # Report any keys no converter claimed.
    for blob_key in blobs:
        if blob_key not in handled_keys:
            print(f'not converted: {blob_key}')
    # Save in the mmdet checkpoint layout.
    torch.save({'state_dict': converted_state}, dst)
def main():
    """Entry point: convert the checkpoint named on the command line."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    cli = parser.parse_args()
    convert(cli.src, cli.dst)
if __name__ == '__main__':
    main()
| 3,015 | 32.511111 | 77 | py |
DDOD | DDOD-main/tools/model_converters/upgrade_model_version.py | import argparse
import os
import re
import tempfile
from collections import OrderedDict

import torch
from mmcv import Config
def is_head(key):
    """Return True if ``key`` belongs to one of the detector head modules."""
    head_prefixes = ('bbox_head', 'mask_head', 'semantic_head', 'grid_head',
                     'mask_iou_head')
    return any(key.startswith(prefix) for prefix in head_prefixes)
def parse_config(config_strings):
    """Infer model-family flags from a config string stored in a checkpoint.

    The config text is materialised as a temporary ``.py`` file so that
    ``mmcv.Config`` can parse it.

    Args:
        config_strings (str): Full text of the training config.

    Returns:
        tuple[bool, bool, bool, bool]: ``(is_two_stage, is_ssd, is_retina,
        reg_cls_agnostic)`` flags describing the detector.
    """
    temp_file = tempfile.NamedTemporaryFile()
    # Config.fromfile requires a real .py path, so write one next to the
    # reserved temporary name.
    config_path = f'{temp_file.name}.py'
    try:
        with open(config_path, 'w') as f:
            f.write(config_strings)
        config = Config.fromfile(config_path)
    finally:
        temp_file.close()
        # The original implementation leaked this generated .py file (and
        # the temp handle on exception); clean both up.
        if os.path.exists(config_path):
            os.remove(config_path)
    is_two_stage = True
    is_ssd = False
    is_retina = False
    reg_cls_agnostic = False
    if 'rpn_head' not in config.model:
        # Single-stage detector: identify which head family it uses.
        is_two_stage = False
        # check whether it is SSD
        if config.model.bbox_head.type == 'SSDHead':
            is_ssd = True
        elif config.model.bbox_head.type == 'RetinaHead':
            is_retina = True
    elif isinstance(config.model['bbox_head'], list):
        # Cascade-style list of heads is treated as class-agnostic.
        reg_cls_agnostic = True
    elif 'reg_class_agnostic' in config.model.bbox_head:
        reg_cls_agnostic = config.model.bbox_head \
            .reg_class_agnostic
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
    """Move the background class from channel 0 to the last channel.

    Old checkpoints stored the background class first; mmdet v2 expects it
    last, so the class channels are rotated by one.

    Args:
        val (Tensor): Classification weight or bias tensor.
        num_classes (int): Number of classes including background.

    Returns:
        Tensor: Tensor with class channels rotated, or ``val`` unchanged
        when the layout is class-agnostic.
    """
    # Bias vector: rotate the class dimension directly.
    if val.dim() == 1:
        return torch.cat((val[1:], val[:1]), dim=0)
    out_channels, in_channels = val.shape[:2]
    if out_channels != num_classes and out_channels % num_classes == 0:
        # conv_cls for softmax output: classes repeat per anchor, so view
        # per-group before rotating.
        grouped = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
        rotated = torch.cat((grouped[:, 1:], grouped[:, :1]), dim=1)
        return rotated.reshape(val.size())
    if out_channels == num_classes:
        # fc_cls: one output channel per class.
        return torch.cat((val[1:], val[:1]), dim=0)
    # agnostic | retina_cls | rpn_cls: nothing to reorder.
    return val
def truncate_cls_channel(val, num_classes=81):
    """Drop the background channel from mask-head (conv_logits) parameters.

    Args:
        val (Tensor): ``conv_logits`` weight or bias tensor.
        num_classes (int): Number of classes including background.

    Returns:
        Tensor: Tensor without the background channel, or ``val`` unchanged
        for class-agnostic layouts.
    """
    if val.dim() == 1:
        # Bias: keep the first num_classes - 1 entries.
        if val.size(0) % num_classes != 0:
            return val
        return val[:num_classes - 1]
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes != 0:
        # Class-agnostic weight: leave untouched.
        return val
    # conv_logits weight: view per-class, drop class 0, flatten back.
    per_class = val.reshape(num_classes, in_channels, *val.shape[2:])
    return per_class[1:].reshape(-1, *val.shape[1:])
def truncate_reg_channel(val, num_classes=81):
    """Drop one class slot from box-regression (fc_reg/rpn_reg) parameters.

    Args:
        val (Tensor): ``fc_reg``/``rpn_reg`` weight or bias tensor.
        num_classes (int): Number of classes including background.

    Returns:
        Tensor: Tensor with one regression slot removed, or ``val``
        unchanged for class-agnostic layouts.
    """
    if val.dim() == 1:
        # Bias: view per-class and keep the first num_classes - 1 slots.
        if val.size(0) % num_classes != 0:
            return val
        kept = val.reshape(num_classes, -1)[:num_classes - 1]
        return kept.reshape(-1)
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes != 0:
        # Class-agnostic weight: leave untouched.
        return val
    # Weight: view per-class and drop class 0.
    per_class = val.reshape(num_classes, -1, in_channels, *val.shape[2:])
    return per_class[1:].reshape(-1, *val.shape[1:])
def convert(in_file, out_file, num_classes):
    """Convert keys in checkpoints.
    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    Args:
        in_file (str): Path of the legacy checkpoint.
        out_file (str): Path to write the upgraded checkpoint to.
        num_classes (int): Number of classes (including background) the
            model was trained with.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # Recover model-family flags from the config text embedded in the
    # checkpoint's metadata (prefixed with '#' so the first line parses).
    is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
        '#' + meta_info['config'])
    # NOTE(review): these version checks compare strings lexicographically,
    # which misorders e.g. '2.10.0' vs '2.5.0' — presumably fine for the
    # historic versions targeted here, but verify before reuse.
    if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
        upgrade_retina = True
    else:
        upgrade_retina = False
    # MMDetection v2.5.0 unifies the class order in RPN
    # if the model is trained in version<v2.5.0
    # The RPN model should be upgraded to be used in version>=2.5.0
    if meta_info['mmdet_version'] < '2.5.0':
        upgrade_rpn = True
    else:
        upgrade_rpn = False
    for key, val in in_state_dict.items():
        new_key = key
        new_val = val
        # Two-stage heads moved under the 'roi_head.' namespace in v2.
        if is_two_stage and is_head(key):
            new_key = 'roi_head.{}'.format(key)
        # classification
        # rpn_cls is only reordered when upgrading across the v2.5.0 change.
        if upgrade_rpn:
            m = re.search(
                r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        else:
            m = re.search(
                r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        if m is not None:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        # regression
        if upgrade_rpn:
            m = re.search(r'(fc_reg).(weight|bias)', new_key)
        else:
            m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
        if m is not None and not reg_cls_agnostic:
            print(f'truncate regression channels of {new_key}')
            new_val = truncate_reg_channel(val, num_classes)
        # mask head
        m = re.search(r'(conv_logits).(weight|bias)', new_key)
        if m is not None:
            print(f'truncate mask prediction channels of {new_key}')
            new_val = truncate_cls_channel(val, num_classes)
        m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
        # Legacy issues in RetinaNet since V1.x
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        if m is not None and upgrade_retina:
            param = m.groups()[1]
            new_key = key.replace(param, f'conv.{param}')
            out_state_dict[new_key] = val
            print(f'rename the name of {key} to {new_key}')
            continue
        m = re.search(r'(cls_convs).\d.(weight|bias)', key)
        if m is not None and is_ssd:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        out_state_dict[new_key] = new_val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
def main():
    """Entry point: upgrade the checkpoint named on the command line."""
    parser = argparse.ArgumentParser(description='Upgrade model version')
    parser.add_argument('in_file', help='input checkpoint file')
    parser.add_argument('out_file', help='output checkpoint file')
    parser.add_argument(
        '--num-classes',
        type=int,
        default=81,
        help='number of classes of the original model')
    cli = parser.parse_args()
    convert(cli.in_file, cli.out_file, cli.num_classes)
if __name__ == '__main__':
    main()
| 6,800 | 31.385714 | 79 | py |
DDOD | DDOD-main/tools/model_converters/upgrade_ssd_version.py | import argparse
import os
import tempfile
from collections import OrderedDict

import torch
from mmcv import Config
def parse_config(config_strings):
    """Validate that a checkpoint's embedded config describes an SSD model.

    Args:
        config_strings (str): Full text of the training config.

    Raises:
        AssertionError: If the config's bbox head is not ``SSDHead``.
    """
    temp_file = tempfile.NamedTemporaryFile()
    # Config.fromfile needs a real .py path, so write one next to the
    # reserved temporary name.
    config_path = f'{temp_file.name}.py'
    try:
        with open(config_path, 'w') as f:
            f.write(config_strings)
        config = Config.fromfile(config_path)
    finally:
        # The original implementation leaked both the temp handle and the
        # generated .py file; clean both up.
        temp_file.close()
        if os.path.exists(config_path):
            os.remove(config_path)
    # check whether it is SSD
    if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not a SSD model.')
def convert(in_file, out_file):
    """Upgrade an old SSD checkpoint to the current mmdet key layout.

    Renames the extra-layer, L2-norm and bbox-head parameters, then saves
    the result, using the legacy serialization format on torch >= 1.6 so
    older torch versions can still load the file.

    Args:
        in_file (str): Path of the legacy SSD checkpoint.
        out_file (str): Path to write the upgraded checkpoint to.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # Abort early if the embedded config is not an SSD model.
    parse_config('#' + meta_info['config'])
    for key, value in in_state_dict.items():
        if 'extra' in key:
            # extra conv i maps to extra_layers[i // 2][i % 2].conv
            layer_idx = int(key.split('.')[2])
            new_key = 'neck.extra_layers.{}.{}.conv.'.format(
                layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
        elif 'l2_norm' in key:
            new_key = 'neck.l2_norm.weight'
        elif 'bbox_head' in key:
            # Head convs became a ModuleList; splice in the '.0' index.
            new_key = key[:21] + '.0' + key[21:]
        else:
            new_key = key
        out_state_dict[new_key] = value
    checkpoint['state_dict'] = out_state_dict
    # Compare parsed version numbers: the original lexicographic string
    # comparison wrongly treated '1.10' as older than '1.6'.
    major, minor = (int(part)
                    for part in torch.__version__.split('+')[0].split('.')[:2])
    if (major, minor) >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
def main():
    """Entry point: upgrade the SSD checkpoint named on the command line."""
    parser = argparse.ArgumentParser(description='Upgrade SSD version')
    parser.add_argument('in_file', help='input checkpoint file')
    parser.add_argument('out_file', help='output checkpoint file')
    cli = parser.parse_args()
    convert(cli.in_file, cli.out_file)
if __name__ == '__main__':
    main()
| 1,741 | 29.034483 | 78 | py |
DDOD | DDOD-main/tools/model_converters/detectron2pytorch.py | import argparse
from collections import OrderedDict
import mmcv
import torch
# Residual block counts per stage for the supported ResNet depths.
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Convert a detectron affine-channel layer into BatchNorm parameters."""
    # detectron replace bn with affine channel layer: '_s' is the scale
    # (weight) and '_b' the shift (bias). Running statistics are not stored,
    # so install identity statistics (mean 0, var 1).
    bias = torch.from_numpy(blobs[caffe_name + '_b'])
    weight = torch.from_numpy(blobs[caffe_name + '_s'])
    state_dict[torch_name + '.bias'] = bias
    state_dict[torch_name + '.weight'] = weight
    state_dict[torch_name + '.running_mean'] = torch.zeros(weight.size())
    state_dict[torch_name + '.running_var'] = torch.ones(weight.size())
    converted_names.update({caffe_name + '_b', caffe_name + '_s'})
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
                    converted_names):
    """Copy a conv/fc weight (and optional bias) from caffe blob naming."""
    state_dict[torch_name + '.weight'] = torch.from_numpy(
        blobs[caffe_name + '_w'])
    converted_names.add(caffe_name + '_w')
    # Not every layer carries a bias blob (e.g. a conv followed by BN).
    bias_blob = caffe_name + '_b'
    if bias_blob in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_blob])
        converted_names.add(bias_blob)
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style.

    Args:
        src (str): Path of the detectron (pickled caffe2) checkpoint.
        dst (str): Path to write the converted pytorch checkpoint to.
        depth (int): ResNet depth; must be one of ``arch_settings`` (50/101).
    """
    # load arch_settings
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    # load caffe model
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for i in range(1, len(block_nums) + 1):
        for j in range(block_nums[i - 1]):
            # The first block of every stage carries the downsample branch
            # ('branch1' in caffe naming); caffe stages are numbered from 2.
            if j == 0:
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
                                f'layer{i}.{j}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
                           f'layer{i}.{j}.downsample.1', converted_names)
            # Bottleneck convs a/b/c map onto conv1/conv2/conv3 (+ their BN).
            for k, letter in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict,
                                f'res{i + 1}_{j}_branch2{letter}',
                                f'layer{i}.{j}.conv{k+1}', converted_names)
                convert_bn(blobs, state_dict,
                           f'res{i + 1}_{j}_branch2{letter}_bn',
                           f'layer{i}.{j}.bn{k + 1}', converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """Entry point: convert the detectron model named on the command line."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument('depth', type=int, help='ResNet model depth')
    cli = parser.parse_args()
    convert(cli.src, cli.dst, cli.depth)
if __name__ == '__main__':
    main()
| 3,530 | 41.542169 | 78 | py |
DDOD | DDOD-main/tools/analysis_tools/benchmark.py | import argparse
import os
import time
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Parse CLI options for the benchmark script and seed ``LOCAL_RANK``.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Distributed launchers read the rank from the environment; mirror the
    # CLI value there when the launcher did not set it.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def measure_inferense_speed(cfg, checkpoint, max_iter, log_interval,
                            is_fuse_conv_bn):
    """Benchmark a detector's inference speed on the test dataset.

    NOTE: the function name keeps the historical misspelling of
    'inference' because external callers depend on it.

    Args:
        cfg (mmcv.Config): Full test config (modified in place).
        checkpoint (str): Path of the checkpoint to load.
        max_iter (int): Number of images to benchmark before stopping.
        log_interval (int): Log FPS every this many images.
        is_fuse_conv_bn (bool): Whether to fuse conv and bn layers first.

    Returns:
        float: The last computed frames-per-second value (0 if fewer than
        ``num_warmup`` + ``log_interval`` images were processed).
    """
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    # Benchmark always runs one sample per GPU regardless of the config.
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=True,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, checkpoint, map_location='cpu')
    if is_fuse_conv_bn:
        model = fuse_conv_bn(model)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    model.eval()
    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0
    fps = 0
    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):
        # Synchronize around the forward pass so CUDA kernel time is
        # actually captured by the wall-clock measurement.
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(
                    f'Done image [{i + 1:<3}/ {max_iter}], '
                    f'fps: {fps:.1f} img / s, '
                    f'times per image: {1000 / fps:.1f} ms / img',
                    flush=True)
        if (i + 1) == max_iter:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(
                f'Overall fps: {fps:.1f} img / s, '
                f'times per image: {1000 / fps:.1f} ms / img',
                flush=True)
            break
    return fps
def main():
    """Entry point: load the config, set up distributed mode, benchmark."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # Benchmarking is only implemented for the distributed code path.
    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    init_dist(args.launcher, **cfg.dist_params)
    measure_inferense_speed(cfg, args.checkpoint, args.max_iter,
                            args.log_interval, args.fuse_conv_bn)
if __name__ == '__main__':
    main()
| 4,795 | 32.538462 | 78 | py |
DDOD | DDOD-main/tools/analysis_tools/get_flops.py | import argparse
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    """Parse CLI options for the FLOPs counter.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    # One value means a square input; two values are interpreted as (H, W).
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    return args
def main():
    """Build the detector from the config and report its FLOPs and params."""
    args = parse_args()
    # Accept one shape value (square input) or two (height, width).
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    # The complexity hook needs a tensor-only forward; detectors expose it
    # as `forward_dummy`.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # BUGFIX: the error message previously read "currently not
        # currently supported".
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))
    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
if __name__ == '__main__':
    main()
| 2,566 | 30.304878 | 79 | py |
DDOD | DDOD-main/tools/analysis_tools/test_robustness.py | import argparse
import copy
import os
import os.path as osp
import mmcv
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tools.analysis_tools.robustness_eval import get_results
from mmdet import datasets
from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmdet.core import eval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def coco_eval_with_return(result_files,
                          result_types,
                          coco,
                          max_dets=(100, 300, 1000)):
    """Run COCO evaluation and return the metrics instead of only printing.

    Args:
        result_files (dict): Mapping from result type to its .json file.
        result_types (list[str]): Subset of
            ['proposal', 'bbox', 'segm', 'keypoints'] to evaluate.
        coco (COCO | str): A COCO api object or path to an annotation file.
        max_dets (tuple[int]): maxDets used for proposal evaluation.

    Returns:
        dict: Per result type, either a name->value dict (bbox/segm) or the
        raw ``cocoEval.stats`` array.
    """
    for res_type in result_types:
        assert res_type in ['proposal', 'bbox', 'segm', 'keypoints']
    # Accept either a loaded COCO object or a path to the annotations.
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')
        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # Proposals are evaluated as class-agnostic boxes.
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if res_type == 'segm' or res_type == 'bbox':
            # Name the 12 standard COCO summary metrics for readability.
            metric_names = [
                'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
                'AR100', 'ARs', 'ARm', 'ARl'
            ]
            eval_results[res_type] = {
                metric_names[i]: cocoEval.stats[i]
                for i in range(len(metric_names))
            }
        else:
            eval_results[res_type] = cocoEval.stats
    return eval_results
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         logger='print',
                         only_ap=True):
    """Run Pascal-VOC mAP evaluation and return the metrics.

    Args:
        result_file (str): Path of the pickled detection results.
        dataset: Dataset providing ``get_ann_info`` and ``CLASSES``.
        iou_thr (float): IoU threshold counting a detection as correct.
        logger (str | None): Passed through to ``eval_map``.
        only_ap (bool): If True, strip per-class results down to 'ap'.

    Returns:
        tuple: ``(mean_ap, eval_results)``.
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    # VOC2007 selects the legacy 11-point metric via the 'voc07' name.
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger=logger)
    if only_ap:
        eval_results = [{'ap': entry['ap']} for entry in eval_results]
    return mean_ap, eval_results
def parse_args():
    """Parse CLI options for the robustness benchmark and seed LOCAL_RANK.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    # Either concrete corruption names or one of the shorthand groups
    # ('all', 'benchmark', 'noise', ...) expanded later in main().
    parser.add_argument(
        '--corruptions',
        type=str,
        nargs='+',
        default='benchmark',
        choices=[
            'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
            'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
            'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
            'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
            'spatter', 'saturate'
        ],
        help='corruptions')
    # Severity 0 means the clean, uncorrupted dataset.
    parser.add_argument(
        '--severities',
        type=int,
        nargs='+',
        default=[0, 1, 2, 3, 4, 5],
        help='corruption severity levels')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for pascal voc evaluation')
    parser.add_argument(
        '--summaries',
        type=bool,
        default=False,
        help='Print summaries for every corruption and severity')
    parser.add_argument(
        '--workers', type=int, default=32, help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--final-prints',
        type=str,
        nargs='+',
        choices=['P', 'mPC', 'rPC'],
        default='mPC',
        help='corruption benchmark metric to print at the end')
    parser.add_argument(
        '--final-prints-aggregate',
        type=str,
        choices=['all', 'benchmark'],
        default='benchmark',
        help='aggregate all results or only those for benchmark corruptions')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    # Distributed launchers read the rank from the environment; mirror the
    # CLI value there when the launcher did not set it.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Evaluate a detector's robustness on corrupted versions of a dataset.

    For every requested corruption type and severity the test pipeline is
    rebuilt with the matching ``Corrupt`` transform, inference is run
    (single- or multi-GPU), and the chosen metrics are evaluated. Results
    are aggregated per corruption/severity, dumped after each evaluation,
    and summarized at the end.
    """
    args = parse_args()
    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "show-dir"')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)
    # Expand the corruption shorthand groups into concrete corruption names.
    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions
    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue
            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)
            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')
            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)
            # build the model and load checkpoint
            cfg.model.train_cfg = None
            model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(
                model, args.checkpoint, map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this walkaround is for backward compatibility
            if 'CLASSES' in checkpoint.get('meta', {}):
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES
            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        # BUGFIX: was `osp.makedirs`, but `os.path` has no
                        # makedirs and it raised AttributeError.
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)
            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation \
                                    is supported for pascal voc')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    # BUGFIX: the suffix previously sat on
                                    # its own line (`+ f'.{name}'`), so it
                                    # was never appended and the stray
                                    # unary-plus statement raised TypeError.
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')
                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)
    # BUGFIX: also guard on args.out — without it, a --show-only run hit a
    # NameError on eval_results_filename here.
    if rank == 0 and args.out:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate
        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)
if __name__ == '__main__':
    main()
| 15,373 | 38.319693 | 79 | py |
DDOD | DDOD-main/crowd_code/utils/SGD_bias.py | import torch
from torch.optim.optimizer import Optimizer, required
class SGD(Optimizer):
    r"""Stochastic gradient descent (optionally with momentum) that skips
    weight decay for bias/normalization parameters.

    This is a trimmed copy of ``torch.optim.SGD`` with one behavioral
    difference: L2 weight decay is applied only to parameters with more than
    one dimension (conv/linear weights), so 1-D parameters such as biases and
    BatchNorm scales/shifts are *not* decayed (see the ``p.dim() > 1`` check
    in :meth:`step`).

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty), applied
            only to parameters with ``dim() > 1`` (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Checkpoints saved before 'nesterov' existed may lack the key.
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): a closure that reevaluates the model
                and returns the loss. Gradients are re-enabled while it runs.
                (Restored standard-``Optimizer`` parameter: the previous
                version always returned ``None`` and broke callers that pass
                a closure.)

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure given.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                # Decay only multi-dimensional params (skip biases/norm params).
                if weight_decay != 0 and p.dim() > 1:
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.add_(d_p, alpha=-group['lr'])

        return loss
| 2,777 | 39.26087 | 88 | py |
DDOD | DDOD-main/.dev_scripts/benchmark_filter.py | import argparse
import os
import os.path as osp
def parse_args():
    """Parse CLI flags selecting which groups of configs to train.

    Returns:
        argparse.Namespace: flags for each config group, an optional list of
        extra model names, and the output file path.
    """
    parser = argparse.ArgumentParser(description='Filter configs to train')
    # Boolean group selectors share the same shape; declare them in one place.
    group_flags = (
        ('--basic-arch', 'to train models in basic arch'),
        ('--datasets', 'to train models in dataset'),
        ('--data-pipeline',
         'to train models related to data pipeline, e.g. augmentations'),
        ('--nn-module',
         'to train models related to neural network modules'),
    )
    for flag, help_text in group_flags:
        parser.add_argument(flag, action='store_true', help=help_text)
    parser.add_argument(
        '--model-options',
        nargs='+',
        help='custom options to special model benchmark')
    parser.add_argument(
        '--out',
        type=str,
        default='batch_train_list.txt',
        help='output path of gathered metrics to be stored')
    return parser.parse_args()
# Config sub-directories grouped by which aspect of the codebase they
# exercise; selected via the corresponding CLI flags in parse_args().
basic_arch_root = [
    'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
    'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
    'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
    'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
    'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
    'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
    'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
# Dataset-specific config directories.
datasets_root = [
    'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
# Data-pipeline (augmentation) config directories.
data_pipeline_root = ['albu_example', 'instaboost']
# Neural-network-module (backbone/neck/op) config directories.
nn_module_root = [
    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
    'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
# Whitelist of concrete config files eligible for benchmarking. A config is
# kept by main() only if its directory group was requested AND it is listed
# here.
benchmark_pool = [
    'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
    'configs/atss/atss_r50_fpn_1x_coco.py',
    'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
    'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
    'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
    'configs/centripetalnet/'
    'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
    'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
    'configs/cornernet/'
    'cornernet_hourglass104_mstest_8x6_210e_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
    'configs/detectors/detectors_htc_r50_1x_coco.py',
    'configs/detr/detr_r50_8x2_150e_coco.py',
    'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
    'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py',  # noqa
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
    'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
    'configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py',
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
    'configs/gfl/gfl_r50_fpn_1x_coco.py',
    'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
    'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
    'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
    'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
    'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
    'configs/htc/htc_r50_fpn_1x_coco.py',
    'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
    'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
    'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
    'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
    'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    'configs/paa/paa_r50_fpn_1x_coco.py',
    'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
    'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
    'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
    'configs/resnest/'
    'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
    'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
    'configs/rpn/rpn_r50_fpn_1x_coco.py',
    'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
    'configs/ssd/ssd300_coco.py',
    'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
    'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
    'configs/yolact/yolact_r50_1x8_coco.py',
    'configs/yolo/yolov3_d53_320_273e_coco.py',
    'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
    'configs/scnet/scnet_r50_fpn_1x_coco.py',
    'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
    """Collect the requested benchmark configs and write them to ``args.out``.

    Config directories are selected by CLI group flags, then filtered against
    the ``benchmark_pool`` whitelist; one config path per line is written to
    the output file.
    """
    args = parse_args()

    # Gather every requested config group into one list of directory names.
    selected_roots = []
    for enabled, roots in (
            (args.basic_arch, basic_arch_root),
            (args.datasets, datasets_root),
            (args.data_pipeline, data_pipeline_root),
            (args.nn_module, nn_module_root)):
        if enabled:
            selected_roots.extend(roots)
    if args.model_options is not None:
        selected_roots.extend(args.model_options)

    # Keep only whitelisted configs, preserving discovery order, no duplicates.
    benchmark_configs = []
    for cfg_root in selected_roots:
        cfg_dir = osp.join('configs/', cfg_root)
        for entry in os.scandir(cfg_dir):
            candidate = osp.join(cfg_dir, entry.name)
            if candidate in benchmark_pool and candidate not in benchmark_configs:
                benchmark_configs.append(candidate)

    print(f'Totally found {len(benchmark_configs)} configs to benchmark')
    with open(args.out, 'w') as out_file:
        for config in benchmark_configs:
            out_file.write(config + '\n')
if __name__ == '__main__':
main()
| 7,048 | 41.209581 | 92 | py |
DDOD | DDOD-main/.dev_scripts/gather_models.py | import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict
import mmcv
import torch
import yaml
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Dump ``data`` to YAML while preserving ``OrderedDict`` key order.

    Mirrors the ``yaml.dump`` signature; extra keyword args are forwarded.
    """
    class OrderedDumper(Dumper):
        # Local subclass so the representer is not registered on the global
        # Dumper class passed in by the caller.
        pass
    def _dict_representer(dumper, data):
        # Emit OrderedDict items as a plain YAML mapping in insertion order.
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwds)
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Loads ``in_file``, removes the optimizer state (for a smaller file),
    saves the result to ``out_file`` and renames it to
    ``<out_file stem>-<first 8 sha256 hex chars>.pth``.

    Args:
        in_file (str): path of the raw training checkpoint.
        out_file (str): path for the intermediate processed checkpoint.

    Returns:
        str: path of the final, hash-suffixed checkpoint file.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # Compare versions numerically: the previous lexical comparison
    # (torch.__version__ >= '1.6') picked the wrong branch on torch >= 1.10.
    major_minor = tuple(
        int(part) for part in torch.__version__.split('+')[0].split('.')[:2])
    if major_minor >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # Strip the '.pth' *suffix*. str.rstrip('.pth') removes any trailing run
    # of the characters {'.', 'p', 't', 'h'}, mangling names like 'depth.pth'.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    # Wait for the rename to finish (Popen was fire-and-forget, so callers
    # could observe a path that does not exist yet).
    subprocess.run(['mv', out_file, final_file], check=True)
    return final_file
def get_final_epoch(config):
    """Return the configured last training epoch for ``config``.

    ``config`` is a config file path relative to ``./configs``.
    """
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return cfg.runner.max_epochs
def get_real_epoch(config):
    """Return the effective number of training epochs for ``config``.

    With ``RepeatDataset`` each epoch traverses the dataset ``times`` times,
    so the nominal epoch count is scaled accordingly.
    """
    cfg = mmcv.Config.fromfile('./configs/' + config)
    epoch = cfg.runner.max_epochs
    if cfg.data.train.type == 'RepeatDataset':
        epoch *= cfg.data.train.times
    return epoch
def get_final_results(log_json_path, epoch, results_lut):
    """Extract the memory footprint and evaluation metrics for one epoch.

    Reads an mmcv-style ``*.log.json`` file (one JSON object per line) and
    collects, for the given epoch, the ``memory`` value from the train record
    and every key from ``results_lut`` present in the val record.

    Args:
        log_json_path (str): path to the JSON-lines training log.
        epoch (int): epoch whose records are extracted.
        results_lut (Iterable[str]): metric keys to look up in val records.

    Returns:
        dict: collected values; empty if the epoch never appears.
    """
    results = {}
    with open(log_json_path, 'r') as log_file:
        for raw_line in log_file:
            record = json.loads(raw_line)
            # Env/meta lines carry no 'mode' key and are skipped.
            if 'mode' not in record:
                continue
            matches_epoch = record['epoch'] == epoch
            if record['mode'] == 'train' and matches_epoch:
                results['memory'] = record['memory']
            if record['mode'] == 'val' and matches_epoch:
                for key in results_lut:
                    if key in record:
                        results[key] = record[key]
    return results
def get_dataset_name(config):
    """Map a config's ``dataset_type`` to a human-readable dataset name.

    Raises KeyError for dataset types not listed in ``name_map``.
    """
    # If there are more dataset, add here.
    name_map = dict(
        CityscapesDataset='Cityscapes',
        CocoDataset='COCO',
        DeepFashionDataset='Deep Fashion',
        LVISV05Dataset='LVIS v0.5',
        LVISV1Dataset='LVIS v1',
        VOCDataset='Pascal VOC',
        WIDERFaceDataset='WIDER Face')
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos to Papers-With-Code metafile entries.

    Returns a dict mapping each config folder name to a list of ordered
    per-model records (name, config, metadata, results, weights URL).
    """
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])
        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        epochs = get_real_epoch(model['config'])
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        meta_data['Epochs'] = epochs
        pwc_model_info['Metadata'] = meta_data
        # get dataset name
        dataset_name = get_dataset_name(model['config'])
        # get results
        results = []
        # if there are more metrics, add here.
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        pwc_model_info['Results'] = results
        link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        # NOTE(review): rstrip('.py') strips a trailing character *set*, not
        # the suffix; it misbehaves for names ending in 'p'/'y' (e.g. 'ssd300_coco.py'
        # -> 'ssd300_coco' is fine, but 'x_copy.py' would lose chars) — verify.
        link_string += '{}/{}'.format(model['config'].rstrip('.py'),
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        if cfg_folder_name in pwc_files:
            pwc_files[cfg_folder_name].append(pwc_model_info)
        else:
            pwc_files[cfg_folder_name] = [pwc_model_info]
    return pwc_files
def parse_args():
    """Parse the two positional CLI arguments: benchmark root and output dir.

    Returns:
        argparse.Namespace: with ``root`` and ``out`` string attributes.
    """
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    positionals = (
        ('root', 'root path of benchmarked models to be gathered'),
        ('out', 'output path of gathered models to be stored'),
    )
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, type=str, help=help_text)
    return parser.parse_args()
def main():
    """Gather finished benchmark runs, publish stripped checkpoints and logs,
    and emit a model_info.json plus per-folder PWC metafiles under ``out``.
    """
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)
    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')
    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        # check whether the exps is finished
        final_epoch = get_final_epoch(used_config)
        final_model = 'epoch_{}.pth'.format(final_epoch)
        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue
        # get the latest logs
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        # case when using VOC, the evaluation key is only 'mAP'
        results_lut = [key + '_mAP' for key in results_lut if 'mAP' not in key]
        model_performance = get_final_results(log_json_path, final_epoch,
                                              results_lut)
        # NOTE(review): get_final_results returns a dict (possibly empty),
        # never None, so this guard looks dead — confirm intent.
        if model_performance is None:
            continue
        # Timestamp prefix of the log file names this training run.
        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config=used_config,
                results=model_performance,
                epochs=final_epoch,
                model_time=model_time,
                log_json_path=osp.split(log_json_path)[-1]))
    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # NOTE(review): rstrip('.py') strips a trailing character set, not a
        # suffix — config names ending in 'p'/'y' could be mangled; verify.
        model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
        mmcv.mkdir_or_exist(model_publish_dir)
        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      'epoch_{}.pth'.format(model['epochs']))
        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)
        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     model['log_json_path'].rstrip('.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))
        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_cconfig_path = osp.split(config_path)[-1]
        shutil.copy(config_path,
                    osp.join(model_publish_dir, target_cconfig_path))
        model['model_path'] = final_model_path
        publish_model_infos.append(model)
    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))
    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
if __name__ == '__main__':
main()
| 9,240 | 34.817829 | 79 | py |
DDOD | DDOD-main/.dev_scripts/benchmark_inference_fps.py | import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from tools.analysis_tools.benchmark import measure_inferense_speed
def parse_args():
    """Parse CLI arguments for the FPS benchmark script.

    Also exports ``LOCAL_RANK`` into the environment when the launcher has
    not set it, so distributed init can rely on it.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='MMDet benchmark a model of FPS')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    parser.add_argument(
        '--round-num',
        type=int,
        default=1,
        help='round a number to a given precision in decimal digits')
    parser.add_argument(
        '--out', type=str, help='output path of gathered fps to be stored')
    parser.add_argument(
        '--max-iter', type=int, default=400, help='num of max iter')
    parser.add_argument(
        '--log-interval', type=int, default=40, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        # Fixed missing space: the adjacent literals previously rendered as
        # "increasethe inference speed" in --help output.
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
if __name__ == '__main__':
    args = parse_args()
    assert args.round_num >= 0
    # The benchmark config lists the models (config path + checkpoint) to time.
    config = Config.fromfile(args.config)
    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)
    # Maps each model config path to its measured fps / ms-per-image.
    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            # NOTE(review): record_metrics is never used below — verify.
            record_metrics = model_info['metric']
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                fps = measure_inferense_speed(cfg, checkpoint, args.max_iter,
                                              args.log_interval,
                                              args.fuse_conv_bn)
                print(
                    f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                    f'times per image: {1000/fps:.{args.round_num}f} ms / img',
                    flush=True)
                result_dict[cfg_path] = dict(
                    fps=round(fps, args.round_num),
                    ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                # NOTE(review): prints the whole benchmark Config object;
                # cfg_path was probably intended here — confirm.
                print(f'{config} error: {repr(e)}')
                # Failed models are recorded as 0 (not a dict like successes).
                result_dict[cfg_path] = 0
    if args.out:
        mmcv.mkdir_or_exist(args.out)
        mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
| 3,578 | 37.074468 | 79 | py |
DDOD | DDOD-main/.dev_scripts/batch_test_list.py | # yapf: disable
atss = dict(
config='configs/atss/atss_r50_fpn_1x_coco.py',
checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
carafe = dict(
config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
dict(
config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
eval='bbox',
metric=dict(bbox_mAP=40.3),
),
dict(
config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
),
]
cascade_rpn = dict(
config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
centripetalnet = dict(
config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py', # noqa
checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.7),
)
cornernet = dict(
config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py',
checkpoint='cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.2),
)
dcn = dict(
config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
eval='bbox',
metric=dict(bbox_mAP=41.3),
)
deformable_detr = dict(
config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.5),
)
detectors = dict(
config='configs/detectors/detectors_htc_r50_1x_coco.py',
checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
config='configs/detr/detr_r50_8x2_150e_coco.py',
checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
eval='bbox',
metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
dynamic_rcnn = dict(
config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
eval='bbox',
metric=dict(bbox_mAP=38.9),
)
empirical_attention = dict(
config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py', # noqa
checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
fcos = dict(
config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py', # noqa
checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
free_anchor = dict(
config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
config='configs/gfl/gfl_r50_fpn_1x_coco.py',
checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
eval='bbox',
metric=dict(bbox_mAP=40.2),
)
gn = dict(
config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
eval='bbox',
metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
groie = dict(
config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
guided_anchoring = [
dict(
config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py', # noqa
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
),
dict(
config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.6),
),
]
hrnet = dict(
config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py',
checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
htc = dict(
config='configs/htc/htc_r50_fpn_1x_coco.py',
checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
libra_rcnn = dict(
config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
mask_rcnn = dict(
config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
ms_rcnn = dict(
config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', # noqa
checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
paa = dict(
config='configs/paa/paa_r50_fpn_1x_coco.py',
checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
pisa = dict(
config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
eval='bbox',
metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
regnet = dict(
config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py',
checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
res2net = dict(
config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
eval='bbox',
metric=dict(bbox_mAP=43.0),
)
resnest = dict(
config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py', # noqa
checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.0),
)
retinanet = dict(
config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
eval='bbox',
metric=dict(bbox_mAP=36.5),
)
rpn = dict(
config='configs/rpn/rpn_r50_fpn_1x_coco.py',
checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
eval='proposal_fast',
metric=dict(AR_1000=58.2),
)
sabl = [
dict(
config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py ',
checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.7),
),
dict(
config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
eval='bbox',
metric=dict(bbox_mAP=39.9),
),
]
scnet = dict(
config='configs/scnet/scnet_r50_fpn_1x_coco.py',
checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
eval='bbox',
metric=dict(bbox_mAP=43.5),
)
sparse_rcnn = dict(
config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
ssd = dict(
config='configs/ssd/ssd300_coco.py',
checkpoint='ssd300_coco_20200307-a92d2092.pth',
eval='bbox',
metric=dict(bbox_mAP=25.6),
)
tridentnet = dict(
config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
eval='bbox',
metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
eval='bbox',
metric=dict(bbox_mAP=41.6),
)
yolact = dict(
config='configs/yolact/yolact_r50_1x8_coco.py',
checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
config='configs/yolo/yolov3_d53_320_273e_coco.py',
checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
eval='bbox',
metric=dict(bbox_mAP=27.9),
)
yolof = dict(
config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
centernet = dict(
config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
checkpoint='centernet_resnet18_dcnv2_140e_coco_20210520_101209-da388ba2.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=29.5),
)
# yapf: enable
| 12,184 | 34.318841 | 117 | py |
DDOD | DDOD-main/tests/test_runtime/async_benchmark.py | import asyncio
import os
import shutil
import urllib
import mmcv
import torch
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time
async def main():
    """Benchmark between async and synchronous inference interfaces.
    Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:
    async sync
    7981.79 ms 9660.82 ms
    8074.52 ms 9660.94 ms
    7976.44 ms 9406.83 ms
    Async variant takes about 0.83-0.85 of the time of the synchronous
    interface.
    """
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.join(project_dir, '..')
    config_file = os.path.join(
        project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
    checkpoint_file = os.path.join(
        project_dir,
        'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
    # Download the checkpoint on first run, then cache it locally.
    if not os.path.exists(checkpoint_file):
        url = ('https://download.openmmlab.com/mmdetection/v2.0'
               '/mask_rcnn/mask_rcnn_r50_fpn_1x_coco'
               '/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
        print(f'Downloading {url} ...')
        local_filename, _ = urllib.request.urlretrieve(url)
        os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
        shutil.move(local_filename, checkpoint_file)
        print(f'Saved as {checkpoint_file}')
    else:
        print(f'Using existing checkpoint {checkpoint_file}')
    device = 'cuda:0'
    model = init_detector(
        config_file, checkpoint=checkpoint_file, device=device)
    # queue is used for concurrent inference of multiple images
    streamqueue = asyncio.Queue()
    # queue size defines concurrency level
    streamqueue_size = 4
    for _ in range(streamqueue_size):
        streamqueue.put_nowait(torch.cuda.Stream(device=device))
    # test a single image and show the results
    img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))
    # warmup
    await async_inference_detector(model, img)
    async def detect(img):
        # Borrow a CUDA stream from the queue for the duration of one call.
        async with concurrent(streamqueue):
            return await async_inference_detector(model, img)
    num_of_images = 20
    # Time the concurrent (async) path.
    with profile_time('benchmark', 'async'):
        tasks = [
            asyncio.create_task(detect(img)) for _ in range(num_of_images)
        ]
        async_results = await asyncio.gather(*tasks)
    # Time the sequential (sync) path on the default CUDA stream.
    with torch.cuda.stream(torch.cuda.default_stream()):
        with profile_time('benchmark', 'sync'):
            sync_results = [
                inference_detector(model, img) for _ in range(num_of_images)
            ]
    # Render one result from each path for a visual sanity check.
    result_dir = os.path.join(project_dir, 'demo')
    model.show_result(
        img,
        async_results[0],
        score_thr=0.5,
        show=False,
        out_file=os.path.join(result_dir, 'result_async.jpg'))
    model.show_result(
        img,
        sync_results[0],
        score_thr=0.5,
        show=False,
        out_file=os.path.join(result_dir, 'result_sync.jpg'))
# Script entry point: run the whole benchmark on a fresh event loop.
if __name__ == '__main__':
    asyncio.run(main())
| 3,167 | 30.058824 | 77 | py |
DDOD | DDOD-main/tests/test_runtime/test_async.py | """Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
    """Base test case that drives coroutine test methods with a timeout."""

    use_default_loop = False
    forbid_get_event_loop = True
    # Per-test timeout (seconds), overridable via the environment.
    TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))

    def _run_test_method(self, method):
        # If the test method is a coroutine, run it on the loop under a hard
        # timeout so a hung test cannot stall the whole suite.
        ret = method()
        if asyncio.iscoroutine(ret):
            timed = asyncio.wait_for(ret, timeout=self.TEST_TIMEOUT)
            self.loop.run_until_complete(timed)
class MaskRCNNDetector:
    """Async wrapper around an mmdet detector.

    Parameters
    ----------
    model_config : str
        Path to the model config file.
    checkpoint : str, optional
        Path to a checkpoint to load (None builds an uninitialized model).
    streamqueue_size : int
        Number of CUDA streams, i.e. the max concurrency of `apredict`.
    device : str
        Torch device string for the model and its streams.
    """

    def __init__(self,
                 model_config,
                 checkpoint=None,
                 streamqueue_size=3,
                 device='cuda:0'):
        self.streamqueue_size = streamqueue_size
        self.device = device
        # build the model and load checkpoint
        # BUG FIX: the `checkpoint` argument was previously ignored
        # (`checkpoint=None` was hard-coded); forward it instead. Existing
        # callers pass no checkpoint, so behavior is unchanged for them.
        self.model = init_detector(
            model_config, checkpoint=checkpoint, device=self.device)
        self.streamqueue = None

    async def init(self):
        """Populate the CUDA stream queue used to bound concurrency."""
        self.streamqueue = asyncio.Queue()
        for _ in range(self.streamqueue_size):
            stream = torch.cuda.Stream(device=self.device)
            self.streamqueue.put_nowait(stream)

    if sys.version_info >= (3, 7):

        async def apredict(self, img):
            """Run async inference on one image (file path or array)."""
            if isinstance(img, str):
                img = mmcv.imread(img)
            # Borrow a stream for this call; limits in-flight inferences.
            async with concurrent(self.streamqueue):
                result = await async_inference_detector(self.model, img)
            return result
class AsyncInferenceTestCase(AsyncTestCase):
    # The async inference API is only defined on Python >= 3.7.

    if sys.version_info >= (3, 7):

        async def test_simple_inference(self):
            # End-to-end smoke test of the async detector on one demo image.
            if not torch.cuda.is_available():
                import pytest
                pytest.skip('test requires GPU and torch+cuda')
            ori_grad_enabled = torch.is_grad_enabled()
            # NOTE(review): `__name__` here looks like it should be
            # `__file__`; with `__name__` the dirname chain yields '' so the
            # paths below resolve relative to the CWD — confirm intent.
            root_dir = os.path.dirname(os.path.dirname(__name__))
            model_config = os.path.join(
                root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
            detector = MaskRCNNDetector(model_config)
            await detector.init()
            img_path = os.path.join(root_dir, 'demo/demo.jpg')
            bboxes, _ = await detector.apredict(img_path)
            self.assertTrue(bboxes)
            # asy inference detector will hack grad_enabled,
            # so restore here to avoid it to influence other tests
            torch.set_grad_enabled(ori_grad_enabled)
| 2,560 | 29.855422 | 75 | py |
DDOD | DDOD-main/tests/test_runtime/test_config.py | from os.path import dirname, exists, join, relpath
from unittest.mock import Mock
import pytest
import torch
from mmcv.runner import build_optimizer
from mmdet.core import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.utils import NumClassCheckHook
def _get_config_directory():
    """Locate the `configs` directory of the mmdetection source tree."""
    try:
        # Assume we are running from within the source mmdetection repo.
        repo_root = join(dirname(dirname(__file__)), '..')
    except NameError:
        # `__file__` is undefined in some IPython sessions; fall back to the
        # installed mmdet package location.
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    config_dpath = join(repo_root, 'configs')
    if not exists(config_dpath):
        raise Exception('Cannot find config path')
    return config_dpath
def _check_numclasscheckhook(detector, config_mod):
    """Run NumClassCheckHook for the train and val splits of the config.

    The hook is exercised twice per split: once with the dataset CLASSES
    populated and once with CLASSES set to None, since it must tolerate both.
    """
    fake_runner = Mock()
    fake_runner.model = detector

    def _dataset_name_and_classes(ds_cfg):
        # Unwrap dataset wrappers (`RepeatDataset`, `ClassBalancedDataset`,
        # ...) down to the innermost dataset config.
        if isinstance(ds_cfg, (list, tuple)):
            ds_cfg = ds_cfg[0]
        while 'dataset' in ds_cfg:
            ds_cfg = ds_cfg['dataset']
        # ConcatDataset keeps a list of datasets; use the first one.
        if isinstance(ds_cfg, (list, tuple)):
            ds_cfg = ds_cfg[0]
        return ds_cfg['type'], ds_cfg.get('classes', None)

    hook = NumClassCheckHook()
    for split, run_check in (('train', hook.before_train_epoch),
                             ('val', hook.before_val_epoch)):
        ds_name, classes = _dataset_name_and_classes(
            config_mod['data'][split])
        if classes is None:
            # Fall back to the registered dataset's default class names.
            classes = DATASETS.get(ds_name).CLASSES
        fake_runner.data_loader.dataset.CLASSES = classes
        run_check(fake_runner)
        fake_runner.data_loader.dataset.CLASSES = None
        run_check(fake_runner)
def test_config_build_detector():
    """Test that all detection models defined in the configs can be
    initialized."""
    from mmcv import Config
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    import glob
    # NOTE: '**' without recursive=True matches exactly one directory level,
    # which fits the configs/<family>/*.py layout.
    config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
    # Skip partial base configs and shared "common" fragments.
    config_fpaths = [
        p for p in config_fpaths
        if p.find('_base_') == -1 and p.find('common') == -1
    ]
    config_names = [relpath(p, config_dpath) for p in config_fpaths]
    print(f'Using {len(config_names)} config files')
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = Config.fromfile(config_fpath)
        # Touch the attribute so a missing `model` section fails loudly here.
        config_mod.model
        print(f'Building detector, config_fpath = {config_fpath}')
        # Remove pretrained keys to allow for testing in an offline environment
        if 'pretrained' in config_mod.model:
            config_mod.model['pretrained'] = None
        detector = build_detector(config_mod.model)
        assert detector is not None
        _check_numclasscheckhook(detector, config_mod)
        # The optimizer config must also be buildable for every model.
        optimizer = build_optimizer(detector, config_mod.optimizer)
        assert isinstance(optimizer, torch.optim.Optimizer)
        if 'roi_head' in config_mod.model.keys():
            # for two stage detector
            # detectors must have bbox head
            assert detector.roi_head.with_bbox and detector.with_bbox
            assert detector.roi_head.with_mask == detector.with_mask
            head_config = config_mod.model['roi_head']
            _check_roi_head(head_config, detector.roi_head)
        # else:
        #     # for single stage detector
        #     # detectors must have bbox head
        #     # assert detector.with_bbox
        #     head_config = config_mod.model['bbox_head']
        #     _check_bbox_head(head_config, detector.bbox_head)
def _check_roi_head(config, head):
    """Assert that a built RoI head is consistent with its config.

    Covers the bbox branch, the optional mask branch, and architecture
    specific extras (cascade/HTC stage counts, mask-IoU head, grid head).
    """
    # check consistency between head_config and roi_head
    assert config['type'] == head.__class__.__name__
    # check roi_align
    bbox_roi_cfg = config.bbox_roi_extractor
    bbox_roi_extractor = head.bbox_roi_extractor
    _check_roi_extractor(bbox_roi_cfg, bbox_roi_extractor)
    # check bbox head infos
    bbox_cfg = config.bbox_head
    bbox_head = head.bbox_head
    _check_bbox_head(bbox_cfg, bbox_head)
    if head.with_mask:
        # check roi_align
        if config.mask_roi_extractor:
            mask_roi_cfg = config.mask_roi_extractor
            mask_roi_extractor = head.mask_roi_extractor
            # The mask extractor must also agree with the bbox extractor.
            _check_roi_extractor(mask_roi_cfg, mask_roi_extractor,
                                 bbox_roi_extractor)
        # check mask head infos
        mask_head = head.mask_head
        mask_cfg = config.mask_head
        _check_mask_head(mask_cfg, mask_head)
    # check arch specific settings, e.g., cascade/htc
    if config['type'] in ['CascadeRoIHead', 'HybridTaskCascadeRoIHead']:
        assert config.num_stages == len(head.bbox_head)
        assert config.num_stages == len(head.bbox_roi_extractor)
        if head.with_mask:
            assert config.num_stages == len(head.mask_head)
            assert config.num_stages == len(head.mask_roi_extractor)
    elif config['type'] in ['MaskScoringRoIHead']:
        assert (hasattr(head, 'mask_iou_head')
                and head.mask_iou_head is not None)
        mask_iou_cfg = config.mask_iou_head
        mask_iou_head = head.mask_iou_head
        assert (mask_iou_cfg.fc_out_channels ==
                mask_iou_head.fc_mask_iou.in_features)
    elif config['type'] in ['GridRoIHead']:
        grid_roi_cfg = config.grid_roi_extractor
        grid_roi_extractor = head.grid_roi_extractor
        _check_roi_extractor(grid_roi_cfg, grid_roi_extractor,
                             bbox_roi_extractor)
        # BUG FIX: this line previously used `=` (silently overwriting the
        # config) instead of asserting equality like every other check here.
        assert config.grid_head.grid_points == head.grid_head.grid_points
def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
import torch.nn as nn
# Separate roi_extractor and prev_roi_extractor checks for flexibility
if isinstance(roi_extractor, nn.ModuleList):
roi_extractor = roi_extractor[0]
if prev_roi_extractor and isinstance(prev_roi_extractor, nn.ModuleList):
prev_roi_extractor = prev_roi_extractor[0]
assert (len(config.featmap_strides) == len(roi_extractor.roi_layers))
assert (config.out_channels == roi_extractor.out_channels)
from torch.nn.modules.utils import _pair
assert (_pair(config.roi_layer.output_size) ==
roi_extractor.roi_layers[0].output_size)
if 'use_torchvision' in config.roi_layer:
assert (config.roi_layer.use_torchvision ==
roi_extractor.roi_layers[0].use_torchvision)
elif 'aligned' in config.roi_layer:
assert (
config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned)
if prev_roi_extractor:
assert (roi_extractor.roi_layers[0].aligned ==
prev_roi_extractor.roi_layers[0].aligned)
assert (roi_extractor.roi_layers[0].use_torchvision ==
prev_roi_extractor.roi_layers[0].use_torchvision)
def _check_mask_head(mask_cfg, mask_head):
import torch.nn as nn
if isinstance(mask_cfg, list):
for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head):
_check_mask_head(single_mask_cfg, single_mask_head)
elif isinstance(mask_head, nn.ModuleList):
for single_mask_head in mask_head:
_check_mask_head(mask_cfg, single_mask_head)
else:
assert mask_cfg['type'] == mask_head.__class__.__name__
assert mask_cfg.in_channels == mask_head.in_channels
class_agnostic = mask_cfg.get('class_agnostic', False)
out_dim = (1 if class_agnostic else mask_cfg.num_classes)
if hasattr(mask_head, 'conv_logits'):
assert (mask_cfg.conv_out_channels ==
mask_head.conv_logits.in_channels)
assert mask_head.conv_logits.out_channels == out_dim
else:
assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features
assert (mask_head.fc_logits.out_features == out_dim *
mask_head.output_area)
def _check_bbox_head(bbox_cfg, bbox_head):
    """Assert that (possibly cascaded) bbox heads match their configs."""
    import torch.nn as nn
    if isinstance(bbox_cfg, list):
        # Cascade: per-stage configs zipped with per-stage heads.
        for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head):
            _check_bbox_head(single_bbox_cfg, single_bbox_head)
    elif isinstance(bbox_head, nn.ModuleList):
        # One shared config applied to every stage head.
        for single_bbox_head in bbox_head:
            _check_bbox_head(bbox_cfg, single_bbox_head)
    else:
        assert bbox_cfg['type'] == bbox_head.__class__.__name__
        if bbox_cfg['type'] == 'SABLHead':
            assert bbox_cfg.cls_in_channels == bbox_head.cls_in_channels
            assert bbox_cfg.reg_in_channels == bbox_head.reg_in_channels
            cls_out_channels = bbox_cfg.get('cls_out_channels', 1024)
            assert (cls_out_channels == bbox_head.fc_cls.in_features)
            assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features)
        elif bbox_cfg['type'] == 'DIIHead':
            assert bbox_cfg['num_ffn_fcs'] == bbox_head.ffn.num_fcs
            # 3 means FC and LN and Relu
            assert bbox_cfg['num_cls_fcs'] == len(bbox_head.cls_fcs) // 3
            assert bbox_cfg['num_reg_fcs'] == len(bbox_head.reg_fcs) // 3
            assert bbox_cfg['in_channels'] == bbox_head.in_channels
            assert bbox_cfg['in_channels'] == bbox_head.fc_cls.in_features
            assert bbox_cfg['in_channels'] == bbox_head.fc_reg.in_features
            assert bbox_cfg['in_channels'] == bbox_head.attention.embed_dims
            assert bbox_cfg[
                'feedforward_channels'] == bbox_head.ffn.feedforward_channels
        else:
            assert bbox_cfg.in_channels == bbox_head.in_channels
            with_cls = bbox_cfg.get('with_cls', True)
            if with_cls:
                fc_out_channels = bbox_cfg.get('fc_out_channels', 2048)
                assert (fc_out_channels == bbox_head.fc_cls.in_features)
                # Some losses reserve extra classification channels.
                if bbox_head.custom_cls_channels:
                    assert (bbox_head.loss_cls.get_cls_channels(
                        bbox_head.num_classes) == bbox_head.fc_cls.out_features
                            )
                else:
                    assert (bbox_cfg.num_classes +
                            1 == bbox_head.fc_cls.out_features)
            with_reg = bbox_cfg.get('with_reg', True)
            if with_reg:
                # Class-agnostic regression shares one 4-dim delta per RoI.
                out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 *
                           bbox_cfg.num_classes)
                assert bbox_head.fc_reg.out_features == out_dim
def _check_anchorhead(config, head):
# check consistency between head_config and roi_head
assert config['type'] == head.__class__.__name__
assert config.in_channels == head.in_channels
num_classes = (
config.num_classes -
1 if config.loss_cls.get('use_sigmoid', False) else config.num_classes)
if config['type'] == 'ATSSHead':
assert (config.feat_channels == head.atss_cls.in_channels)
assert (config.feat_channels == head.atss_reg.in_channels)
assert (config.feat_channels == head.atss_centerness.in_channels)
elif config['type'] == 'SABLRetinaHead':
assert (config.feat_channels == head.retina_cls.in_channels)
assert (config.feat_channels == head.retina_bbox_reg.in_channels)
assert (config.feat_channels == head.retina_bbox_cls.in_channels)
else:
assert (config.in_channels == head.conv_cls.in_channels)
assert (config.in_channels == head.conv_reg.in_channels)
assert (head.conv_cls.out_channels == num_classes * head.num_anchors)
assert head.fc_reg.out_channels == 4 * head.num_anchors
# Only tests a representative subset of configurations
# TODO: test pipelines using Albu, current Albu throw None given empty GT
@pytest.mark.parametrize(
    'config_rpath',
    [
        'wider_face/ssd300_wider_face.py',
        'pascal_voc/ssd300_voc0712.py',
        'pascal_voc/ssd512_voc0712.py',
        # 'albu_example/mask_rcnn_r50_fpn_1x.py',
        'foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py',
        'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py'
    ])
def test_config_data_pipeline(config_rpath):
    """Test whether the data pipeline is valid and can process corner cases.
    CommandLine:
        xdoctest -m tests/test_runtime/
            test_config.py test_config_build_data_pipeline
    """
    from mmcv import Config
    from mmdet.datasets.pipelines import Compose
    import numpy as np
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    def dummy_masks(h, w, num_obj=3, mode='bitmap'):
        # Random GT masks, either as bitmaps or as polygon point lists.
        assert mode in ('polygon', 'bitmap')
        if mode == 'bitmap':
            masks = np.random.randint(0, 2, (num_obj, h, w), dtype=np.uint8)
            masks = BitmapMasks(masks, h, w)
        else:
            masks = []
            for i in range(num_obj):
                masks.append([])
                masks[-1].append(
                    np.random.uniform(0, min(h - 1, w - 1), (8 + 4 * i, )))
                masks[-1].append(
                    np.random.uniform(0, min(h - 1, w - 1), (10 + 4 * i, )))
            masks = PolygonMasks(masks, h, w)
        return masks
    config_fpath = join(config_dpath, config_rpath)
    cfg = Config.fromfile(config_fpath)
    # remove loading pipeline
    loading_pipeline = cfg.train_pipeline.pop(0)
    loading_ann_pipeline = cfg.train_pipeline.pop(0)
    cfg.test_pipeline.pop(0)
    train_pipeline = Compose(cfg.train_pipeline)
    test_pipeline = Compose(cfg.test_pipeline)
    print(f'Building data pipeline, config_fpath = {config_fpath}')
    print(f'Test training data pipeline: \n{train_pipeline!r}')
    img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
    if loading_pipeline.get('to_float32', False):
        img = img.astype(np.float32)
    # Mirror the removed LoadAnnotations step: its poly2mask flag decides the
    # GT mask representation the rest of the pipeline expects.
    mode = 'bitmap' if loading_ann_pipeline.get('poly2mask',
                                                True) else 'polygon'
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
        gt_labels=np.array([1], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = train_pipeline(results)
    assert output_results is not None
    print(f'Test testing data pipeline: \n{test_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
        gt_labels=np.array([1], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = test_pipeline(results)
    assert output_results is not None
    # test empty GT
    print('Test empty GT with training data pipeline: '
          f'\n{train_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.zeros((0, 4), dtype=np.float32),
        gt_labels=np.array([], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = train_pipeline(results)
    assert output_results is not None
    print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.zeros((0, 4), dtype=np.float32),
        gt_labels=np.array([], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = test_pipeline(results)
    assert output_results is not None
| 17,127 | 39.112412 | 79 | py |
DDOD | DDOD-main/tests/test_runtime/test_eval_hook.py | import os.path as osp
import tempfile
import unittest.mock as mock
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmdet.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
    """Minimal one-item dataset stub for exercising EvalHook.

    `eval_result` is the fixed per-epoch mAP sequence that the `EvalDataset`
    subclass replays from its `evaluate` override.
    """
    def __init__(self):
        self.index = 0
        self.eval_result = [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6]
    def __getitem__(self, idx):
        results = dict(imgs=torch.tensor([1]))
        return results
    def __len__(self):
        return 1
    # `create_autospec` replaces `evaluate` with a signature-checking mock,
    # so this base class records calls without computing any metric.
    @mock.create_autospec
    def evaluate(self, results, logger=None):
        pass
class EvalDataset(ExampleDataset):
    """Dataset whose evaluate() replays the fixed mAP sequence, one score
    per call, advancing `self.index` each time."""

    def evaluate(self, results, logger=None):
        score = self.eval_result[self.index]
        output = OrderedDict(mAP=score, index=self.index, score=score)
        self.index += 1
        return output
class ExampleModel(nn.Module):
    """Minimal model stub: identity forward and a constant train_step."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Linear(1, 1)
        self.test_cfg = None

    def forward(self, imgs, rescale=False, return_loss=False):
        # The hook under test only needs a pass-through forward.
        return imgs

    def train_step(self, data_batch, optimizer, **kwargs):
        # Constant training outputs; values are irrelevant to EvalHook logic.
        return {
            'loss': 0.5,
            'log_vars': {'accuracy': 0.98},
            'num_samples': 1,
        }
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookCls', (EvalHook, DistEvalHook))
def test_eval_hook(EvalHookCls):
    """End-to-end checks of (Dist)EvalHook: argument validation, `save_best`
    bookkeeping for each rule, and resuming from a checkpoint.

    `EvalDataset.evaluate` replays the mAP sequence
    [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6], so over 8 epochs the best
    epoch under rule 'greater' is epoch 4 (0.7) and under 'less' is
    epoch 6 (0.05).
    """
    with pytest.raises(TypeError):
        # dataloader must be a pytorch DataLoader
        test_dataset = ExampleDataset()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                # BUG FIX: was `num_worker=0`; that typo made DataLoader
                # itself raise TypeError before EvalHookCls was ever called,
                # so this check passed for the wrong reason.
                num_workers=0,
                shuffle=False)
        ]
        EvalHookCls(data_loader)
    with pytest.raises(KeyError):
        # rule must be in keys of rule_map
        test_dataset = ExampleDataset()
        data_loader = DataLoader(
            test_dataset,
            batch_size=1,
            sampler=None,
            num_workers=0,
            shuffle=False)
        EvalHookCls(data_loader, save_best='auto', rule='unsupport')
    with pytest.raises(ValueError):
        # key_indicator must be valid when rule_map is None
        test_dataset = ExampleDataset()
        data_loader = DataLoader(
            test_dataset,
            batch_size=1,
            sampler=None,
            num_workers=0,
            shuffle=False)
        EvalHookCls(data_loader, save_best='unsupport')
    optimizer_cfg = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    optimizer = build_optimizer(model, optimizer_cfg)
    data_loader = DataLoader(test_dataset, batch_size=1)
    # save_best=None: no best-checkpoint bookkeeping should happen.
    eval_hook = EvalHookCls(data_loader, save_best=None)
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        assert runner.meta is None or 'best_score' not in runner.meta[
            'hook_msgs']
        assert runner.meta is None or 'best_ckpt' not in runner.meta[
            'hook_msgs']
    # when `save_best` is set to 'auto', first metric will be used.
    loader = DataLoader(EvalDataset(), batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, interval=1, save_best='auto')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        # Best mAP 0.7 occurs at epoch 4 of the replayed sequence.
        real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
    # Explicit `save_best='mAP'` must behave identically to 'auto'.
    loader = DataLoader(EvalDataset(), batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, interval=1, save_best='mAP')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
    # Custom metric key with an explicit 'greater' rule.
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(
        data_loader, interval=1, save_best='score', rule='greater')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
    # Rule 'less': the minimum of the sequence (0.05, epoch 6) wins.
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, save_best='mAP', rule='less')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_6.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.05
    # Run 2 epochs, then resume from latest.pth and continue to 8 epochs;
    # the best-score bookkeeping must survive the resume.
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, save_best='mAP')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_2.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.4
        resume_from = osp.join(tmpdir, 'latest.pth')
        loader = DataLoader(ExampleDataset(), batch_size=1)
        eval_hook = EvalHookCls(data_loader, save_best='mAP')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.resume(resume_from)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
| 8,542 | 32.900794 | 79 | py |
DDOD | DDOD-main/tests/test_runtime/test_fp16.py | import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16, force_fp32
from mmcv.runner.fp16_utils import cast_tensor_type
def test_cast_tensor_type():
    """cast_tensor_type converts tensors of the src dtype to the dst dtype,
    recursing into dicts and lists, and passes other objects through."""
    # plain tensor: dtype is converted
    out = cast_tensor_type(torch.FloatTensor([5.]), torch.float32, torch.int32)
    assert isinstance(out, torch.Tensor)
    assert out.dtype == torch.int32
    # strings are returned unchanged
    out = cast_tensor_type('tensor', str, str)
    assert isinstance(out, str)
    # numpy arrays are not torch tensors, so they pass through
    out = cast_tensor_type(np.array([5.]), np.ndarray, np.ndarray)
    assert isinstance(out, np.ndarray)
    # dict: every tensor value is converted
    out = cast_tensor_type(
        dict(
            tensor_a=torch.FloatTensor([1.]),
            tensor_b=torch.FloatTensor([2.])), torch.float32, torch.int32)
    assert isinstance(out, dict)
    assert out['tensor_a'].dtype == torch.int32
    assert out['tensor_b'].dtype == torch.int32
    # list: every tensor element is converted
    out = cast_tensor_type(
        [torch.FloatTensor([1.]),
         torch.FloatTensor([2.])], torch.float32, torch.int32)
    assert isinstance(out, list)
    assert all(t.dtype == torch.int32 for t in out)
    # other python objects are returned as-is
    assert isinstance(cast_tensor_type(5, None, None), int)
def test_auto_fp16():
    """auto_fp16 casts selected fp32 inputs to half precision, but only once
    the module sets `fp16_enabled = True`."""
    with pytest.raises(TypeError):
        # ExampleObject is not a subclass of nn.Module
        class ExampleObject:
            @auto_fp16()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # apply to all input args
    class ExampleModule(nn.Module):
        @auto_fp16()
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    # Without fp16_enabled the decorator is a no-op.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    model.fp16_enabled = True
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
    # apply to specified input args
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', ))
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    model.fp16_enabled = True
    # Only `x` is listed in apply_to, so `y` keeps its dtype.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.float32
    # apply to optional input args
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    model.fp16_enabled = True
    # Keyword arguments listed in apply_to are cast too; `z` is not listed.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
        assert output_z.dtype == torch.float32
    # out_fp32=True
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    model.fp16_enabled = True
    # out_fp32 converts the half outputs back to fp32 after the call.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
        assert output_z.dtype == torch.float32
def test_force_fp32():
    """force_fp32 casts selected half-precision inputs back to fp32, but only
    once the module sets `fp16_enabled = True`."""
    with pytest.raises(TypeError):
        # ExampleObject is not a subclass of nn.Module
        class ExampleObject:
            @force_fp32()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # apply to all input args
    class ExampleModule(nn.Module):
        @force_fp32()
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    # Without fp16_enabled the decorator is a no-op.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    model.fp16_enabled = True
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
    # apply to specified input args
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', ))
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    model.fp16_enabled = True
    # Only `x` is listed in apply_to, so `y` stays half precision.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.half
    # apply to optional input args
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.half
    model.fp16_enabled = True
    # Keyword arguments listed in apply_to are cast too; `z` is not listed.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
        assert output_z.dtype == torch.half
    # out_fp16=True
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'), out_fp16=True)
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.half
    model.fp16_enabled = True
    # out_fp16 converts the fp32 outputs to half after the call.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
        assert output_z.dtype == torch.half
| 9,698 | 31.222591 | 75 | py |
DDOD | DDOD-main/tests/test_models/test_loss.py | import pytest
import torch
from mmdet.models.losses import (BalancedL1Loss, BoundedIoULoss, CIoULoss,
CrossEntropyLoss, DIoULoss,
DistributionFocalLoss, FocalLoss,
GaussianFocalLoss, GIoULoss, IoULoss, L1Loss,
MSELoss, QualityFocalLoss, SmoothL1Loss,
VarifocalLoss)
@pytest.mark.parametrize(
    'loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
    """An all-zero element-wise weight must zero out every IoU-style loss."""
    num_boxes = 10
    boxes_pred = torch.rand((num_boxes, 4))
    boxes_gt = torch.rand((num_boxes, 4))
    zero_weight = torch.zeros(num_boxes)
    assert loss_class()(boxes_pred, boxes_gt, zero_weight) == 0.
@pytest.mark.parametrize('loss_class', [
    IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss,
    SmoothL1Loss, BalancedL1Loss, FocalLoss, QualityFocalLoss,
    GaussianFocalLoss, DistributionFocalLoss, VarifocalLoss, CrossEntropyLoss
])
def test_loss_with_reduction_override(loss_class):
    """An invalid ``reduction_override`` value must raise AssertionError."""
    scores = torch.rand((10, 4))
    labels = torch.rand((10, 4))
    # Only values in [None, 'none', 'mean', 'sum'] are accepted.
    with pytest.raises(AssertionError):
        loss_class()(scores, labels, reduction_override=True)
@pytest.mark.parametrize('loss_class', [
    IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss,
    SmoothL1Loss, BalancedL1Loss
])
def test_regression_losses(loss_class):
    """Smoke-test forward passes of box-regression losses.

    Covers the plain forward, ``reduction_override``, ``avg_factor``, the
    disallowed ``avg_factor`` + ``reduction_override='sum'`` combination,
    and all allowed combinations of the two.
    """
    pred = torch.rand((10, 4))
    target = torch.rand((10, 4))
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        loss_class()(pred, target, avg_factor=10, reduction_override='sum')
    # Test loss forward with avg_factor and each allowed reduction.
    # BUGFIX: the original discarded the loop results and re-asserted a
    # stale `loss` from before the loop; capture and check each result.
    for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [FocalLoss, CrossEntropyLoss])
def test_classification_losses(loss_class):
    """Smoke-test forward passes of classification losses.

    Mirrors ``test_regression_losses`` but with class-score predictions and
    integer targets.
    """
    pred = torch.rand((10, 5))
    target = torch.randint(0, 5, (10, ))
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        loss_class()(pred, target, avg_factor=10, reduction_override='sum')
    # Test loss forward with avg_factor and each allowed reduction.
    # BUGFIX: the original discarded the loop results and re-asserted a
    # stale `loss` from before the loop; capture and check each result.
    for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
| 3,668 | 34.970588 | 79 | py |
DDOD | DDOD-main/tests/test_models/test_forward.py | """pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
    """Locate the repository's ``configs`` directory.

    Falls back to the installed ``mmdet`` package location when
    ``__file__`` is undefined (e.g. in an interactive session).
    """
    try:
        repo_root = dirname(dirname(dirname(__file__)))
    except NameError:
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    cfg_dir = join(repo_root, 'configs')
    if not exists(cfg_dir):
        raise Exception('Cannot find config path')
    return cfg_dir
def _get_config_module(fname):
    """Load config file ``fname`` from the config directory as an mmcv
    ``Config``."""
    from mmcv import Config
    cfg_path = join(_get_config_directory(), fname)
    return Config.fromfile(cfg_path)
def _get_detector_cfg(fname):
    """Return a deep copy of the ``model`` section of config ``fname``.

    Deep copying keeps tests from mutating each other's configs.
    """
    return copy.deepcopy(_get_config_module(fname).model)
def test_sparse_rcnn_forward():
    """Run Sparse R-CNN forward in train and test mode on demo inputs.

    Covers a non-empty ground-truth batch, an empty ground-truth batch,
    a dummy forward, and per-image inference.
    """
    config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
    model = _get_detector_cfg(config_path)
    model['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    detector.init_weights()
    input_shape = (1, 3, 550, 550)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train with non-empty truth batch
    # NOTE(review): the next two self-assignments are no-ops — presumably
    # leftovers from a CPU/GPU variant of this test; confirm before removing.
    detector = detector
    imgs = imgs
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_bboxes = [item for item in gt_bboxes]
    gt_labels = mm_inputs['gt_labels']
    gt_labels = [item for item in gt_labels]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    # The summed loss must be positive for a non-trivial batch.
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    detector.forward_dummy(imgs)
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    imgs = imgs
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_bboxes = [item for item in gt_bboxes]
    gt_labels = mm_inputs['gt_labels']
    gt_labels = [item for item in gt_labels]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward test: feed images one at a time, as in single-image
    # inference.
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)
def test_rpn_forward():
    """RPN detector: training forward yields a loss dict; inference runs."""
    cfg = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py')
    cfg['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(cfg)
    inputs = _demo_mm_inputs((1, 3, 224, 224))
    imgs = inputs.pop('imgs')
    img_metas = inputs.pop('img_metas')
    # Training forward: RPN only needs boxes, no class labels.
    losses = detector.forward(
        imgs, img_metas, gt_bboxes=inputs['gt_bboxes'], return_loss=True)
    assert isinstance(losses, dict)
    # Inference forward, one image per call.
    with torch.no_grad():
        batch_results = []
        for one_img, one_meta in zip([g[None, :] for g in imgs], img_metas):
            batch_results.append(
                detector.forward([one_img], [[one_meta]], return_loss=False))
@pytest.mark.parametrize(
    'cfg_file',
    [
        'retinanet/retinanet_r50_fpn_1x_coco.py',
        'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py',
        'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
        'fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py',
        'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
        # 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
        # 'atss/atss_r50_fpn_1x_coco.py',  # not ready for topk
        'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
        'yolo/yolov3_d53_mstrain-608_273e_coco.py'
    ])
def test_single_stage_forward_gpu(cfg_file):
    """Train/test forward of single-stage detectors on GPU.

    Skipped when CUDA is unavailable.
    """
    # FIX: dropped the redundant function-local `import pytest`; pytest is
    # already imported at module level.
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    model = _get_detector_cfg(cfg_file)
    model['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (2, 3, 224, 224)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    detector = detector.cuda()
    imgs = imgs.cuda()
    # Test forward train: ground truth moved to the same device.
    gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
    gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    # Test forward test: one image per call, as in single-image inference.
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
def test_faster_rcnn_ohem_forward():
    """Faster R-CNN with OHEM sampling.

    Training forward on both a non-empty and an empty ground-truth batch
    must produce a loss dict whose summed loss is positive.
    """
    model = _get_detector_cfg(
        'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
    model['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 256, 256)
    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward train with an empty truth batch (no boxes at all);
    # the loss must still be positive, e.g. from negative samples.
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
@pytest.mark.parametrize(
    'cfg_file',
    [
        'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
        'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
        'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
        'htc/htc_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
        'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ])
def test_two_stage_forward(cfg_file):
    """Train (non-empty and empty GT) and test forward of two-stage models.

    HTC/SCNet additionally consume a semantic segmentation ground truth.
    """
    models_with_semantic = [
        'htc/htc_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    if cfg_file in models_with_semantic:
        with_semantic = True
    else:
        with_semantic = False
    model = _get_detector_cfg(cfg_file)
    model['pretrained'] = None
    # Save cost: shrink the seesaw-loss LVIS model to 80 classes and relax
    # its test-time thresholds.
    if cfg_file in [
            'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ]:
        model.roi_head.bbox_head.num_classes = 80
        model.roi_head.bbox_head.loss_cls.num_classes = 80
        model.roi_head.mask_head.num_classes = 80
        model.test_cfg.rcnn.score_thr = 0.05
        model.test_cfg.rcnn.max_per_img = 100
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 256, 256)
    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[10], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    # NOTE(review): requires_grad_ is forced here so backward() can run on
    # the parsed scalar — presumably a workaround; confirm it is needed.
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[0], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()
    # Test forward test: one image per call.
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
@pytest.mark.parametrize(
    'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
    """Train/test forward of single-stage detectors on CPU."""
    cfg = _get_detector_cfg(cfg_file)
    cfg['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(cfg)
    inputs = _demo_mm_inputs((1, 3, 300, 300))
    imgs = inputs.pop('imgs')
    img_metas = inputs.pop('img_metas')
    # Training forward must yield a dict of losses.
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=inputs['gt_bboxes'],
        gt_labels=inputs['gt_labels'],
        return_loss=True)
    assert isinstance(losses, dict)
    # Inference forward, one image per call.
    with torch.no_grad():
        batch_results = []
        for one_img, one_meta in zip([g[None, :] for g in imgs], img_metas):
            batch_results.append(
                detector.forward([one_img], [[one_meta]], return_loss=False))
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
                    num_items=None, num_classes=10,
                    with_semantic=False):  # yapf: disable
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple):
            input batch dimensions
        num_items (None | List[int]):
            specifies the number of boxes in each batch item
        num_classes (int):
            number of different labels a box might have
        with_semantic (bool):
            whether to also generate a ``gt_semantic_seg`` map at 1/8 scale
    """
    from mmdet.core import BitmapMasks
    (N, C, H, W) = input_shape
    # FIX: all randomness now goes through the single seeded generator so
    # the demo inputs are reproducible; the original mixed in the unseeded
    # global ``np.random`` for masks and semantic maps.
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)
    img_metas = [{
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
        'flip': False,
        'flip_direction': None,
    } for _ in range(N)]
    gt_bboxes = []
    gt_labels = []
    gt_masks = []
    for batch_idx in range(N):
        if num_items is None:
            num_boxes = rng.randint(1, 10)
        else:
            num_boxes = num_items[batch_idx]
        # Random centers/extents in relative coordinates, converted to
        # clipped (tl_x, tl_y, br_x, br_y) pixel boxes.
        cx, cy, bw, bh = rng.rand(num_boxes, 4).T
        tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
        tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
        br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
        br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
        boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
        class_idxs = rng.randint(1, num_classes, size=num_boxes)
        gt_bboxes.append(torch.FloatTensor(boxes))
        gt_labels.append(torch.LongTensor(class_idxs))
        mask = rng.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
        gt_masks.append(BitmapMasks(mask, H, W))
    mm_inputs = {
        'imgs': torch.FloatTensor(imgs).requires_grad_(True),
        'img_metas': img_metas,
        'gt_bboxes': gt_bboxes,
        'gt_labels': gt_labels,
        'gt_bboxes_ignore': None,
        'gt_masks': gt_masks,
    }
    if with_semantic:
        # assume gt_semantic_seg using scale 1/8 of the img
        gt_semantic_seg = rng.randint(
            0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
        mm_inputs.update(
            {'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
    return mm_inputs
def test_yolact_forward():
    """YOLACT: training forward yields a loss dict; inference runs."""
    cfg = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py')
    cfg['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(cfg)
    inputs = _demo_mm_inputs((1, 3, 100, 100))
    imgs = inputs.pop('imgs')
    img_metas = inputs.pop('img_metas')
    # Training mode: YOLACT also consumes instance masks.
    detector.train()
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=inputs['gt_bboxes'],
        gt_labels=inputs['gt_labels'],
        gt_masks=inputs['gt_masks'],
        return_loss=True)
    assert isinstance(losses, dict)
    # Inference mode: one image per call, rescaled to original size.
    detector.eval()
    with torch.no_grad():
        batch_results = []
        for one_img, one_meta in zip([g[None, :] for g in imgs], img_metas):
            batch_results.append(
                detector.forward([one_img], [[one_meta]],
                                 rescale=True,
                                 return_loss=False))
def test_detr_forward():
    """DETR: train forward on non-empty and empty ground-truth batches must
    yield a positive summed loss; per-image inference must run."""
    model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py')
    model['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train with non-empty truth batch
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward test: one image per call, rescaled to original size.
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)
def test_kd_single_stage_forward():
    """Knowledge-distillation single-stage detector (LD): train forward on
    non-empty and empty ground-truth batches must yield a positive summed
    loss; per-image inference must run."""
    model = _get_detector_cfg('ld/ld_r18_gflv1_r101_fpn_coco_1x.py')
    model['pretrained'] = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train with non-empty truth batch
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward test: one image per call, rescaled to original size.
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)
def test_inference_detector():
    """Build a tiny RetinaNet and exercise the high-level
    ``inference_detector`` API with a single image and with a list of
    images."""
    from mmdet.apis import inference_detector
    from mmdet.models import build_detector
    from mmcv import ConfigDict
    # small RetinaNet: single output stride, one aspect ratio, no neck —
    # just enough to produce per-class results.
    num_class = 3
    model_dict = dict(
        type='RetinaNet',
        pretrained=None,
        backbone=dict(
            type='ResNet',
            depth=18,
            num_stages=4,
            out_indices=(3, ),
            norm_cfg=dict(type='BN', requires_grad=False),
            norm_eval=True,
            style='pytorch'),
        neck=None,
        bbox_head=dict(
            type='RetinaHead',
            num_classes=num_class,
            in_channels=512,
            stacked_convs=1,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=4,
                scales_per_octave=3,
                ratios=[0.5],
                strides=[32]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[.0, .0, .0, .0],
                target_stds=[1.0, 1.0, 1.0, 1.0]),
        ),
        test_cfg=dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100))
    rng = np.random.RandomState(0)
    img1 = rng.rand(100, 100, 3)
    img2 = rng.rand(100, 100, 3)
    model = build_detector(ConfigDict(model_dict))
    # inference_detector reads the data pipeline from model.cfg; borrow the
    # standard RetinaNet config for it.
    config = _get_config_module('retinanet/retinanet_r50_fpn_1x_coco.py')
    model.cfg = config
    # test single image: one result list per class
    result = inference_detector(model, img1)
    assert len(result) == num_class
    # test multiple image: one per-class result list per input image
    result = inference_detector(model, [img1, img2])
    assert len(result) == 2 and len(result[0]) == num_class
| 19,749 | 30.701445 | 110 | py |
DDOD | DDOD-main/tests/test_models/test_necks.py | import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.necks import (FPN, ChannelMapper, CTResNetNeck,
DilatedEncoder, SSDNeck, YOLOV3Neck)
def test_fpn():
    """Tests fpn.

    FIX: the original "shape checks" were bare comparisons with no
    ``assert`` (their results were discarded), and used ``s // 2**i``
    which is off by one level because ``start_level=1`` skips the first
    (highest-resolution) input.  They are real assertions with the
    corrected formula here.  A duplicated forward call in the bilinear
    case was also removed.
    """
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8

    def _assert_outs(model, outs):
        # Every output has `out_channels` channels; with start_level=1 the
        # i-th output has spatial size s / 2**(i + 1).
        assert len(outs) == model.num_outs
        for i, out in enumerate(outs):
            assert out.shape[1] == out_channels
            assert out.shape[2] == out.shape[3] == s // 2**(i + 1)

    # `num_outs` is not equal to len(in_channels) - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            num_outs=2)
    # `end_level` is larger than len(in_channels) - 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=4,
            num_outs=2)
    # `num_outs` is not equal to end_level - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=3,
            num_outs=1)
    # Invalid `add_extra_convs` option
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            add_extra_convs='on_xxx',
            num_outs=5)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        num_outs=5)
    # FPN expects a multiple levels of features per image
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    outs = fpn_model(feats)
    # add_extra_convs=True is normalized to the 'on_input' source
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs(fpn_model, outs)
    # Tests for fpn with no extra convs (pooling is used instead)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=False,
        num_outs=5)
    outs = fpn_model(feats)
    assert not fpn_model.add_extra_convs
    _assert_outs(fpn_model, outs)
    # Tests for fpn with lateral bns
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        no_norm_on_lateral=False,
        norm_cfg=dict(type='BN', requires_grad=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs(fpn_model, outs)
    # At least one BatchNorm module must have been created.
    bn_exist = False
    for m in fpn_model.modules():
        if isinstance(m, _BatchNorm):
            bn_exist = True
    assert bn_exist
    # Bilinear upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(mode='bilinear', align_corners=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs(fpn_model, outs)
    # Scale factor instead of fixed upsample size upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(scale_factor=2),
        num_outs=5)
    outs = fpn_model(feats)
    _assert_outs(fpn_model, outs)
    # Extra convs source is 'inputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_input',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs(fpn_model, fpn_model(feats))
    # Extra convs source is 'laterals'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_lateral',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_lateral'
    _assert_outs(fpn_model, fpn_model(feats))
    # Extra convs source is 'outputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_output',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_output'
    _assert_outs(fpn_model, fpn_model(feats))
def test_channel_mapper():
    """Tests ChannelMapper.

    FIX: the original per-output shape "checks" were bare comparisons
    with no ``assert`` (their results were discarded); they are real
    assertions here.
    """
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    kernel_size = 3
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    # in_channels must be a list
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=10, out_channels=out_channels, kernel_size=kernel_size)
    # the length of channel_mapper's inputs must be equal to the length of
    # in_channels
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=in_channels[:-1],
            out_channels=out_channels,
            kernel_size=kernel_size)
        channel_mapper(feats)
    channel_mapper = ChannelMapper(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size)
    outs = channel_mapper(feats)
    assert len(outs) == len(feats)
    for i in range(len(feats)):
        # Mapping changes only the channel count; spatial size is kept.
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dilated_encoder():
    """DilatedEncoder keeps the spatial size and maps channels 16 -> 32."""
    c_in, c_out, size = 16, 32, 34
    encoder = DilatedEncoder(c_in, c_out, 16, 2)
    inputs = [torch.rand(1, c_in, size, size)]
    result = encoder(inputs)[0]
    assert result.shape == (1, c_out, size, size)
def test_ct_resnet_neck():
    """CTResNetNeck (CenterNet neck): argument validation, CPU forward
    shape, and a DCN-enabled forward when CUDA is available."""
    # num_filters/num_kernels must be a list
    with pytest.raises(TypeError):
        CTResNetNeck(
            in_channel=10, num_deconv_filters=10, num_deconv_kernels=4)
    # num_filters/num_kernels must be same length
    with pytest.raises(AssertionError):
        CTResNetNeck(
            in_channel=10,
            num_deconv_filters=(10, 10),
            num_deconv_kernels=(4, ))
    in_channels = 16
    num_filters = (8, 8)
    num_kernels = (4, 4)
    feat = torch.rand(1, 16, 4, 4)
    ct_resnet_neck = CTResNetNeck(
        in_channel=in_channels,
        num_deconv_filters=num_filters,
        num_deconv_kernels=num_kernels,
        use_dcn=False)
    # feat must be list or tuple
    with pytest.raises(AssertionError):
        ct_resnet_neck(feat)
    # Two deconv stages upsample the 4x4 input to 16x16 (asserted below).
    out_feat = ct_resnet_neck([feat])[0]
    assert out_feat.shape == (1, num_filters[-1], 16, 16)
    if torch.cuda.is_available():
        # test dcn (the default construction, without use_dcn=False)
        ct_resnet_neck = CTResNetNeck(
            in_channel=in_channels,
            num_deconv_filters=num_filters,
            num_deconv_kernels=num_kernels)
        ct_resnet_neck = ct_resnet_neck.cuda()
        feat = feat.cuda()
        out_feat = ct_resnet_neck([feat])[0]
        assert out_feat.shape == (1, num_filters[-1], 16, 16)
def test_yolov3_neck():
    """YOLOv3 neck: config validation plus output shapes for two settings."""
    # num_scales, in_channels and out_channels must all match in length.
    with pytest.raises(AssertionError):
        YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])
    # The number of input feature maps must equal num_scales.
    with pytest.raises(AssertionError):
        neck = YOLOV3Neck(
            num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
        feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
        neck(feats)

    def _check_shapes(s, in_channels, out_channels):
        # Build inputs in the same reversed order as the channel lists and
        # verify each output keeps its spatial size with the mapped channels.
        sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
        feats = [
            torch.rand(1, in_channels[i], sizes[i], sizes[i])
            for i in range(len(in_channels) - 1, -1, -1)
        ]
        neck = YOLOV3Neck(
            num_scales=3, in_channels=in_channels, out_channels=out_channels)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i, out in enumerate(outs):
            assert out.shape == (1, out_channels[i], sizes[i], sizes[i])

    # test normal channels
    _check_shapes(32, [16, 8, 4], [8, 4, 2])
    # test more flexible setting
    _check_shapes(32, [32, 8, 16], [19, 21, 5])
def test_ssd_neck():
    """SSDNeck: configuration validation, extra-level output shapes, and
    the depthwise SSD-Lite variant."""
    # level_strides/level_paddings must be same length
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8, 16, 32],
            level_strides=[2],
            level_paddings=[2, 1])
    # length of out_channels must larger than in_channels
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8],
            level_strides=[2],
            level_paddings=[2])
    # len(out_channels) - len(in_channels) must equal to len(level_strides)
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2, 2],
            level_paddings=[2, 2])
    # in_channels must be same with out_channels[:len(in_channels)]
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2],
            level_paddings=[2])
    # Valid config: input level passes through, two extra levels are added
    # with the shapes asserted below.
    ssd_neck = SSDNeck(
        in_channels=[4],
        out_channels=[4, 8, 16],
        level_strides=[2, 1],
        level_paddings=[1, 0])
    feats = (torch.rand(1, 4, 16, 16), )
    outs = ssd_neck(feats)
    assert outs[0].shape == (1, 4, 16, 16)
    assert outs[1].shape == (1, 8, 8, 8)
    assert outs[2].shape == (1, 16, 6, 6)
    # test SSD-Lite Neck: depthwise separable convs, no L2 norm layer.
    ssd_neck = SSDNeck(
        in_channels=[4, 8],
        out_channels=[4, 8, 16],
        level_strides=[1],
        level_paddings=[1],
        l2_norm_scale=None,
        use_depthwise=True,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='ReLU6'))
    assert not hasattr(ssd_neck, 'l2_norm')
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(ssd_neck.extra_layers[0][-1],
                      DepthwiseSeparableConvModule)
    feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8))
    outs = ssd_neck(feats)
    assert outs[0].shape == (1, 4, 8, 8)
    assert outs[1].shape == (1, 8, 8, 8)
    assert outs[2].shape == (1, 16, 8, 8)
| 12,179 | 31.393617 | 79 | py |
DDOD | DDOD-main/tests/test_models/test_backbones/test_hourglass.py | import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
    """Config validation and output shapes for HourglassNet-52/104."""
    with pytest.raises(AssertionError):
        # at least one hourglass stack is required
        HourglassNet(num_stacks=0)
    with pytest.raises(AssertionError):
        # stage_channels and stage_blocks must be equally long
        HourglassNet(
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2, 4])
    with pytest.raises(AssertionError):
        # stage_channels must cover every downsample level
        HourglassNet(
            downsample_times=5,
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2])
    # HourglassNet-52 (1 stack) and HourglassNet-104 (2 stacks): one
    # 256x64x64 feature map per stack for a 256x256 input.
    for num_stacks in (1, 2):
        model = HourglassNet(num_stacks=num_stacks)
        model.init_weights()
        model.train()
        imgs = torch.randn(1, 3, 256, 256)
        feats = model(imgs)
        assert len(feats) == num_stacks
        for feat in feats:
            assert feat.shape == torch.Size([1, 256, 64, 64])
| 1,301 | 27.933333 | 65 | py |
DDOD | DDOD-main/tests/test_models/test_backbones/test_res2net.py | import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
    """Bottle2neck: config validation, DCN compatibility, forward shape."""
    with pytest.raises(AssertionError):
        # Only 'pytorch' and 'caffe' styles are accepted.
        Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
    with pytest.raises(AssertionError):
        # scales must be larger than 1.
        Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
    # The configured scales value is stored on the block.
    unit = Bottle2neck(
        64, 64, base_width=26, stride=2, scales=4, style='pytorch')
    assert unit.scales == 4
    # DCN is incompatible with an explicit conv_cfg ...
    deform_cfg = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottle2neck(
            64,
            64,
            base_width=26,
            scales=4,
            dcn=deform_cfg,
            conv_cfg=dict(type='Conv'))
    # ... but is fine on its own.
    Bottle2neck(64, 64, dcn=deform_cfg)
    # Forward pass preserves channels and spatial size.
    unit = Bottle2neck(64, 16, base_width=26, scales=4)
    out = unit(torch.randn(1, 64, 56, 56))
    assert out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
    """Res2Net accepts only known depths and yields the 4-level pyramid."""
    with pytest.raises(KeyError):
        # Res2Net depth should be in [50, 101, 152].
        Res2Net(depth=18)
    # scales/base_width must propagate into every Bottle2neck.
    model = Res2Net(depth=50, scales=4, base_width=26)
    for module in model.modules():
        if is_block(module):
            assert module.scales == 4
    model.init_weights()
    model.train()
    outs = model(torch.randn(1, 3, 224, 224))
    assert len(outs) == 4
    expected_shapes = [(1, 256, 56, 56), (1, 512, 28, 28), (1, 1024, 14, 14),
                       (1, 2048, 7, 7)]
    for out, shape in zip(outs, expected_shapes):
        assert out.shape == torch.Size(shape)
| 1,961 | 30.142857 | 72 | py |
DDOD | DDOD-main/tests/test_models/test_backbones/test_resnet.py | import pytest
import torch
from mmcv import assert_params_all_zeros
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .utils import check_norm_state, is_block, is_norm
def test_resnet_basic_block():
    """BasicBlock rejects unsupported options and preserves input shape."""
    # DCN and plugins are not implemented for BasicBlock yet.
    unsupported = [
        dict(dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False)),
        dict(plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]),
        dict(plugins=[
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]),
    ]
    for kwargs in unsupported:
        with pytest.raises(AssertionError):
            BasicBlock(64, 64, **kwargs)
    # Plain block: both convs are 3x3 with 64 channels in and out.
    plain = BasicBlock(64, 64)
    for conv in (plain.conv1, plain.conv2):
        assert conv.in_channels == 64
        assert conv.out_channels == 64
        assert conv.kernel_size == (3, 3)
    inputs = torch.randn(1, 64, 56, 56)
    assert plain(inputs).shape == torch.Size([1, 64, 56, 56])
    # Gradient-checkpointed block: same shape-preserving forward.
    cp_block = BasicBlock(64, 64, with_cp=True)
    assert cp_block.with_cp
    assert cp_block(torch.randn(1, 64, 56, 56)).shape == torch.Size(
        [1, 64, 56, 56])
def test_resnet_bottleneck():
    """Bottleneck: config validation, per-style strides, DCN, and plugins.

    Blocks are built with in_channels=64, planes=16, so plugins attached
    after conv2 see 16 channels while plugins after conv3 see the block's
    64-channel output (per the in_channels assertions below).
    """
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        Bottleneck(64, 64, style='tensorflow')
    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        Bottleneck(64, 16, plugins=plugins)
    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        Bottleneck(64, 16, plugins=plugins)
    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)
    # Test Bottleneck with checkpoint forward
    block = Bottleneck(64, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck style: 'pytorch' strides in conv2, 'caffe' in conv1.
    block = Bottleneck(64, 64, stride=2, style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = Bottleneck(64, 64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)
    # Test Bottleneck DCN: conv_cfg must be None when dcn is given, and a
    # valid dcn config replaces conv2 with a deformable conv.
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
    block = Bottleneck(64, 64, dcn=dcn)
    assert isinstance(block.conv2, DeformConv2dPack)
    # Test Bottleneck forward
    block = Bottleneck(64, 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3 (distinct postfixes yield context_block1/2/3 attribute names)
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_simplied_basic_block():
    """SimplifiedBasicBlock rejects unsupported options and keeps shape.

    NOTE: the function name keeps the historical 'simplied' spelling so the
    test id stays stable.
    """
    # DCN, plugins, and gradient checkpointing are all unsupported.
    unsupported = [
        dict(dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False)),
        dict(plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]),
        dict(plugins=[
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]),
        dict(with_cp=True),
    ]
    for kwargs in unsupported:
        with pytest.raises(AssertionError):
            SimplifiedBasicBlock(64, 64, **kwargs)
    # Default block: two 3x3 convs, shape-preserving forward.
    block = SimplifiedBasicBlock(64, 64)
    for conv in (block.conv1, block.conv2):
        assert conv.in_channels == 64
        assert conv.out_channels == 64
        assert conv.kernel_size == (3, 3)
    inputs = torch.randn(1, 64, 56, 56)
    assert block(inputs).shape == torch.Size([1, 64, 56, 56])
    # With norm_cfg=None both norm layers are dropped entirely.
    no_norm = SimplifiedBasicBlock(64, 64, norm_cfg=None)
    assert no_norm.norm1 is None
    assert no_norm.norm2 is None
    assert no_norm(inputs).shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    """ResLayer wires downsample branches and strides as configured."""
    inputs = torch.randn(1, 64, 56, 56)
    # Three Bottlenecks, no channel change -> no downsample anywhere.
    layer = ResLayer(Bottleneck, 64, 16, 3)
    assert len(layer) == 3
    for unit in layer:
        assert unit.conv1.in_channels == 64
        assert unit.conv1.out_channels == 16
        assert unit.downsample is None
    assert layer(inputs).shape == torch.Size([1, 64, 56, 56])
    # Channel expansion: only the first block carries a downsample conv.
    layer = ResLayer(Bottleneck, 64, 64, 3)
    assert layer[0].downsample[0].out_channels == 256
    for unit in list(layer)[1:]:
        assert unit.downsample is None
    assert layer(inputs).shape == torch.Size([1, 256, 56, 56])
    # stride=2: the first block's downsample conv also strides.
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
    assert layer[0].downsample[0].out_channels == 256
    assert layer[0].downsample[0].stride == (2, 2)
    for unit in list(layer)[1:]:
        assert unit.downsample is None
    assert layer(inputs).shape == torch.Size([1, 256, 28, 28])
    # avg_down=True: the stride moves into an AvgPool2d, leaving the
    # downsample conv at stride 1.
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
    assert isinstance(layer[0].downsample[0], AvgPool2d)
    assert layer[0].downsample[1].out_channels == 256
    assert layer[0].downsample[1].stride == (1, 1)
    for unit in list(layer)[1:]:
        assert unit.downsample is None
    assert layer(inputs).shape == torch.Size([1, 256, 28, 28])
    # downsample_first=False places the strided/downsampling block last.
    layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
    assert layer[2].downsample[0].out_channels == 64
    assert layer[2].downsample[0].stride == (2, 2)
    for unit in list(layer)[:-1]:
        assert unit.downsample is None
    assert layer(inputs).shape == torch.Size([1, 64, 28, 28])
def test_resnest_stem():
    """Stem/base channel options propagate to conv1, norm1 and layer1.

    NOTE: the name keeps the historical 'resnest' spelling, though the test
    exercises plain ResNet/ResNetV1d stems.
    """
    # (constructor kwargs, expected stem width). The default case does not
    # check layer1 input channels, matching the original coverage.
    cases = [
        (dict(), 64),
        (dict(base_channels=32), 32),
        (dict(stem_channels=64), 64),
        (dict(stem_channels=64, base_channels=32), 64),
    ]
    for kwargs, width in cases:
        model = ResNet(50, **kwargs)
        assert model.stem_channels == width
        assert model.conv1.out_channels == width
        assert model.norm1.num_features == width
        if kwargs:
            assert model.layer1[0].conv1.in_channels == width
    # Wider stem on the plain ResNet.
    model = ResNet(depth=50, stem_channels=128)
    model.init_weights()
    model.train()
    assert model.conv1.out_channels == 128
    assert model.layer1[0].conv1.in_channels == 128
    # ResNetV1d deep stem: first two conv/norm pairs at 64 channels, the
    # last pair at the full stem width of 128.
    model = ResNetV1d(depth=50, stem_channels=128)
    model.init_weights()
    model.train()
    for idx, channels in [(0, 64), (3, 64), (6, 128)]:
        assert model.stem[idx].out_channels == channels
        assert model.stem[idx + 1].num_features == channels
    assert model.layer1[0].conv1.in_channels == 128
def _check_resnet50_feats(feats):
    """Assert the canonical ResNet-50 4-level pyramid for a (1, 3, 224, 224)
    input: channels 256/512/1024/2048 at spatial sizes 56/28/14/7."""
    assert len(feats) == 4
    assert feats[0].shape == torch.Size([1, 256, 56, 56])
    assert feats[1].shape == torch.Size([1, 512, 28, 28])
    assert feats[2].shape == torch.Size([1, 1024, 14, 14])
    assert feats[3].shape == torch.Size([1, 2048, 7, 7])


def _check_frozen_stages(model, frozen_stages):
    """Assert stages 1..frozen_stages have BN in eval mode and grads off."""
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False


def test_resnet_backbone():
    """Test resnet backbone.

    Covers config validation, norm_eval, stage freezing, checkpointing,
    norm layer choices, plugin wiring per stage, zero-init residual, and
    forward shapes for ResNet-18/50 and ResNetV1d-50.
    """
    with pytest.raises(KeyError):
        # ResNet depth should be in [18, 34, 50, 101, 152]
        ResNet(20)
    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)
    with pytest.raises(AssertionError):
        # len(stage_with_dcn) == num_stages
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
    with pytest.raises(AssertionError):
        # len(stage_with_plugin) == num_stages
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)
    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)
    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = ResNet(50, pretrained=0)
        model.init_weights()
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        ResNet(50, style='tensorflow')
    # Test ResNet50 norm_eval=True: norms stay in eval mode after train()
    model = ResNet(50, norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test ResNet50 with torchvision pretrained weight
    model = ResNet(
        depth=50, norm_eval=True, pretrained='torchvision://resnet50')
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test ResNet50 with first stage frozen
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    assert model.norm1.training is False
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    _check_frozen_stages(model, frozen_stages)
    # Test ResNet50V1d with first stage frozen (deep 3-conv stem)
    model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
    assert len(model.stem) == 9
    model.init_weights()
    model.train()
    assert check_norm_state(model.stem, False)
    for param in model.stem.parameters():
        assert param.requires_grad is False
    _check_frozen_stages(model, frozen_stages)
    # Test ResNet18 forward
    model = ResNet(18)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 56, 56])
    assert feat[1].shape == torch.Size([1, 128, 28, 28])
    assert feat[2].shape == torch.Size([1, 256, 14, 14])
    assert feat[3].shape == torch.Size([1, 512, 7, 7])
    # Test ResNet18 with checkpoint forward
    model = ResNet(18, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    # Test ResNet50 with BatchNorm forward
    model = ResNet(50)
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.init_weights()
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
    # Test ResNet50 with layers 1, 2, 3 out forward
    model = ResNet(50, out_indices=(0, 1, 2))
    model.init_weights()
    model.train()
    feat = model(torch.randn(1, 3, 224, 224))
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    # Test ResNet50 with checkpoint forward
    model = ResNet(50, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
    # Test ResNet50 with GroupNorm forward
    model = ResNet(
        50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
    # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 64
    for m in model.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 128
            assert m.gen_attention_block.in_channels == 128
            assert m.context_block.in_channels == 512
    for m in model.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 256
            assert m.gen_attention_block.in_channels == 256
            assert m.context_block.in_channels == 1024
    for m in model.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 512
            assert m.gen_attention_block.in_channels == 512
            assert not hasattr(m, 'context_block')
    model.init_weights()
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
    # Test ResNet50 with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 512
            assert m.context_block2.in_channels == 512
    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 1024
            assert m.context_block2.in_channels == 1024
    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.init_weights()
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
    # Test ResNet50 zero initialization of residual
    model = ResNet(50, zero_init_residual=True)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert assert_params_all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert assert_params_all_zeros(m.norm2)
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
    # Test ResNetV1d forward. (The original test repeated this identical
    # forward/shape check three times; one pass gives the same coverage.)
    model = ResNetV1d(depth=50)
    model.init_weights()
    model.train()
    _check_resnet50_feats(model(torch.randn(1, 3, 224, 224)))
| 23,501 | 34.235382 | 78 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.