repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_quantization_state_utils.py | import dataclasses
from typing import Callable, Tuple, Any, List, Optional, Dict
import torch
import torch.nn.functional as F
import torch.nn.quantized.dynamic as nnqd
from intel_extension_for_pytorch.nn.functional import interaction
import intel_extension_for_pytorch._C as core
# Functions/methods that the auto-quantization tracer recognizes as quantizable.
functions_supported_by_quantization = set(
    [
        torch.Tensor.add,
        torch.add,
        torch.Tensor.relu,
        # torch.Tensor.sigmoid, # TODO
        torch.flatten,
        torch.Tensor.flatten,
        F.adaptive_avg_pool2d,
        F.adaptive_avg_pool3d,
        F.avg_pool2d,
        F.avg_pool3d,
        F.max_pool2d,
        F.max_pool3d,
        F.conv2d,
        F.conv3d,
        torch.conv2d,
        torch.conv3d,
        F.conv_transpose2d,
        F.conv_transpose3d,
        torch.conv_transpose2d,
        torch.conv_transpose3d,
        torch.relu,
        F.relu,
        # torch.sigmoid, # TODO
        # F.sigmoid, # TODO
        # F.gelu, # TODO
        F.linear,
        torch._C._nn.linear,
        torch.matmul,
        torch.bmm,
        torch.Tensor.matmul,
        torch.Tensor.bmm,
        F.embedding_bag,
        torch.embedding_bag,
    ]
)
# ipex customer function: IPEX-specific quantizable ops not present in stock PyTorch.
functions_supported_by_quantization_ipex = set(
    [
        interaction,
        torch.ops.torch_ipex.interaction_forward,
    ]
)
# Module types (matched via type(mod)) that the tracer recognizes as quantizable.
module_types_supported_by_quantization = set(
    [
        torch.nn.Conv2d,
        torch.nn.Conv3d,
        torch.nn.ConvTranspose2d,
        torch.nn.ConvTranspose3d,
        torch.nn.Linear,
        torch.nn.MaxPool2d,
        torch.nn.MaxPool3d,
        torch.nn.AvgPool2d,
        torch.nn.AvgPool3d,
        torch.nn.AdaptiveAvgPool2d,
        torch.nn.AdaptiveAvgPool3d,
        torch.nn.ReLU,
        # torch.nn.Sigmoid, # TODO
        # torch.nn.GELU, # TODO
        torch.nn.EmbeddingBag,
        torch.nn.Flatten,
        torch.nn.LSTM,
        # dynamic quantization module
        nnqd.Linear,
        nnqd.LSTM,
    ]
)
# Module types whose inplace variants must be excluded from quantization
# (an inplace op would clobber the tensor an observer needs to see).
may_inplace_module = set(
    [
        torch.nn.ReLU,
    ]
)
# Pairs of op-type strings that should be treated as the same op when
# verifying that the op seen at runtime matches the one recorded during
# calibration (e.g. a float module vs. its dynamically-quantized swap).
a_related_to_b = (
    (str(torch.add), str(torch.Tensor.add)),
    (str(torch.Tensor.add), str(torch.add)),
    (str(torch.nn.Linear), str(nnqd.Linear)),
    (str(nnqd.Linear), str(torch.nn.Linear)),
    (str(torch.nn.LSTM), str(nnqd.LSTM)),
    (str(nnqd.LSTM), str(torch.nn.LSTM)),
)
# Functional conv/linear ops (as strings) — these take an explicit weight
# argument, which may be per-channel quantized.
conv_linear_ops = [
    # F.conv1d, # it will be enabled at next step.
    str(F.conv2d),
    str(F.conv3d),
    str(torch.conv2d),
    str(torch.conv3d),
    str(F.conv_transpose2d),
    str(F.conv_transpose3d),
    str(torch.conv_transpose2d),
    str(torch.conv_transpose3d),
    str(F.linear),
    str(torch._C._nn.linear),
]
# Module counterparts of the above (as strings).
conv_linear_modules = [
    # str(torch.nn.Conv1d) # it will be enabled at next step.
    str(torch.nn.Conv2d),
    str(torch.nn.Conv3d),
    str(torch.nn.ConvTranspose2d),
    str(torch.nn.ConvTranspose3d),
    str(torch.nn.Linear),
]
# Embedding-bag functionals (as strings); only the weight (arg 1) is observed.
embedding_op = [
    str(F.embedding_bag),
    str(torch.embedding_bag),
]
def op_needs_quantization(op: Callable) -> bool:
    """Return True if `op` should be traced for quantization.

    Functions/methods are matched directly against the supported-function
    sets; module instances are matched by their type. An inplace instance of
    a type listed in `may_inplace_module` (e.g. ``nn.ReLU(inplace=True)``) is
    excluded, because inplace execution would clobber the observed tensor.
    """
    if (
        op in functions_supported_by_quantization
        or op in functions_supported_by_quantization_ipex
    ):
        return True
    elif type(op) in module_types_supported_by_quantization:
        # Bugfix: may_inplace_module holds module *types*, so membership must
        # be tested with type(op). The old check `op in may_inplace_module`
        # compared an instance against classes and was always False, so
        # inplace ReLU modules were incorrectly reported as quantizable.
        if type(op) in may_inplace_module and op.inplace:
            return False
        return True
    else:
        return False
def ops_are_related(
    cur_op: Callable,
    expected_op_type: str,
    type_is_module: bool,
) -> bool:
    r"""
    Verify that the op encountered at runtime matches the op type that was
    recorded during calibration, ensuring the model has no dynamic workflow.
    Two ops are also considered related when they appear as a pair in
    `a_related_to_b` (e.g. a float module and its dynamic-quantized swap).
    """
    key = str(type(cur_op)) if type_is_module else str(cur_op)
    if key == expected_op_type:
        return True
    return (key, expected_op_type) in a_related_to_b
def _raise_obs_not_found_error(func):
raise RuntimeError(
f"Encountered arithmetic operation {torch.typename(func)} but we have "
f"encountered fewer arithmetic operations in previous calibration runs. "
f"This likely indicates that the program contains dynamic control flow. "
f" Quantization is not defined over dynamic control flow!"
)
def _raise_obs_op_mismatch(func, prev_op):
raise RuntimeError(
f"Encountered arithmetic operation {torch.typename(func)} but previously "
f"recorded operation was {prev_op}!. This likely indicates "
f"that the program contains dynamic control flow. Quantization is not "
f"defined over dynamic control flow!"
)
@dataclasses.dataclass
class QTensorInfo:
    """How one tensor was observed during tracing and how it should be
    treated at inference time."""

    id: int  # tensor ID, unique within the traced graph
    orig_dtype: torch.dtype  # dtype seen while tracing with example input
    inf_dtype: torch.dtype  # dtype to use at inference
@dataclasses.dataclass
class SeenQOpInfo:
    """Record of one quantizable op observed during calibration/tracing."""

    # Index of this op in the traced execution order.
    idx: int
    # Python type of the seen op. For modules, this is str(type(mod)). For
    # functions, this is the target function(str).
    type: str
    # True if the type is a module, False otherwise (for functions/methods).
    type_is_module: bool
    # Note: FQN refers to the current module for modules and to the parent
    # module for functions
    fqn: str
    # Information about the input tensors
    # Non-tensor inputs are represented with None.
    input_tensor_infos: List[Optional[QTensorInfo]]
    # We use input_tensor_infos's inf_dtype to check whether we need to add a
    # fake quant at the convert step. A QTensorInfo may be shared by many
    # operators, and one of them may set its inf_dtype to fp32 (wanting the
    # fp32 kernel) while the current op wants the low-precision kernel.
    # input_tensor_force_inf_dtype fixes this multi-use case: when it holds a
    # low-precision dtype, it overrides the related QTensorInfo's inf_dtype
    # even if that inf_dtype is fp32.
    # Note: the init value is the QTensorInfo's orig dtype.
    input_tensor_force_inf_dtype: List[Optional[torch.dtype]]
    # Information about the output tensors
    # Non-tensor outputs are represented with None.
    output_tensor_infos: List[QTensorInfo]
    # Some operators only support INT8->INT8. If the post operator is a
    # non-quantized op, the output_tensor_infos's inf_dtype always equals its
    # orig dtype; we can set the inf_dtype to int8 and decide whether to add a
    # fake quant after the output from the inf_dtype alone. But if the post
    # operator is a quantized op, checking only the inf_dtype would insert two
    # fake quants. insert_fake_quant_after_outputs fixes this: when it is True
    # and the inf_dtype is int8, a fake quant is added after the output;
    # otherwise none is added here (if the inf_dtype is int8 but the flag is
    # False, the post op inserts the fake quant; if the inf_dtype is not int8,
    # the output keeps its orig dtype and needs no fake quant).
    # Note: the init value of insert_fake_quant_after_outputs is False.
    # Our Quant param binding algorithm (binding info used to decide whether to add q/dq at runtime) is that:
    # 1. Bind input tensors by default for all quantized ops.
    # 2. Bind output tensor if any of downstream ops is not quantized.
    insert_fake_quant_after_outputs: List[Optional[bool]]
    # Observed info for the op's weight tensors (None for weightless ops).
    weight_tensor_infos: List[Optional[QTensorInfo]]
    # The qconfig (activation/weight observer factories) attached to this op.
    qconfig: torch.ao.quantization.QConfig

    def __repr__(self) -> str:
        # Multi-line summary used in debug dumps of the quantization state.
        s = f"(type): {self.type}\n"
        s += f" (fqn): {self.fqn}\n"
        s += f" (input_tensor_infos): {self.input_tensor_infos}\n"
        s += f" (input_tensor_force_inf_dtype): {self.input_tensor_force_inf_dtype}\n"
        s += f" (output_tensor_infos): {self.output_tensor_infos}\n"
        s += f" (insert_fake_quant_after_outputs): {self.insert_fake_quant_after_outputs}\n"
        s += f" (weight_tensor_infos): {self.weight_tensor_infos}\n"
        s += f" (qconfig): {self.qconfig}"
        return s
@dataclasses.dataclass
class SeenNonQOpInfo:
    """Record of a non-quantizable op observed during tracing (kept so the
    surrounding dtype flow can still be reasoned about)."""

    # Python type of the seen op. For modules, this is str(type(mod)). For
    # functions, this is the target function.
    type: str
    # Note: FQN refers to the current module for modules and to the parent
    # module for functions
    fqn: str
    # Information about the input tensors
    # Non-tensor inputs are represented with None.
    input_tensor_infos: List[Optional[QTensorInfo]]
    # Information about the output tensors
    # Non-tensor outputs are represented with None.
    output_tensor_infos: List[QTensorInfo]
def get_input_observed_arg_idxs(
    op_type: str,
    op_type_is_module: bool,
) -> Optional[List[int]]:
    """Return the positional indices of the arguments to observe for this op,
    or None meaning "observe all Tensor args"."""
    if op_type_is_module and op_type != str(torch.nn.EmbeddingBag):
        # TODO(future PR): handle RNNs
        return [0]
    if op_type in conv_linear_ops:
        # functional conv/linear: observe both activation and weight
        return [0, 1]
    if op_type in embedding_op:
        # embedding_bag functional: only the weight (arg 1) is observed
        return [1]
    # None means "observe all Tensor args"
    return None
def get_weight_arg_idx(op: str) -> Optional[int]:
    """Position of the weight argument for functional conv/linear ops;
    None for every other op."""
    return 1 if op in conv_linear_ops else None
def set_tensor_info_dtype(tensor_info: QTensorInfo, observer):
    """
    Called at the prepare step: when the user loads a modified configure JSON
    file, the recorded inf_dtype may disagree with the observer's dtype.
    Align the two, but only for genuinely quantized dtypes that differ from
    the tensor's original dtype.
    """
    if tensor_info.inf_dtype not in (torch.quint8, torch.qint8):
        return
    if tensor_info.inf_dtype == tensor_info.orig_dtype:
        return
    if tensor_info.inf_dtype != observer.dtype:
        tensor_info.inf_dtype = observer.dtype
def iterate_and_apply(
    args: Any,
    flattened_tensor_infos: List[Optional[QTensorInfo]],
    func: Callable,
    flattened_tensor_infos_idx=None,
) -> Any:
    """
    Walk `args` (which may nest tuples and lists), pairing each leaf with the
    next entry of `flattened_tensor_infos`. The cursor is the shared
    one-element list `flattened_tensor_infos_idx`, created on the outermost
    call so the recursion consumes the flattened infos left-to-right.

    Example: args ([Tensor, Tensor], int, (int, int)) pairs with
    flattened infos ({...}, {...}, None, None, None).

    Returns the rebuilt structure where every leaf whose info is not None has
    been transformed by `func(leaf, info)`; other leaves pass through.
    Lists are updated in place; tuples are rebuilt.
    """
    if flattened_tensor_infos_idx is None:
        flattened_tensor_infos_idx = [0]
    if isinstance(args, tuple):
        return tuple(
            iterate_and_apply(
                item, flattened_tensor_infos, func, flattened_tensor_infos_idx
            )
            for item in args
        )
    if isinstance(args, list):
        for pos in range(len(args)):
            args[pos] = iterate_and_apply(
                args[pos], flattened_tensor_infos, func, flattened_tensor_infos_idx
            )
        return args
    # leaf element: consume one slot of the flattened info list
    info = flattened_tensor_infos[flattened_tensor_infos_idx[0]]
    flattened_tensor_infos_idx[0] += 1
    return args if info is None else func(args, info)
def iterate_and_apply_convert(
    args: Any,
    quant_infos: List[Optional[Tuple[float, int, torch.dtype]]],
    quant_or_dequant_needed: List[bool],
    op: Callable,
    flattened_tensor_infos_idx=None,
) -> Any:
    """
    Inputs:
      `args`: arguments to a function, may contain nested types, for example:
        ([torch.Tensor, torch.Tensor], int, (int, int))
      `quant_infos`: (scale, zero_point, dtype) for each flattened leaf of
        `args`, or None for leaves needing no quantization, for example
        corresponding with above: ({...}, {...}, None, None, None)
      `quant_or_dequant_needed`: for each flattened leaf, whether a
        quantize+dequantize pair should actually be inserted
      `op`: the current quantizable op; used to choose per-channel vs
        per-tensor quantization and the bf16 autocast handling
      `flattened_tensor_infos_idx`: shared one-element cursor into the
        flattened lists; created on the outermost call

    Returns `new_args`, where each selected tensor leaf has been passed
    through quantize/dequantize (a "fake quant").
    """
    if flattened_tensor_infos_idx is None:
        flattened_tensor_infos_idx = [0]
    if isinstance(args, tuple):
        new_args = []
        for arg in args:
            new_arg = iterate_and_apply_convert(
                arg,
                quant_infos,
                quant_or_dequant_needed,
                op,
                flattened_tensor_infos_idx,
            )
            new_args.append(new_arg)
        return tuple(new_args)
    elif isinstance(args, list):
        new_args = []
        for arg in args:
            new_arg = iterate_and_apply_convert(
                arg,
                quant_infos,
                quant_or_dequant_needed,
                op,
                flattened_tensor_infos_idx,
            )
            new_args.append(new_arg)
        return new_args
    else:
        # individual element: consume one slot of the flattened info lists
        cur_quant_infos = quant_infos[flattened_tensor_infos_idx[0]]
        cur_quant_or_dequant_needed = quant_or_dequant_needed[
            flattened_tensor_infos_idx[0]
        ]
        if (
            cur_quant_infos is not None
            and cur_quant_or_dequant_needed
            and isinstance(args, torch.Tensor)
        ):
            scale, zp, dtype = cur_quant_infos
            # For F.linear / F.conv, the weight may use per_channel
            # quantization (scale is a tensor with one entry per channel).
            if (
                str(op) in conv_linear_ops
                and get_weight_arg_idx(str(op)) == flattened_tensor_infos_idx[0]
                and isinstance(scale, torch.Tensor)
                and scale.numel() > 1
            ):
                ch_axis = 0
                # conv_transpose's weight is iohw or iodhw, so the output
                # channel axis is 1 rather than 0
                if str(op) in [
                    str(F.conv_transpose2d),
                    str(torch.conv_transpose2d),
                    str(F.conv_transpose3d),
                    str(torch.conv_transpose3d),
                ]:
                    ch_axis = 1
                # core.get_autocast_dtype() will be removed after fully use pytorch autocast
                if (
                    torch.is_autocast_cpu_enabled()
                    and core.get_autocast_dtype() == torch.bfloat16
                ):
                    # do autocast in Python side.
                    # Bugfix: quantize_per_channel requires an fp32 input, so
                    # a bf16 tensor must be upcast first. The previous check
                    # compared against float32 and converted fp32 -> fp32
                    # (a no-op), mirroring the per-tensor branch below shows
                    # the intent was to upcast bfloat16.
                    if args.dtype == torch.bfloat16:
                        args = args.to(dtype=torch.float32)
                    args = torch.quantize_per_channel(args, scale, zp, ch_axis, dtype)
                    args = args.dequantize()
                    args = args.to(dtype=torch.bfloat16)
                else:
                    args = torch.quantize_per_channel(args, scale, zp, ch_axis, dtype)
                    args = args.dequantize()
            else:
                # white list: conv, linear, matmul — always convert the input
                # to bfloat16 first under autocast, then insert q+dq
                if (
                    str(op)
                    in conv_linear_ops
                    + [
                        str(torch.matmul),
                        str(torch.Tensor.matmul),
                        str(torch.bmm),
                        str(torch.Tensor.bmm),
                    ]
                    + embedding_op
                    or str(type(op)) in conv_linear_modules
                ):
                    if (
                        torch.is_autocast_cpu_enabled()
                        and core.get_autocast_dtype() == torch.bfloat16
                    ):
                        if args.dtype == torch.bfloat16:
                            args = args.to(dtype=torch.float32)
                        args = torch.quantize_per_tensor(
                            args, scale.item(), zp.item(), dtype
                        )
                        args = args.dequantize()
                        args = args.to(dtype=torch.bfloat16)
                    else:
                        args = torch.quantize_per_tensor(
                            args, scale.item(), zp.item(), dtype
                        )
                        args = args.dequantize()
                else:
                    # fall through: other ops keep their original dtype around
                    # the fake quant
                    args_is_bfloat16 = False
                    if args.dtype == torch.bfloat16:
                        args_is_bfloat16 = True
                        args = args.to(dtype=torch.float32)
                    args = torch.quantize_per_tensor(
                        args, scale.item(), zp.item(), dtype
                    )
                    args = args.dequantize()
                    if args_is_bfloat16:
                        args = args.to(dtype=torch.bfloat16)
        flattened_tensor_infos_idx[0] += 1
        return args
def get_input_args_quant_dequant_info(
    seen_q_op_info: SeenQOpInfo,
    tensor_id_to_scale_zp: Dict[int, Tuple[torch.Tensor, torch.Tensor]],
    # NOTE(review): annotation fixed — the function returns two lists, not
    # three values, and the second list may contain None for non-tensor args.
) -> Tuple[List[Optional[Tuple[float, int, torch.dtype]]], List[Optional[bool]]]:
    """
    Returns a list of information about the tensor inputs to the current op.
    Quant list:
    For each tensor input:
    * if the tensor input needs a quant, the list will contain
      (scale, zero_point, inference_dtype)
    * if the tensor input does not need a quant, the list will contain None
    The second returned list flags, per input, whether a quant/dequant pair
    is actually needed (None for non-tensor inputs).
    """
    quant_infos: List[Optional[Tuple[float, int, torch.dtype]]] = []
    quantized_dtype = [torch.quint8, torch.qint8]
    any_arg_quant_or_dequant_needed = []
    if len(seen_q_op_info.input_tensor_infos) > 0:
        for i, input_arg in enumerate(seen_q_op_info.input_tensor_infos):
            if input_arg is not None:
                if input_arg.id in tensor_id_to_scale_zp:
                    tensor_id = input_arg.id
                    inf_dtype = input_arg.inf_dtype
                    # force_inf_dtype always should be same as input_arg.inf_dtype, but some time,
                    # the input arg may be used by many other operators, and it may have been
                    # changed by other operators, so for cur op, we check whether input_arg.inf_dtype
                    # is same as the origin force_inf_dtype, if not same use force_inf_dtype as new
                    # inf dtype, if same, we can say the input_arg.inf_dtype is not changed or the cur op
                    # changed input_arg.inf_dtype and force_inf_dtype at get default recipe step.
                    if (
                        seen_q_op_info.input_tensor_force_inf_dtype[i]
                        != input_arg.inf_dtype
                    ):
                        inf_dtype = seen_q_op_info.input_tensor_force_inf_dtype[i]
                    scale, zp = tensor_id_to_scale_zp[tensor_id]
                    quant_infos.append((scale, zp, inf_dtype))  # type: ignore[arg-type]
                    # only support float to int8.
                    if (
                        input_arg.orig_dtype == torch.float32
                        and inf_dtype in quantized_dtype
                    ):
                        any_arg_quant_or_dequant_needed.append(True)
                    else:
                        any_arg_quant_or_dequant_needed.append(False)
                else:
                    # no observed scale/zp for this tensor: pass through
                    quant_infos.append(None)
                    any_arg_quant_or_dequant_needed.append(False)
            else:
                # non-tensor argument
                quant_infos.append(None)
                any_arg_quant_or_dequant_needed.append(None)
    return quant_infos, any_arg_quant_or_dequant_needed
def get_weight_args_quant_dequant_info(
    seen_q_op_info: SeenQOpInfo,
    weight_tensor_id_to_scale_zp: Dict[str, Tuple[torch.Tensor, torch.Tensor]],
    # NOTE(review): annotation fixed — the function returns two lists, not
    # three values, and the second list may contain None for missing weights.
) -> Tuple[List[Optional[Tuple[float, int, torch.dtype]]], List[Optional[bool]]]:
    """
    Returns a list of information about the weight tensors of the current op,
    plus a parallel list flagging whether each weight needs a quant/dequant
    pair (only fp32 weights mapped to quint8/qint8 qualify).
    """
    quant_infos: List[Optional[Tuple[float, int, torch.dtype]]] = []
    any_arg_quant_or_dequant_needed = []
    for _, input_arg in enumerate(seen_q_op_info.weight_tensor_infos):
        if input_arg is not None:
            # weight scale/zp are keyed by "<op idx>_<tensor id>"
            tensor_id = str(seen_q_op_info.idx) + "_" + str(input_arg.id)
            if tensor_id in weight_tensor_id_to_scale_zp:
                scale, zp = weight_tensor_id_to_scale_zp[tensor_id]
                output_dtype = input_arg.inf_dtype
                quant_infos.append((scale, zp, output_dtype))  # type: ignore[arg-type]
                if input_arg.orig_dtype == torch.float32 and input_arg.inf_dtype in [
                    torch.quint8,
                    torch.qint8,
                ]:
                    any_arg_quant_or_dequant_needed.append(True)
                else:
                    any_arg_quant_or_dequant_needed.append(False)
            else:
                # no recorded scale/zp for this weight: pass through
                quant_infos.append(None)
                any_arg_quant_or_dequant_needed.append(False)
        else:
            # missing weight entry
            quant_infos.append(None)
            any_arg_quant_or_dequant_needed.append(None)
    return quant_infos, any_arg_quant_or_dequant_needed
| 20,844 | 36.969035 | 120 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_module_swap_utils.py | from typing import Dict, Callable, Any, Optional
import torch
import torch.nn as nn
from torch.ao.quantization import swap_module
import torch.nn.quantized.dynamic as nnqd
from torch.quantization.qconfig import QConfig
# Default map for swapping dynamic modules:
# float module type -> dynamically quantized replacement.
DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS: Dict[Callable, Any] = {
    nn.Linear: nnqd.Linear,
    nn.LSTM: nnqd.LSTM,
    # TODO: support more RNN module
    # nn.GRUCell: nnqd.GRUCell,
    # nn.GRU: nnqd.GRU,
    # nn.LSTMCell: nnqd.LSTMCell,
    # nn.RNNCell: nnqd.RNNCell,
}
def _get_qconfig_dtypes(qconfig):
r"""
Returns the qconfig tuple for qconfig:
(activation_dtype, weight_dtype, activation_compute_dtype)
"""
assert qconfig is not None
activation = qconfig.activation()
weight = qconfig.weight()
compute_dtype = (
activation.compute_dtype if hasattr(activation, "compute_dtype") else None
)
return (activation.dtype, weight.dtype, compute_dtype)
def _op_is_int8_dynamically_quantized(qconfig) -> bool:
    r"""
    True when the qconfig describes int8 dynamic quantization: fp32
    activations computed in quint8, with qint8 weights.
    """
    act_dtype, wt_dtype, act_compute_dtype = _get_qconfig_dtypes(qconfig)
    # for now, the checks below assume fbgemm or qnnpack
    return (
        act_dtype is torch.float
        and wt_dtype is torch.qint8
        and act_compute_dtype is torch.quint8
    )
def _swap_child_modules(
    module: torch.nn.Module,
    fqn_qconfig: Dict[str, QConfig],
    dynamic_mappings: Dict[Callable, Any] = DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
    parent_fqn: Optional[str] = None,
) -> None:
    """
    For each direct child of `module`, swaps it using `dynamic_mappings`
    if the qconfig for that child is using int8 dynamic quantization,
    and the module type is in the mapping.
    Recursively calls itself on each child.
    """
    # Swaps are collected here and applied after the loop so that
    # named_children() is not mutated while it is being iterated.
    reassign = {}
    for local_fqn, mod in module.named_children():
        if parent_fqn is None:
            global_fqn = local_fqn
        else:
            global_fqn = f"{parent_fqn}.{local_fqn}"
        # depth-first: handle grandchildren before deciding about this child
        _swap_child_modules(mod, fqn_qconfig, dynamic_mappings, global_fqn)
        if global_fqn in fqn_qconfig:
            qconfig = fqn_qconfig[global_fqn]
            if not qconfig:
                continue
            mod.qconfig = qconfig
            op_int8_dynamically_quantized = _op_is_int8_dynamically_quantized(qconfig)
            if op_int8_dynamically_quantized:
                if not type(mod) in dynamic_mappings:
                    continue
                reassign[local_fqn] = swap_module(mod, dynamic_mappings, {})
    for key, value in reassign.items():
        module._modules[key] = value
def swap_child_modules(
    module: torch.nn.Module,
    dynamic_mappings: Dict[Callable, Any] = DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
) -> None:
    """Collect the per-FQN qconfigs recorded on the prepared model's quant
    state and swap eligible child modules to their dynamic quantized
    counterparts."""
    fqn_qconfig = {}
    for state in module._fqn_to_auto_quant_state_map.values():
        if len(state.idx_to_seen_q_op_infos) == 0:
            continue
        for op_info in state.idx_to_seen_q_op_infos.values():
            fqn_qconfig[op_info.fqn] = op_info.qconfig
    _swap_child_modules(module, fqn_qconfig, dynamic_mappings)
| 3,210 | 31.434343 | 86 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_quantize.py | import copy
import functools
import os
import warnings
import torch
from torch.ao.quantization import PlaceholderObserver, QConfig, QConfigMapping
from torch.ao.quantization.quantization_mappings import (
get_default_dynamic_quant_module_mappings,
)
import torch.fx.experimental.optimization as optimization
from torch.ao.nn.quantized.modules.utils import _quantize_weight
import torch.ao.nn.quantized.dynamic as nnqd
import intel_extension_for_pytorch._C as core
from intel_extension_for_pytorch.cpu.utils.linear_bn_folding import linear_bn_fuse
from intel_extension_for_pytorch.nn.utils._weight_prepack import (
may_import_deepspeed_modules,
)
from ._quantize_utils import auto_prepare, auto_convert, copy_prepared_model
from .. import nn
from typing import Dict
def prepare(
    model,
    configure,
    example_inputs=None,
    inplace=False,
    bn_folding=True,
    example_kwarg_inputs=None,
):
    r"""
    Prepare an FP32 torch.nn.Module model to do calibration or to convert to quantized model.

    Args:
        model (torch.nn.Module): The FP32 model to be prepared.
        configure (torch.quantization.qconfig.QConfig): The observer settings about activation and weight.
        example_inputs (tuple or torch.Tensor): A tuple of example inputs that
            will be passed to the function while running to init quantization state. Only one of this
            argument or ``example_kwarg_inputs`` should be specified.
        inplace: (bool): It will change the given model in-place if True. The default value is ``False``.
        bn_folding: (bool): whether to perform ``conv_bn`` and ``linear_bn`` folding.
            The default value is ``True``.
        example_kwarg_inputs (dict): A dict of example inputs that will be passed to the function while
            running to init quantization state. Only one of this argument or ``example_inputs`` should be
            specified.

    Returns:
        torch.nn.Module
    """
    assert isinstance(
        model, torch.nn.Module
    ), "Only support nn.Module prepare for quantization path"
    assert isinstance(configure, QConfigMapping) or isinstance(
        configure, QConfig
    ), f"IPEX quantization: prepare configure should be an instance of QConfigMapping or QConfig, but got {type(configure)}"
    if isinstance(configure, QConfig):
        warnings.warn(
            "\nIPEX quantization: QConfig are deprecated. Please use QConfigMapping instead.\nUsage:"
            "\n qconfig_mapping = ipex.quantization.default_static_qconfig_mapping # for static quantization"
            "\n qconfig_mapping = ipex.quantization.default_dynamic_qconfig_mapping # for dynamic quantization"
            "\n prepared_model = ipex.quantization.prepare(model_fp32, qconfig_mapping, ...)"
        )
    if isinstance(configure, QConfigMapping):
        # Only the global qconfig is honored today; per-module/per-op entries
        # in the mapping are not consumed here.
        configure = configure.global_qconfig
    if not isinstance(configure.activation(), PlaceholderObserver):
        # Static quantization traces the model, so example inputs are required.
        assert example_inputs is not None or example_kwarg_inputs is not None, (
            "IPEX quantization.prepare: example_inputs and example_kwarg_inputs cannot be none at same time "
            "for static quantization."
        )
    # auto model channels_last memory format conversion
    from ..frontend import (
        auto_channels_last,
        _convert_convNd_deconvNd_weight_memory_format,
    )

    if auto_channels_last:
        _convert_convNd_deconvNd_weight_memory_format(model)
    if inplace:
        prepare_model = model
    else:
        try:
            prepare_model = copy.deepcopy(model)
        except BaseException:
            # Bugfix: the previous code constructed an AssertionError without
            # raising it, silently swallowing a failed deepcopy and crashing
            # later with an unrelated NameError on prepare_model.
            raise AssertionError(
                "The model's copy is failed, please try set inplace to True to do the prepare"
            )
    if bn_folding:
        try:
            prepare_model = optimization.fuse(prepare_model, inplace=inplace)
            prepare_model = linear_bn_fuse(prepare_model, inplace=inplace)
        except BaseException:
            # BN folding is a best-effort optimization; fall back to the
            # unfused model rather than failing prepare.
            warnings.warn("BatchNorm folding failed during the prepare process.")
    # replace dropout with identity to enable more fusion pattern.
    nn.utils._model_convert.replace_dropout_with_identity(prepare_model)
    assert (
        example_inputs is None or example_kwarg_inputs is None
    ), "IPEX quantization.prepare: example_inputs and example_kwarg_inputs cannot be set at same time."
    # Special case for common case of passing a single Tensor
    if isinstance(example_inputs, (torch.Tensor, dict)):
        example_inputs = (example_inputs,)
    elif not isinstance(example_inputs, tuple) and example_inputs is not None:
        example_inputs = tuple(example_inputs)
    if example_kwarg_inputs is not None:
        assert isinstance(
            example_kwarg_inputs, Dict
        ), "IPEX quantization.prepare: example_kwarg_inputs must be type of Dict."
    return auto_prepare(prepare_model, configure, example_inputs, example_kwarg_inputs)
@functools.lru_cache(None)
def IPEX_DYNAMIC_QUANTIZATION_MODULE_CPU():
    """Build (once, via lru_cache) the module-swap mapping used for dynamic
    quantization on CPU.

    nn.Linear is mapped to DynamicQuantizedLinearLayer rather than the stock
    nnqd.Linear for GPT-J performance: AutoTP models do not support deepcopy,
    so conversion happens inplace, and str(module) in GPT-J invokes
    nnqd.Linear.__repr__, which ends up calling
    torch.ops.quantized.linear_unpack (quite slow):

        torch/ao/nn/quantized/modules/linear.py:__repr__()
          -> _hide_packed_params_repr(self, LinearPackedParams)
        torch/ao/nn/quantized/modules/utils.py:_hide_packed_params_repr()
          -> extra_repr = self.extra_repr()
        torch/ao/nn/quantized/dynamic/modules/linear.py:extra_repr()
          -> ', qscheme={}'.format(self.weight().qscheme())
        torch/ao/nn/quantized/modules/linear.py:weight()
          -> self._weight_bias()[0]
        torch/ao/nn/quantized/modules/linear.py:_weight_bias()
          -> self._packed_params._weight_bias()
          -> torch.ops.quantized.linear_unpack(self._packed_params)

    DynamicQuantizedLinearLayer overrides __repr__ to avoid that path. When
    deepspeed is available, its LinearLayer/LinearAllreduce are mapped too.
    """
    mapping = {torch.nn.Linear: DynamicQuantizedLinearLayer}
    deepspeed_modules = may_import_deepspeed_modules()
    if deepspeed_modules is not None:
        LinearAllreduce, LinearLayer = deepspeed_modules
        mapping.update(
            {
                LinearLayer: DynamicQuantizedLinearLayer,
                LinearAllreduce: DynamicQuantizedLinearAllreduce,
            }
        )
    return mapping
class _IPEXDynamicQuantizedLinear(nnqd.Linear):
    """Shared base for IPEX's dynamic quantized linear variants; provides the
    common float-module -> quantized-module conversion."""

    @classmethod
    def from_float(cls, mod):
        """Create a dynamic quantized module from a float module.

        `mod` must be one of the types returned by cls._float_module() and
        carry a `qconfig` attribute; its weight is observed and quantized to
        qint8, then handed to the subclass-specific _init_cls.
        """
        assert (
            type(mod) in cls._float_module()
        ), "DynamicQuantizedLinearLayer.from_float only works for one of" + str(
            [float_mod.__name__ for float_mod in cls._float_module()]
        )
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
        if mod.qconfig is not None and mod.qconfig.weight is not None:
            weight_observer = mod.qconfig.weight()
        else:
            # fall back to the default dynamic qconfig's weight observer
            from torch.ao.quantization.qconfig import default_dynamic_qconfig

            weight_observer = default_dynamic_qconfig.weight()
        dtype = weight_observer.dtype
        assert dtype in [torch.qint8], (
            "The only supported dtypes for "
            "DynamicQuantizedLinearLayer is qint8 got: {}".format(dtype)
        )
        # run the observer on the weight to collect min/max before quantizing
        weight_observer(mod.weight)
        qweight = _quantize_weight(mod.weight.float(), weight_observer)
        qlinear = cls._init_cls(mod, dtype, qweight)
        return qlinear
class DynamicQuantizedLinearLayer(_IPEXDynamicQuantizedLinear):
    """Dynamic quantized Linear with a cheap __repr__ (the stock nnqd.Linear
    repr calls quantized::linear_unpack, which is slow on the GPT-J path)."""

    @classmethod
    def _init_cls(cls, mod, dtype, qweight):
        # weight is (out_features, in_features); keep the float module's bias
        qlinear = cls(mod.weight.size()[1], mod.weight.size()[0], dtype=dtype)
        qlinear.set_weight_bias(qweight, mod.bias)
        return qlinear

    @classmethod
    @functools.lru_cache(None)
    def _float_module(cls):
        """Float module types this class can be created from; includes
        deepspeed's LinearLayer when deepspeed is installed."""
        _FLOAT_MODULE = [torch.nn.Linear]
        deepspeed_modules = may_import_deepspeed_modules()
        if deepspeed_modules is not None:
            _, LinearLayer = deepspeed_modules
            _FLOAT_MODULE.extend([LinearLayer])
        return _FLOAT_MODULE

    def __repr__(self):
        # Intentionally static: avoids the expensive linear_unpack call made
        # by the parent class's repr.
        return "DynamicQuantizedLinearLayer()"
class DynamicQuantizedLinearAllreduce(_IPEXDynamicQuantizedLinear):
    """Dynamic quantized replacement for deepspeed's LinearAllreduce:
    quantized linear, then an all-reduce over the tensor-parallel group,
    then the bias add (bias must be applied after the reduce)."""

    @classmethod
    def _init_cls(cls, mod, dtype, qweight):
        qlinear = cls(
            mod.weight.size()[1],
            mod.weight.size()[0],
            mod.mp_group,
            mod.bias,
            dtype=dtype,
        )
        # For bias handling, please refer to the comment in __init__ of _IPEXLinearAllreduce
        qlinear.set_weight_bias(qweight, None)
        return qlinear

    @classmethod
    @functools.lru_cache(None)
    def _float_module(cls):
        """Only deepspeed's LinearAllreduce can be converted to this class."""
        deepspeed_modules = may_import_deepspeed_modules()
        assert (
            deepspeed_modules is not None
        ), "DynamicQuantizedLinearAllreduce requires deepspeed to be installed"
        LinearAllreduce, _ = deepspeed_modules
        _FLOAT_MODULE = [LinearAllreduce]
        return _FLOAT_MODULE

    def __init__(
        self,
        in_features,
        out_features,
        mp_group,
        bias_value,
        bias_=True,
        dtype=torch.qint8,
    ):
        # Save the original bias here
        # For bias handling, please refer to the comment in __init__ of _IPEXLinearAllreduce
        super().__init__(in_features, out_features, bias_, dtype=dtype)
        self.mp_group = mp_group
        self.original_bias = bias_value

    def forward(self, x):
        # Quantized matmul without bias (bias is added after the all-reduce).
        if self._packed_params.dtype == torch.qint8:
            # NOTE(review): self.version is presumably set by the parent
            # nnqd.Linear's state-dict machinery — confirm before relying on it.
            if self.version is None or self.version < 4:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params
                )
            else:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params, reduce_range=True
                )
        elif self._packed_params.dtype == torch.float16:
            Y = torch.ops.quantized.linear_dynamic_fp16(
                x, self._packed_params._packed_params
            )
        else:
            raise RuntimeError("Unsupported dtype on dynamic quantized linear!")
        output = Y.to(x.dtype)
        # Sum partial results across the tensor-parallel group.
        if self.mp_group is not None:
            torch.ops.deepspeed_comm.all_reduce(
                output,
                "sum",
                "",
                list(torch.arange(int(os.environ["WORLD_SIZE"]))),
                int(os.environ["WORLD_SIZE"]),
            )
        # Bias is applied once, after the reduce, to avoid adding it per rank.
        if self.original_bias is not None:
            output += self.original_bias
        return output

    def __repr__(self):
        # Static repr: avoids the expensive linear_unpack in the parent repr.
        return "DynamicQuantizedLinearAllreduce()"
def convert(model, inplace=False):
    r"""
    Convert an FP32 prepared model to a model which will automatically insert fake quant
    before a quantizable module or operator.

    Args:
        model (torch.nn.Module): The FP32 model to be convert.
        inplace: (bool): It will change the given model in-place if True. The default value is ``False``.

    Returns:
        torch.nn.Module
    """
    assert isinstance(
        model, torch.nn.Module
    ), "Only support nn.Module convert for quantization path"
    assert hasattr(
        model, "q_config"
    ), "Please do prepare the model before doing convert"
    if inplace:
        convert_model = model
    else:
        try:
            convert_model = copy_prepared_model(model)
        except BaseException:
            # Bugfix: the previous code constructed an AssertionError without
            # raising it, so a failed copy fell through with convert_model
            # unbound and crashed later with a NameError.
            raise AssertionError(
                "The model's copy is failed, please try set inplace to True to do the convert"
            )
    # For weight only quantization. Activation's observer is also PlaceholderObserver.
    if (
        isinstance(convert_model.q_config.activation(), PlaceholderObserver)
        and not convert_model.q_config.activation().is_dynamic
    ):
        qconfig_spec = {
            torch.nn.Linear: convert_model.q_config,
            torch.nn.LSTM: convert_model.q_config,
            torch.nn.GRU: convert_model.q_config,
            torch.nn.LSTMCell: convert_model.q_config,
            torch.nn.RNNCell: convert_model.q_config,
            torch.nn.GRUCell: convert_model.q_config,
        }
        module_mappings = get_default_dynamic_quant_module_mappings().copy()
        # route Linear to the IPEX weight-only-quantization module
        module_mappings[
            torch.nn.Linear
        ] = nn.modules.weight_only_quantization.IpexWoqLinear
        converted_model = torch.quantization.quantize_dynamic(
            convert_model,
            qconfig_spec=qconfig_spec,
            dtype=torch.qint8,
            mapping=module_mappings,
            inplace=False,
        )
        return converted_model
    # If the module's activation's qconfig is PlaceholderObserver,
    # we can say that the module want to run dynamic quantization path.
    if isinstance(convert_model.q_config.activation(), PlaceholderObserver):
        module_mappings = get_default_dynamic_quant_module_mappings()
        qconfig_spec = {
            torch.nn.Linear: convert_model.q_config,
            torch.nn.LSTM: convert_model.q_config,
            torch.nn.GRU: convert_model.q_config,
            torch.nn.LSTMCell: convert_model.q_config,
            torch.nn.RNNCell: convert_model.q_config,
            torch.nn.GRUCell: convert_model.q_config,
        }
        deepspeed_modules = may_import_deepspeed_modules()
        if deepspeed_modules is not None:
            LinearAllreduce, LinearLayer = deepspeed_modules
            # AutoTP models can't be deep-copied, so use the IPEX overrides
            # (cheap __repr__) and convert inplace.
            module_mappings.update(IPEX_DYNAMIC_QUANTIZATION_MODULE_CPU())
            deepspeed_qconfig_spec = {
                LinearLayer: convert_model.q_config,
                LinearAllreduce: convert_model.q_config,
            }
            qconfig_spec.update(deepspeed_qconfig_spec)
        return torch.quantization.quantize_dynamic(
            convert_model,
            qconfig_spec=qconfig_spec,
            mapping=module_mappings,
            inplace=True,
        )
    # Convert linear, conv, and Embedding's weight dtype when use autocast,
    # which will reduce the dtype conversion.
    # TODO: check whether can be removed or not?
    if torch.is_autocast_cpu_enabled() and core.get_autocast_dtype() == torch.bfloat16:
        convert_model = nn.utils._model_convert.convert_model_data_type(
            convert_model, torch.bfloat16
        )[1]
    convert_model = auto_convert(convert_model)
    return convert_model
| 14,603 | 38.684783 | 124 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/fx/concat_linear.py | import torch
import torch.nn as nn
import torch.fx as fx
import torch.fx.experimental.optimization as optimization
import _operator
import copy
import warnings
def concat_linear(model: fx.GraphModule, inplace=False) -> fx.GraphModule:
    """Fuse sibling ``nn.Linear`` layers that consume the same input node.

    Linear layers that (a) read from the same producer node and (b) have
    compatible weight/bias tensors (same device, dtype, requires_grad and
    in_features) are replaced by one wider Linear whose output is split
    back into the original pieces with ``torch.split`` +
    ``operator.getitem``.  One large GEMM is typically faster than
    several small ones.

    Args:
        model: the ``fx.GraphModule`` to transform.
        inplace: when False (default), the model and its graph are
            deep-copied before mutation.

    Returns:
        A new ``fx.GraphModule`` with compatible linears fused.
    """

    def concat(compatible_layers, modules):
        """Build one wide Linear from >=2 compatible Linear modules.

        Returns ``(fused_linear, per_layer_out_channels)``, or ``None``
        when there is nothing to fuse.
        """
        if len(compatible_layers) < 2:
            return None
        base_linear = modules[compatible_layers[0].target]
        input_channel = base_linear.weight.shape[1]
        dtype = base_linear.weight.dtype
        device = base_linear.weight.device
        with_bias = base_linear.bias is not None
        weights = [modules[layer.target].weight for layer in compatible_layers]
        output_channels = [w.shape[0] for w in weights]
        output_channel = sum(output_channels)
        concated_weights = torch.concat(weights, dim=0)
        if with_bias:
            bias = [modules[layer.target].bias for layer in compatible_layers]
            concated_bias = torch.concat(bias, dim=0)
        concat_linear_ = nn.Linear(
            input_channel, output_channel, with_bias, device, dtype
        )
        concat_linear_.weight = torch.nn.Parameter(
            concated_weights, weights[0].requires_grad
        )
        if with_bias:
            concat_linear_.bias = torch.nn.Parameter(
                concated_bias, bias[0].requires_grad
            )
        return concat_linear_, output_channels

    def collectLinearNodes(graph: fx.graph.Graph, modules: dict):
        """Group call_module Linear nodes by the node that feeds them."""
        grouped_linear_nodes = {}
        linear_inputs = []
        for node in graph.nodes:
            if node.target not in modules:
                continue
            if type(modules[node.target]) != nn.Linear:
                continue
            linear_input = node.args[0]
            if linear_input not in grouped_linear_nodes:
                grouped_linear_nodes[linear_input] = [node]
                linear_inputs.append(linear_input)
            else:
                grouped_linear_nodes[linear_input].append(node)
        return grouped_linear_nodes, linear_inputs

    def canConcatLinear(base_linear, other_linear):
        """True when two Linear modules can share one fused weight."""

        def check_compatible(base_tensor, other_tensor):
            if base_tensor is None:
                return other_tensor is None
            if base_tensor.device != other_tensor.device:
                return False
            if base_tensor.dtype != other_tensor.dtype:
                return False
            if base_tensor.requires_grad != other_tensor.requires_grad:
                return False
            if base_tensor.dim() != other_tensor.dim():
                return False
            # Only the leading (output-channel) dim may differ.
            if base_tensor.dim() > 1:
                if base_tensor.shape[1:] != other_tensor.shape[1:]:
                    return False
            return True

        return check_compatible(
            base_linear.weight, other_linear.weight
        ) and check_compatible(base_linear.bias, other_linear.bias)

    def concatLinearNodes(
        grouped_linear_nodes: dict,
        linear_inputs: list,
        modules: dict,
        graph: fx.graph.Graph,
    ):
        """Rewrite the graph one input-group at a time."""
        for linear_input in linear_inputs:
            linear_nodes = grouped_linear_nodes[linear_input]
            if len(linear_nodes) < 2:
                continue
            base_node = linear_nodes[0]
            compatible_layers = [base_node]
            for other_node in linear_nodes[1:]:
                if canConcatLinear(
                    modules[base_node.target], modules[other_node.target]
                ):
                    compatible_layers.append(other_node)
            # BUGFIX: previously, when no second compatible layer existed,
            # concat() returned None and the unpacking below raised
            # "TypeError: cannot unpack non-sequence NoneType".
            if len(compatible_layers) < 2:
                continue
            concated_linear_, output_channels = concat(compatible_layers, modules)
            with graph.inserting_after(base_node):
                split = graph.call_function(
                    torch.split, (base_node, output_channels), {"dim": -1}
                )
            with graph.inserting_after(split):
                getitem_fn = _operator.getitem
                getitems = [
                    graph.call_function(getitem_fn, (split, i))
                    for i in range(len(output_channels))
                ]
            for node, getitem_node in zip(compatible_layers, getitems):
                node.replace_all_uses_with(getitem_node)
                if node is not base_node:
                    graph.erase_node(node)
            # replace_all_uses_with above also rewired split's own input;
            # point it back at the fused linear's output.
            split.update_arg(0, base_node)
            optimization.replace_node_module(base_node, modules, concated_linear_)

    _model: fx.GraphModule = model
    if not inplace:
        _model = copy.deepcopy(model)
    modules = dict(_model.named_modules())
    _graph: fx.graph.Graph = _model.graph
    if not inplace:
        _graph = copy.deepcopy(_graph)
    grouped_linear_nodes, linear_inputs = collectLinearNodes(_graph, modules)
    concatLinearNodes(grouped_linear_nodes, linear_inputs, modules, _graph)
    del grouped_linear_nodes
    del linear_inputs
    return fx.GraphModule(_model, _graph)
def _concat_linear(model: torch.nn.Module, inplace=False) -> fx.GraphModule:
    """Trace *model* and apply ``concat_linear``, picking a tracer per model type.

    Three paths, chosen from what is already imported in ``sys.modules``:
    diffusers ``UNet2DConditionModel`` (per-attention-block dynamo export),
    HuggingFace ``transformers`` models (their own ``symbolic_trace``), and
    otherwise plain ``torch.fx.symbolic_trace``.  On any failure the model
    is returned unchanged after a warning.
    """
    # Checking sys.modules (not importing) keeps diffusers/transformers optional.
    import sys

    if "diffusers" in sys.modules:
        diffusers = sys.modules["diffusers"]
        import torch._dynamo as dynamo

        def apply_concat_linear_on_unet(unet):
            # Recursively fuse the attn1/attn2 projections of every
            # BasicTransformerBlock found under this UNet.
            def prepare_input_for_attn(BasicTransformerBlock):
                in1 = BasicTransformerBlock.attn1.to_q.in_features
                in2 = BasicTransformerBlock.attn2.to_v.in_features
                # The first dimension of hd/ehd (2) corresponds to the user-given
                # batch size; the second dimension of hd (4096, 1024, 256) to the
                # user-given h, w; the second dimension of ehd (77) is the
                # max-seq-length from the text encoder.  None of these dimensions
                # can be read off the unet model itself.  Hard-coding is safe
                # because the guards of dynamo export do not require concrete
                # shapes on these dimensions of hd and ehd.
                ehd = torch.rand(2, 77, in2)
                hd_dict = {
                    320: (2, 4096, 320),
                    640: (2, 1024, 640),
                    1280: (2, 256, 1280),
                }
                # NOTE(review): a width outside {320, 640, 1280} raises KeyError
                # here; it is swallowed by the BaseException handler below.
                hd_shape = hd_dict[in1]
                hd = torch.rand(hd_shape)
                return hd, ehd

            def apply_concat_linear_on_BasicTransformerBlock(BasicTransformerBlock):
                if isinstance(
                    BasicTransformerBlock,
                    diffusers.models.attention.BasicTransformerBlock,
                ):
                    hd, ehd = prepare_input_for_attn(BasicTransformerBlock)
                    inputs1 = {
                        "hidden_states": hd,
                        "encoder_hidden_states": None,
                        "attention_mask": None,
                    }
                    inputs2 = {
                        "hidden_states": hd,
                        "encoder_hidden_states": ehd,
                        "attention_mask": None,
                    }
                    # dynamo.export returns (GraphModule, guards); fuse the
                    # traced attention and swap it back into the block.
                    gm = dynamo.export(BasicTransformerBlock.attn1, **inputs1)[0]
                    concat_gm1 = concat_linear(gm)
                    BasicTransformerBlock.attn1 = concat_gm1
                    gm = dynamo.export(BasicTransformerBlock.attn2, **inputs2)[0]
                    concat_gm2 = concat_linear(gm)
                    BasicTransformerBlock.attn2 = concat_gm2
                return

            for child in unet.children():
                if isinstance(child, diffusers.models.attention.BasicTransformerBlock):
                    apply_concat_linear_on_BasicTransformerBlock(child)
                apply_concat_linear_on_unet(child)

        try:
            unet = diffusers.models.unet_2d_condition.UNet2DConditionModel
            if isinstance(model, unet):
                apply_concat_linear_on_unet(model)
                return model
        except BaseException:
            warnings.warn("failed to apply concat_linear on unet, please report bugs")
    if "transformers" in sys.modules:

        def is_transfomer_model(model):
            name = model.__class__.__module__
            return name.startswith("transformers.models.")

        # NOTE(review): when transformers is imported but the model is NOT a
        # transformers model, neither this branch nor the `else` below runs,
        # so the model is returned untraced — confirm this is intended.
        if is_transfomer_model(model):
            try:
                from transformers.utils.fx import symbolic_trace as hf_symbolic_trace
            except ImportError:
                # fx are not exposed in transformers.utils
                warnings.warn(
                    "failed to import transformers symbolic_trace, cannnot apply concat linear"
                )
            # NOTE(review): after the ImportError above, hf_symbolic_trace is
            # unbound; the NameError raised here is caught below, producing a
            # second (misleading) warning instead of an early return.
            try:
                model: fx.GraphModule = hf_symbolic_trace(
                    model, input_names=["input_ids", "attention_mask", "token_type_ids"]
                )
                return concat_linear(model, inplace)
            except BaseException:
                warnings.warn(
                    "failed to symbolic trace model with transformers symbolic_trace, cannnot apply concat linear"
                )
    else:
        try:
            model: fx.GraphModule = fx.symbolic_trace(model)
            return concat_linear(model, inplace)
        except BaseException:
            warnings.warn(
                "pytorch native symbolic trace failed, may cannnot apply concat linear"
            )
    return model
| 9,423 | 41.071429 | 114 | py |
Seq2Sick | Seq2Sick-master/opts.py | import argparse
from onmt.modules.SRU import CheckSRU
def model_opts(parser):
    """
    These options are passed to the construction of the model.
    Be careful with these as they will be used during translation.

    Registers argument groups for embeddings, embedding features,
    encoder/decoder architecture, and attention/generator options.
    """
    # Embedding Options
    group = parser.add_argument_group('Model-Embeddings')
    group.add_argument('-src_word_vec_size', type=int, default=500,
                       help='Word embedding size for src.')
    group.add_argument('-tgt_word_vec_size', type=int, default=500,
                       help='Word embedding size for tgt.')
    # -1 means "fall back to the per-side sizes above".
    group.add_argument('-word_vec_size', type=int, default=-1,
                       help='Word embedding size for src and tgt.')
    group.add_argument('-share_decoder_embeddings', action='store_true',
                       help="""Use a shared weight matrix for the input and
                       output word embeddings in the decoder.""")
    group.add_argument('-share_embeddings', action='store_true',
                       help="""Share the word embeddings between encoder
                       and decoder. Need to use shared dictionary for this
                       option.""")
    group.add_argument('-position_encoding', action='store_true',
                       help="""Use a sin to mark relative words positions.
                       Necessary for non-RNN style models.
                       """)
    group = parser.add_argument_group('Model-Embedding Features')
    group.add_argument('-feat_merge', type=str, default='concat',
                       choices=['concat', 'sum', 'mlp'],
                       help="""Merge action for incorporating features embeddings.
                       Options [concat|sum|mlp].""")
    group.add_argument('-feat_vec_size', type=int, default=-1,
                       help="""If specified, feature embedding sizes
                       will be set to this. Otherwise, feat_vec_exponent
                       will be used.""")
    group.add_argument('-feat_vec_exponent', type=float, default=0.7,
                       help="""If -feat_merge_size is not set, feature
                       embedding sizes will be set to N^feat_vec_exponent
                       where N is the number of values the feature takes.""")
    # Encoder-Deocder Options
    group = parser.add_argument_group('Model- Encoder-Decoder')
    group.add_argument('-model_type', default='text',
                       help="""Type of source model to use. Allows
                       the system to incorporate non-text inputs.
                       Options are [text|img|audio].""")
    group.add_argument('-encoder_type', type=str, default='rnn',
                       choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
                       help="""Type of encoder layer to use. Non-RNN layers
                       are experimental. Options are
                       [rnn|brnn|mean|transformer|cnn].""")
    group.add_argument('-decoder_type', type=str, default='rnn',
                       choices=['rnn', 'transformer', 'cnn'],
                       help="""Type of decoder layer to use. Non-RNN layers
                       are experimental. Options are
                       [rnn|transformer|cnn].""")
    # -1 means "use enc_layers/dec_layers instead".
    group.add_argument('-layers', type=int, default=-1,
                       help='Number of layers in enc/dec.')
    group.add_argument('-enc_layers', type=int, default=2,
                       help='Number of layers in the encoder')
    group.add_argument('-dec_layers', type=int, default=2,
                       help='Number of layers in the decoder')
    group.add_argument('-rnn_size', type=int, default=500,
                       help='Size of rnn hidden states')
    group.add_argument('-cnn_kernel_width', type=int, default=3,
                       help="""Size of windows in the cnn, the kernel_size is
                       (cnn_kernel_width, 1) in conv layer""")
    group.add_argument('-input_feed', type=int, default=1,
                       help="""Feed the context vector at each time step as
                       additional input (via concatenation with the word
                       embeddings) to the decoder.""")
    # CheckSRU validates at parse time that SRU's requirements are met.
    group.add_argument('-rnn_type', type=str, default='LSTM',
                       choices=['LSTM', 'GRU', 'SRU'],
                       action=CheckSRU,
                       help="""The gate type to use in the RNNs""")
    # group.add_argument('-residual', action="store_true",
    # help="Add residual connections between RNN layers.")
    group.add_argument('-brnn', action=DeprecateAction,
                       help="Deprecated, use `encoder_type`.")
    group.add_argument('-brnn_merge', default='concat',
                       choices=['concat', 'sum'],
                       help="Merge action for the bidir hidden states")
    group.add_argument('-context_gate', type=str, default=None,
                       choices=['source', 'target', 'both'],
                       help="""Type of context gate to use.
                       Do not select for no context gate.""")
    # Attention options
    group = parser.add_argument_group('Model- Attention')
    group.add_argument('-global_attention', type=str, default='general',
                       choices=['dot', 'general', 'mlp'],
                       help="""The attention type to use:
                       dotprod or general (Luong) or MLP (Bahdanau)""")
    # Genenerator and loss options.
    group.add_argument('-copy_attn', action="store_true",
                       help='Train copy attention layer.')
    group.add_argument('-copy_attn_force', action="store_true",
                       help='When available, train to copy.')
    group.add_argument('-coverage_attn', action="store_true",
                       help='Train a coverage attention layer.')
    group.add_argument('-lambda_coverage', type=float, default=1,
                       help='Lambda value for coverage.')
def preprocess_opts(parser):
    """Register options for preprocess.py: corpus paths, vocabulary
    construction, sequence truncation, shuffling and speech features."""
    # Data options
    group = parser.add_argument_group('Data')
    group.add_argument('-data_type', default="text",
                       help="""Type of the source input.
                       Options are [text|img].""")
    group.add_argument('-train_src', required=True,
                       help="Path to the training source data")
    group.add_argument('-train_tgt', required=True,
                       help="Path to the training target data")
    group.add_argument('-valid_src', required=True,
                       help="Path to the validation source data")
    group.add_argument('-valid_tgt', required=True,
                       help="Path to the validation target data")
    group.add_argument('-src_dir', default="",
                       help="Source directory for image or audio files.")
    group.add_argument('-save_data', required=True,
                       help="Output file for the prepared data")
    group.add_argument('-max_shard_size', type=int, default=0,
                       help="""For text corpus of large volume, it will
                       be divided into shards of this size to preprocess.
                       If 0, the data will be handled as a whole. The unit
                       is in bytes. Optimal value should be multiples of
                       64 bytes.""")
    # Dictionary options, for text corpus
    group = parser.add_argument_group('Vocab')
    group.add_argument('-src_vocab',
                       help="Path to an existing source vocabulary")
    group.add_argument('-tgt_vocab',
                       help="Path to an existing target vocabulary")
    group.add_argument('-features_vocabs_prefix', type=str, default='',
                       help="Path prefix to existing features vocabularies")
    group.add_argument('-src_vocab_size', type=int, default=50000,
                       help="Size of the source vocabulary")
    group.add_argument('-tgt_vocab_size', type=int, default=50000,
                       help="Size of the target vocabulary")
    # Words rarer than these thresholds are dropped from the vocabularies.
    group.add_argument('-src_words_min_frequency', type=int, default=0)
    group.add_argument('-tgt_words_min_frequency', type=int, default=0)
    group.add_argument('-dynamic_dict', action='store_true',
                       help="Create dynamic dictionaries")
    group.add_argument('-share_vocab', action='store_true',
                       help="Share source and target vocabulary")
    # Truncation options, for text corpus
    group = parser.add_argument_group('Pruning')
    group.add_argument('-src_seq_length', type=int, default=50,
                       help="Maximum source sequence length")
    group.add_argument('-src_seq_length_trunc', type=int, default=0,
                       help="Truncate source sequence length.")
    group.add_argument('-tgt_seq_length', type=int, default=50,
                       help="Maximum target sequence length to keep.")
    group.add_argument('-tgt_seq_length_trunc', type=int, default=0,
                       help="Truncate target sequence length.")
    group.add_argument('-lower', action='store_true', help='lowercase data')
    # Data processing options
    group = parser.add_argument_group('Random')
    group.add_argument('-shuffle', type=int, default=1,
                       help="Shuffle data")
    group.add_argument('-seed', type=int, default=3435,
                       help="Random seed")
    group = parser.add_argument_group('Logging')
    group.add_argument('-report_every', type=int, default=100000,
                       help="Report status every this many sentences")
    # Options most relevant to speech
    group = parser.add_argument_group('Speech')
    group.add_argument('-sample_rate', type=int, default=16000,
                       help="Sample rate.")
    group.add_argument('-window_size', type=float, default=.02,
                       help="Window size for spectrogram in seconds.")
    group.add_argument('-window_stride', type=float, default=.01,
                       help="Window stride for spectrogram in seconds.")
    group.add_argument('-window', default='hamming',
                       help="Window type for spectrogram generation.")
def train_opts(parser):
    """Register options for train.py: checkpointing, initialization,
    pretrained/fixed embeddings, optimization, learning-rate schedule,
    logging and speech features."""
    # Model loading/saving options
    group = parser.add_argument_group('General')
    group.add_argument('-data', required=True,
                       help="""Path prefix to the ".train.pt" and
                       ".valid.pt" file path from preprocess.py""")
    group.add_argument('-save_model', default='model',
                       help="""Model filename (the model will be saved as
                       <save_model>_epochN_PPL.pt where PPL is the
                       validation perplexity""")
    # GPU
    group.add_argument('-gpuid', default=[], nargs='+', type=int,
                       help="Use CUDA on the listed devices.")
    group.add_argument('-seed', type=int, default=-1,
                       help="""Random seed used for the experiments
                       reproducibility.""")
    # Init options
    group = parser.add_argument_group('Initialization')
    group.add_argument('-start_epoch', type=int, default=1,
                       help='The epoch from which to start')
    group.add_argument('-param_init', type=float, default=0.1,
                       help="""Parameters are initialized over uniform distribution
                       with support (-param_init, param_init).
                       Use 0 to not use initialization""")
    group.add_argument('-train_from', default='', type=str,
                       help="""If training from a checkpoint then this is the
                       path to the pretrained model's state_dict.""")
    # Pretrained word vectors
    group.add_argument('-pre_word_vecs_enc',
                       help="""If a valid path is specified, then this will load
                       pretrained word embeddings on the encoder side.
                       See README for specific formatting instructions.""")
    group.add_argument('-pre_word_vecs_dec',
                       help="""If a valid path is specified, then this will load
                       pretrained word embeddings on the decoder side.
                       See README for specific formatting instructions.""")
    # Fixed word vectors
    group.add_argument('-fix_word_vecs_enc',
                       action='store_true',
                       help="Fix word embeddings on the encoder side.")
    # NOTE(review): this flag affects the decoder, but its help text says
    # "encoder side" — looks like a copy-paste slip in the message.
    group.add_argument('-fix_word_vecs_dec',
                       action='store_true',
                       help="Fix word embeddings on the encoder side.")
    # Optimization options
    group = parser.add_argument_group('Optimization- Type')
    group.add_argument('-batch_size', type=int, default=64,
                       help='Maximum batch size')
    group.add_argument('-max_generator_batches', type=int, default=32,
                       help="""Maximum batches of words in a sequence to run
                       the generator on in parallel. Higher is faster, but
                       uses more memory.""")
    group.add_argument('-epochs', type=int, default=13,
                       help='Number of training epochs')
    group.add_argument('-optim', default='sgd',
                       choices=['sgd', 'adagrad', 'adadelta', 'adam'],
                       help="""Optimization method.""")
    group.add_argument('-adagrad_accumulator_init', type=float, default=0,
                       help="""Initializes the accumulator values in adagrad.
                       Mirrors the initial_accumulator_value option
                       in the tensorflow adagrad (use 0.1 for their default).
                       """)
    group.add_argument('-max_grad_norm', type=float, default=5,
                       help="""If the norm of the gradient vector exceeds this,
                       renormalize it to have the norm equal to
                       max_grad_norm""")
    group.add_argument('-dropout', type=float, default=0.3,
                       help="Dropout probability; applied in LSTM stacks.")
    group.add_argument('-truncated_decoder', type=int, default=0,
                       help="""Truncated bptt.""")
    group.add_argument('-adam_beta1', type=float, default=0.9,
                       help="""The beta1 parameter used by Adam.
                       Almost without exception a value of 0.9 is used in
                       the literature, seemingly giving good results,
                       so we would discourage changing this value from
                       the default without due consideration.""")
    group.add_argument('-adam_beta2', type=float, default=0.999,
                       help="""The beta2 parameter used by Adam.
                       Typically a value of 0.999 is recommended, as this is
                       the value suggested by the original paper describing
                       Adam, and is also the value adopted in other frameworks
                       such as Tensorflow and Kerras, i.e. see:
                       https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
                       https://keras.io/optimizers/ .
                       Whereas recently the paper "Attention is All You Need"
                       suggested a value of 0.98 for beta2, this parameter may
                       not work well for normal models / default
                       baselines.""")
    group.add_argument('-label_smoothing', type=float, default=0.0,
                       help="""Label smoothing value epsilon.
                       Probabilities of all non-true labels
                       will be smoothed by epsilon / (vocab_size - 1).
                       Set to zero to turn off label smoothing.
                       For more detailed information, see:
                       https://arxiv.org/abs/1512.00567""")
    # learning rate
    group = parser.add_argument_group('Optimization- Rate')
    group.add_argument('-learning_rate', type=float, default=1.0,
                       help="""Starting learning rate.
                       Recommended settings: sgd = 1, adagrad = 0.1,
                       adadelta = 1, adam = 0.001""")
    group.add_argument('-learning_rate_decay', type=float, default=0.5,
                       help="""If update_learning_rate, decay learning rate by
                       this much if (i) perplexity does not decrease on the
                       validation set or (ii) epoch has gone past
                       start_decay_at""")
    group.add_argument('-start_decay_at', type=int, default=8,
                       help="""Start decaying every epoch after and including this
                       epoch""")
    group.add_argument('-start_checkpoint_at', type=int, default=0,
                       help="""Start checkpointing every epoch after and including
                       this epoch""")
    group.add_argument('-decay_method', type=str, default="",
                       choices=['noam'], help="Use a custom decay rate.")
    group.add_argument('-warmup_steps', type=int, default=4000,
                       help="""Number of warmup steps for custom decay.""")
    group = parser.add_argument_group('Logging')
    group.add_argument('-report_every', type=int, default=50,
                       help="Print stats at this interval.")
    group.add_argument('-exp_host', type=str, default="",
                       help="Send logs to this crayon server.")
    group.add_argument('-exp', type=str, default="",
                       help="Name of the experiment for logging.")
    group = parser.add_argument_group('Speech')
    # Options most relevant to speech
    group.add_argument('-sample_rate', type=int, default=16000,
                       help="Sample rate.")
    group.add_argument('-window_size', type=float, default=.02,
                       help="Window size for spectrogram in seconds.")
def translate_opts(parser):
    """Register translation/inference options on *parser*.

    Groups: Model (checkpoint path), Data (source/target files), Beam
    (search hyper-parameters), Logging, Efficiency and Speech.

    Note: ``-gl`` previously used ``type=bool``, a well-known argparse
    pitfall (``bool("False") is True``, so any non-empty value parsed as
    True).  It now uses a tolerant string-to-bool converter, so both
    ``-gl True`` and ``-gl False`` behave as a user would expect.
    """

    def _str2bool(v):
        # bool("False") is True, so parse the usual textual spellings.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0', ''):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    group = parser.add_argument_group('Model')
    group.add_argument('-model', required=True,
                       help='Path to model .pt file')
    group = parser.add_argument_group('Data')
    group.add_argument('-data_type', default="text",
                       help="Type of the source input. Options: [text|img].")
    group.add_argument('-src', required=True,
                       help="""Source sequence to decode (one line per
                       sequence)""")
    group.add_argument('-src_dir', default="",
                       help='Source directory for image or audio files')
    group.add_argument('-tgt',
                       help='True target sequence (optional)')
    group.add_argument('-output', default='pred.txt',
                       help="""Path to output the predictions (each line will
                       be the decoded sequence""")
    # Options most relevant to summarization.
    group.add_argument('-dynamic_dict', action='store_true',
                       help="Create dynamic dictionaries")
    group.add_argument('-share_vocab', action='store_true',
                       help="Share source and target vocabulary")
    group = parser.add_argument_group('Beam')
    group.add_argument('-beam_size', type=int, default=5,
                       help='Beam size')
    # Alpha and Beta values for Google Length + Coverage penalty
    # Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
    group.add_argument('-alpha', type=float, default=0.,
                       help="""Google NMT length penalty parameter
                       (higher = longer generation)""")
    group.add_argument('-beta', type=float, default=-0.,
                       help="""Coverage penalty parameter""")
    group.add_argument('-max_sent_length', type=int, default=100,
                       help='Maximum sentence length.')
    group.add_argument('-replace_unk', action="store_true",
                       help="""Replace the generated UNK tokens with the
                       source token that had highest attention weight. If
                       phrase_table is provided, it will lookup the
                       identified source token and give the corresponding
                       target token. If it is not provided(or the identified
                       source token does not exist in the table) then it
                       will copy the source token""")
    group = parser.add_argument_group('Logging')
    group.add_argument('-verbose', action="store_true",
                       help='Print scores and predictions for each sentence')
    group.add_argument('-attn_debug', action="store_true",
                       help='Print best attn for each word')
    group.add_argument('-dump_beam', type=str, default="",
                       help='File to dump beam information to.')
    group.add_argument('-n_best', type=int, default=1,
                       help="""If verbose is set, will output the n_best
                       decoded sentences""")
    group = parser.add_argument_group('Efficiency')
    group.add_argument('-batch_size', type=int, default=30,
                       help='Batch size')
    group.add_argument('-gpu', type=int, default=-1,
                       help="Device to run on")
    # BUGFIX: was type=bool, under which any non-empty string parsed as True.
    group.add_argument('-gl', type=_str2bool, default=False,
                       help="Group Loss or not")
    # Options most relevant to speech.
    group = parser.add_argument_group('Speech')
    group.add_argument('-sample_rate', type=int, default=16000,
                       help="Sample rate.")
    group.add_argument('-window_size', type=float, default=.02,
                       help='Window size for spectrogram in seconds')
    group.add_argument('-window_stride', type=float, default=.01,
                       help='Window stride for spectrogram in seconds')
    group.add_argument('-window', default='hamming',
                       help='Window type for spectrogram generation')
def attack_opts(parser):
    """Register adversarial-attack options on *parser*.

    Mirrors ``translate_opts`` and adds the attack-specific flags
    ``-gr`` (gradient regularization), ``-tar_dir`` (target keywords)
    and ``-nn`` (nearest-neighbour search).

    Note: ``-gl``, ``-gr`` and ``-nn`` previously used ``type=bool``, a
    well-known argparse pitfall (``bool("False") is True``).  They now
    use a tolerant string-to-bool converter, so both ``-gl True`` and
    ``-gl False`` behave as a user would expect.
    """

    def _str2bool(v):
        # bool("False") is True, so parse the usual textual spellings.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0', ''):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    group = parser.add_argument_group('Model')
    group.add_argument('-model', required=True,
                       help='Path to model .pt file')
    group = parser.add_argument_group('Data')
    group.add_argument('-data_type', default="text",
                       help="Type of the source input. Options: [text|img].")
    group.add_argument('-src', required=True,
                       help="""Source sequence to decode (one line per
                       sequence)""")
    group.add_argument('-src_dir', default="",
                       help='Source directory for image or audio files')
    group.add_argument('-tgt',
                       help='True target sequence (optional)')
    group.add_argument('-output', default='pred.txt',
                       help="""Path to output the predictions (each line will
                       be the decoded sequence""")
    # Options most relevant to summarization.
    group.add_argument('-dynamic_dict', action='store_true',
                       help="Create dynamic dictionaries")
    group.add_argument('-share_vocab', action='store_true',
                       help="Share source and target vocabulary")
    group = parser.add_argument_group('Beam')
    group.add_argument('-beam_size', type=int, default=5,
                       help='Beam size')
    # Alpha and Beta values for Google Length + Coverage penalty
    # Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
    group.add_argument('-alpha', type=float, default=0.,
                       help="""Google NMT length penalty parameter
                       (higher = longer generation)""")
    group.add_argument('-beta', type=float, default=-0.,
                       help="""Coverage penalty parameter""")
    group.add_argument('-max_sent_length', type=int, default=100,
                       help='Maximum sentence length.')
    group.add_argument('-replace_unk', action="store_true",
                       help="""Replace the generated UNK tokens with the
                       source token that had highest attention weight. If
                       phrase_table is provided, it will lookup the
                       identified source token and give the corresponding
                       target token. If it is not provided(or the identified
                       source token does not exist in the table) then it
                       will copy the source token""")
    group = parser.add_argument_group('Logging')
    group.add_argument('-verbose', action="store_true",
                       help='Print scores and predictions for each sentence')
    group.add_argument('-attn_debug', action="store_true",
                       help='Print best attn for each word')
    group.add_argument('-dump_beam', type=str, default="",
                       help='File to dump beam information to.')
    group.add_argument('-n_best', type=int, default=1,
                       help="""If verbose is set, will output the n_best
                       decoded sentences""")
    group = parser.add_argument_group('Efficiency')
    group.add_argument('-batch_size', type=int, default=30,
                       help='Batch size')
    group.add_argument('-gpu', type=int, default=-1,
                       help="Device to run on")
    # BUGFIX: these three were type=bool, under which any non-empty
    # string (including "False") parsed as True.
    group.add_argument('-gl', type=_str2bool, default=False,
                       help="Group Loss or not")
    group.add_argument('-gr', type=_str2bool, default=False,
                       help="Gradient Regularization or not")
    group.add_argument('-tar_dir', default="",
                       help="target keyword directory")
    group.add_argument('-nn', type=_str2bool, default=False,
                       help="Nearest neighbour search or not")
    # Options most relevant to speech.
    group = parser.add_argument_group('Speech')
    group.add_argument('-sample_rate', type=int, default=16000,
                       help="Sample rate.")
    group.add_argument('-window_size', type=float, default=.02,
                       help='Window size for spectrogram in seconds')
    group.add_argument('-window_stride', type=float, default=.01,
                       help='Window stride for spectrogram in seconds')
    group.add_argument('-window', default='hamming',
                       help='Window type for spectrogram generation')
def add_md_help_argument(parser):
    """Attach a ``-md`` flag that prints Markdown-formatted help and exits."""
    parser.add_argument(
        '-md',
        action=MarkdownHelpAction,
        help='print Markdown-formatted help text and exit.',
    )
# MARKDOWN boilerplate
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class MarkdownHelpFormatter(argparse.HelpFormatter):
    """A really bare-bones argparse help formatter that generates valid markdown.

    This will generate something like::

        # Options: prog
        ### **section heading**
        * **-argument-one [default]**
        argument-one help text
    """

    def _format_usage(self, usage, actions, groups, prefix):
        # The usage line has no place in the Markdown rendering.
        return ""

    def format_help(self):
        # BUGFIX: a stray debug ``print(self._prog)`` here used to write
        # the program name to stdout every time help was formatted.
        self._root_section.heading = '# Options: %s' % self._prog
        return super(MarkdownHelpFormatter, self).format_help()

    def start_section(self, heading):
        # Render each argument group as a level-3 Markdown heading.
        super(MarkdownHelpFormatter, self)\
            .start_section('### **%s**' % heading)

    def _format_action(self, action):
        # The built-in -help/-md flags are noise in the generated docs.
        if action.dest == "help" or action.dest == "md":
            return ""
        lines = []
        lines.append('* **-%s %s** ' % (action.dest,
                                        "[%s]" % action.default
                                        if action.default else "[]"))
        if action.help:
            help_text = self._expand_help(action)
            lines.extend(self._split_lines(help_text, 80))
        lines.extend(['', ''])
        return '\n'.join(lines)
class MarkdownHelpAction(argparse.Action):
def __init__(self, option_strings,
dest=argparse.SUPPRESS, default=argparse.SUPPRESS,
**kwargs):
super(MarkdownHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parser.formatter_class = MarkdownHelpFormatter
parser.print_help()
parser.exit()
class DeprecateAction(argparse.Action):
    """Argparse action that rejects a deprecated flag with a clear error."""

    def __init__(self, option_strings, dest, help=None, **kwargs):
        super(DeprecateAction, self).__init__(
            option_strings, dest, nargs=0, help=help, **kwargs)

    def __call__(self, parser, namespace, values, flag_name):
        # The registered help text doubles as the migration hint.
        hint = "" if self.help is None else self.help
        raise argparse.ArgumentTypeError(
            "Flag '%s' is deprecated. %s" % (flag_name, hint))
| 28,909 | 48.588336 | 88 | py |
Seq2Sick | Seq2Sick-master/attack.py | #!/usr/bin/env python
from __future__ import division, unicode_literals
import os
import argparse
import math
import codecs
import torch
from torch.autograd import Variable
import numpy as np
from itertools import count
import onmt.io
import onmt.translate
import onmt
import onmt.ModelConstructor
import onmt.modules
import opts
import torch.nn as nn
# Command-line setup runs at import time: build the parser, register the
# Markdown-help and attack option groups, then parse sys.argv immediately
# (so importing this module as a library will also trigger argument parsing).
parser = argparse.ArgumentParser(
    description='attack.py',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.attack_opts(parser)
opt = parser.parse_args()
def attack(all_word_embedding, label_onehot, translator, src, batch, new_embedding, input_embedding, modifier, const, GROUP_LASSO, TARGETED, GRAD_REG, NN):
    """Optimize an adversarial perturbation ``modifier`` on word embeddings.

    Gradient-descent loop (up to 200 steps per learning rate in ``lr_a``):
    each step perturbs ``input_embedding`` with ``modifier``, optionally
    projects every position onto its nearest embedding in
    ``all_word_embedding`` (skipped when ``NN``), scores the perturbed
    input through ``translator.getOutput``, and minimizes a hinge loss —
    targeted toward ``label_onehot`` when ``TARGETED``, away from it
    otherwise — plus a regularizer on ``modifier``.  When ``GROUP_LASSO``,
    a per-position proximal shrinkage is applied to ``modifier`` before
    the gradient step.

    Requires CUDA (tensors are moved with ``.cuda()``).  Uses the legacy
    ``Variable``/``.data[0]`` API — presumably written for PyTorch <= 0.3;
    confirm before running on modern PyTorch.

    Returns:
        (modifier, output_a, attn, new_word_list, output_i, CFLAG) where
        CFLAG signals early success (attack converged before step 200).
    """
    # Learning-rate schedule depends on the attack mode.
    if TARGETED:
        lr_a = [0.1,0.5]
    else:
        lr_a = [2]
    if NN:
        lr_a= [0.1]
    cur_best = Variable(torch.zeros(1)).cuda()
    cur_best.data[0] = 999
    cur_best_modi = 999
    # FLAG = False
    m = label_onehot.size()[0]  # number of target positions
    for lr in lr_a:
        CFLAG = True
        for k in range(200):
            #loss1=0
            new_word_list=[]
            loss1 = Variable(torch.zeros(1)).cuda()
            loss2 = Variable(torch.zeros(1)).cuda()
            if NN:
                # Nearest-neighbour mode: use the raw perturbed embedding.
                for i in range(input_embedding.size()[0]):
                    new_embedding[i] = modifier[i] + input_embedding[i]
            else:
                # Project each perturbed position onto the closest real
                # word embedding (L2 distance over the vocabulary).
                for i in range(input_embedding.size()[0]):
                    new_embedding[i] = modifier[i] + input_embedding[i]
                    new_placeholder = new_embedding[i].data
                    temp_place = new_placeholder.expand_as(all_word_embedding)
                    new_dist = torch.norm(temp_place - all_word_embedding.data, 2 ,2)
                    v_dist = Variable(new_dist, requires_grad = True)
                    _ , new_word = torch.min(new_dist,0)
                    min_dist, _ = torch.min(v_dist, 0)
                    new_word_list.append(new_word)
                    new_embedding.data[i] = all_word_embedding[new_word[0]].data
                    del temp_place
            output_a, attn, output_i= translator.getOutput(new_embedding, src, batch)
            if TARGETED:
                n = output_a.size()[0]
                # `mask` remembers an output position already satisfying the
                # target so it is dropped from subsequent hinge terms.
                # NOTE(review): `if mask:` treats position 0 as "no mask" —
                # confirm index 0 can never be a valid masked position.
                mask = None
                for iter_ind in range(m):
                    if mask:
                        if mask == output_a.size()[0]-1:
                            #print("b")
                            #print(t_loss.data[0],mask,output_a.size()[0], fake_onehot.size()[0])
                            output_a = output_a[0:mask,:]
                            fake_onehot = fake_onehot[0:mask,:]
                        else:
                            output_a = torch.cat((output_a[0:mask,:],output_a[mask+1:,:]))
                            fake_onehot = torch.cat((fake_onehot[0:mask,:], fake_onehot[mask+1:,:]))
                        mask = None
                    placeholder = label_onehot[iter_ind].clone()
                    fake_onehot = placeholder.expand_as(output_a)
                    # Hinge: push target-class logit above the best other class.
                    real, reali = torch.max (torch.mul(output_a, fake_onehot),1)
                    other, otheri = torch.max(torch.mul(output_a, (1-fake_onehot)) - fake_onehot*10000, 1)
                    t_loss, t_pos = torch.min(torch.clamp(other-real, min=0),0)
                    if t_loss.data[0] < 0:
                        mask = t_pos.data[0]
                        # print(mask)
                    # if FLAG:
                    #     print(t_loss.data)
                    loss1 = loss1 + t_loss
            else:
                # Untargeted: align lengths, then push the true-class logits
                # below the best competing class at every position.
                if output_a.size()[0] > label_onehot.size()[0]:
                    output_a = output_a[:label_onehot.size()[0],:]
                else:
                    label_onehot = label_onehot[:output_a.size()[0],:]
                real, reali = torch.max(torch.mul(output_a, label_onehot),1)
                other, otheri = torch.max(torch.mul(output_a, (1-label_onehot)),1)
                loss1 = torch.sum(torch.clamp(real-other,min=0))
                print(loss1.data[0],"\t", torch.norm(modifier.data))
            # Track the best (smallest-perturbation) successful attack so far.
            if loss1.data[0]<= 0 :
                #print(loss1.data[0],"\t", torch.norm(modifier.data))
                if torch.norm(modifier.data) < cur_best_modi:
                    print(loss1.data[0],"\t", torch.norm(modifier.data))
                    cur_best_modi = torch.norm(modifier.data)
                    best_word = new_word_list
                    best_output_a = output_a.clone()
                    best_attn = attn
                    best_output_i = output_i.clone()
                #
                #break
            if loss1.data[0] < cur_best.data[0]:
                print(cur_best.data[0],"\t", torch.norm(modifier.data))
                cur_best = loss1.clone()
                best_word = new_word_list
                best_output_a = output_a.clone()
                best_attn = attn
                best_output_i = output_i.clone()
            #     FLAG = True
            # else:
            #     FLAG = False
            #     print(cur_best.data[0],"\t", torch.norm(modifier.data))
            # Last step: report the best snapshot; CFLAG marks convergence.
            if k == 199:
                new_word_list = best_word
                output_a = best_output_a
                attn = best_attn
                output_i = best_output_i
                if cur_best.data[0] <= 0:
                    break
                CFLAG = False
            print("lr=",lr)
            loss2 = torch.max(modifier)  # L-inf-style penalty on the perturbation
            #loss2 = torch.sum(modifier * modifier)
            #print(loss2.data[0])
            if GRAD_REG:
                loss = const * loss1 + min_dist + loss2
            else:
                loss = const * loss1 + loss2
            loss.backward(retain_graph=True)
            #modifier.data -= lr * modifier.grad.data
            if GROUP_LASSO:
                # Proximal group-lasso update: shrink each position's row
                # toward zero, zeroing rows with small L2 norm entirely.
                # NOTE(review): the hard-coded 500 below assumes a fixed
                # embedding width of 500 — confirm against the model.
                gamma = lr
                l2dist = torch.norm(modifier, 2, 2)
                lidist,_ = torch.max(modifier,2)  # NOTE(review): lidist is unused
                #print(lidist)
                for j in range(input_embedding.size()[0]):
                    if l2dist.data[j][0] > gamma * const:
                        modifier.data[j] = modifier.data[j] - gamma*const* modifier.data[j]/l2dist.data[j][0]
                    else:
                        modifier.data[j] = torch.zeros(1,500).cuda()
            modifier.data -= lr * modifier.grad.data
            modifier.grad.data.zero_()
        if CFLAG:
            break
    return modifier, output_a, attn, new_word_list, output_i, CFLAG
def main():
    """Run the Seq2Sick attack over the test set and write adversarial outputs.

    For every source sentence: translate it, build a one-hot output target
    (the model's own prediction in the untargeted case, or the sentences in
    opt.tar_dir in the targeted case), optimize an additive perturbation on
    the source word embeddings via the module-level attack() function,
    project the perturbed embeddings back to vocabulary words, and
    re-translate the adversarial source.

    Relies on module-level globals: `opt` (parsed CLI options), `attack`,
    and the imported onmt/torch modules.
    """
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    #print(opt)
    # Load the model.
    fields, model, model_opt = \
        onmt.ModelConstructor.load_test_model(opt, dummy_opt.__dict__)
    #print(model_opt)
    n_src = len(fields['src'].vocab)
    n_tgt = len(fields['tgt'].vocab)
    # File to write sentences to.
    out_file = codecs.open(opt.output, 'w', 'utf-8')
    # Test data
    data = onmt.io.build_dataset(fields, opt.data_type,
                                 opt.src, opt.tgt,
                                 src_dir=opt.src_dir,
                                 sample_rate=opt.sample_rate,
                                 window_size=opt.window_size,
                                 window_stride=opt.window_stride,
                                 window=opt.window,
                                 use_filter_pred=False)
    # batch_size=1: the attack optimizes one sentence at a time.
    test_data = onmt.io.OrderedIterator(
        dataset=data, device=opt.gpu,
        batch_size=1, train=False, sort=False,
        shuffle=False)
    # Translator
    scorer = onmt.translate.GNMTGlobalScorer(opt.alpha, opt.beta)
    translator = onmt.translate.Translator(model, fields,
                                           beam_size=opt.beam_size,
                                           n_best=opt.n_best,
                                           global_scorer=scorer,
                                           max_length=opt.max_sent_length,
                                           copy_attn=model_opt.copy_attn,
                                           cuda=opt.cuda,
                                           beam_trace=opt.dump_beam != "")
    builder = onmt.translate.TranslationBuilder(
        data, translator.fields,
        opt.n_best, opt.replace_unk, opt.tgt)
    # Statistics
    counter = count(1)
    pred_score_total, pred_words_total = 0, 0
    gold_score_total, gold_words_total = 0, 0
    pdist = nn.PairwiseDistance(p=2)
    # Attack configuration flags taken from the CLI options.
    if opt.tar_dir:
        TARGETED = True
    else:
        TARGETED = False
    GROUP_LASSO = opt.gl
    GRAD_REG = opt.gr
    NN = opt.nn
    const = 1
    if TARGETED:
        # NOTE(review): targets_list is never used afterwards (target_list,
        # built per batch below, is the one consumed).
        targets_list = []
        tar = onmt.io.build_dataset(fields, opt.data_type,
                                    opt.src, opt.tar_dir,
                                    src_dir=opt.tar_dir,
                                    sample_rate=opt.sample_rate,
                                    window_size=opt.window_size,
                                    window_stride=opt.window_stride,
                                    window=opt.window,
                                    use_filter_pred=False)
        tar_data = onmt.io.OrderedIterator(
            dataset=tar, device=opt.gpu,
            batch_size=1, train=False, sort=False,
            shuffle=False)
    # Embedding matrix of the whole source vocabulary, used for the
    # nearest-neighbor projection of perturbed embeddings.
    all_index = Variable(torch.LongTensor(range(n_src)).view(n_src,1,1).cuda())
    all_word_embedding, _ = translator.getEmbedding(all_index, FLAG=False)
    for batch in test_data:
        batch_data = translator.translate_batch(batch, data)
        predBatch = batch_data["predictions"]
        translations = builder.from_batch(batch_data)
        if TARGETED:
            # Targeted attack: the goal sequence comes from the target corpus.
            target_list = []
            for target in tar_data:
                target_inds = onmt.io.make_features(target, 'tgt')
                target_list.append(target_inds.data.cpu().view(-1,1))
            #print(target_list)
            true_label = target_list[0]
            #print(true_label)
        else:
            # Untargeted: the goal is defined w.r.t. the model's own prediction.
            label_data = translator.translate_batch(batch, data)
            pred = label_data["predictions"]
            true_label=torch.LongTensor(pred[0][0]).view(-1,1)
        # One-hot encode the (predicted or target) output token sequence.
        label_onehot = torch.FloatTensor(true_label.size()[0], n_tgt)
        label_onehot.zero_()
        label_onehot.scatter_(1,true_label,1)
        if TARGETED:
            # Drop the first/last rows (presumably BOS/EOS) — TODO confirm.
            label_onehot = label_onehot[1:-1,:]
        label_onehot = Variable(label_onehot, requires_grad = False).cuda()
        #print(label_onehot)
        #print(batch)
        input_embedding, src= translator.getEmbedding(batch)
        #print(src)
        hidden_size = input_embedding.size()[2]
        # NOTE(review): both branches are identical; a different
        # initialization for the group-lasso case may have been intended.
        if GROUP_LASSO:
            modifier_initial = torch.zeros(input_embedding.size()).cuda()
        else:
            modifier_initial = torch.zeros(input_embedding.size()).cuda()
        modifier = Variable(modifier_initial, requires_grad = True)
        #print(input_embedding)
        new_embedding = input_embedding.clone()
        # Optimize the perturbation (attack() is defined earlier in this file).
        modifier, output_a, attn, new_word, output_i, CFLAG = attack(all_word_embedding, label_onehot, translator, src, batch, new_embedding, input_embedding, modifier, const, GROUP_LASSO, TARGETED, GRAD_REG, NN)
        words_list = builder.get_word(output_i, attn, batch)
        print(words_list)
        # NOTE(review): this clone is immediately overwritten by the next line.
        new_embedding = input_embedding.clone()
        new_embedding = modifier + input_embedding
        if NN:
            # Project every perturbed embedding onto its nearest vocabulary word.
            changed_words=[]
            for i in range(input_embedding.size()[0]):
                dis = []
                for dic_embedding_index in range(all_word_embedding.size()[0]):
                    #if dic_embedding_index == src.data[index][0][0]:
                    # continue
                    new_dist = pdist(new_embedding[i], all_word_embedding[dic_embedding_index])
                    dis.append(new_dist.data[0][0])
                print(min(dis), np.argmin(dis))
                changed_words.append(np.argmin(dis))
            print(changed_words)
            new_word = changed_words
        #print(new_word)
        # Substitute the adversarial word ids into the source and re-translate.
        newsrc = src.clone()
        for i in range(input_embedding.size()[0]):
            newsrc.data[i][0] = new_word[i]
        print(builder.get_source(newsrc, batch))
        new_pred = translator.translate_batch(batch,data, newsrc=newsrc, FLAG=False)
        predBatch = builder.from_batch(new_pred)
        for trans in predBatch:
            n_best_preds = [" ".join(pred) for pred in trans.pred_sents[:opt.n_best]]
        for trans in translations:
            o_preds = [" ".join(pred) for pred in trans.pred_sents[:opt.n_best]]
        print(n_best_preds)
        # Output line: adversarial source \t\t adversarial prediction
        # \t\t original prediction.
        out_file.write(''.join(builder.get_source(newsrc, batch)))
        out_file.write('\t\t')
        out_file.write(n_best_preds[0])
        out_file.write('\t\t')
        out_file.write(o_preds[0])
        out_file.write('\n')
        out_file.flush()
if __name__ == "__main__":
main()
| 13,084 | 40.805112 | 212 | py |
Seq2Sick | Seq2Sick-master/translate.py | #!/usr/bin/env python
from __future__ import division, unicode_literals
import os
import argparse
import math
import codecs
import torch
from itertools import count
import onmt.io
import onmt.translate
import onmt
import onmt.ModelConstructor
import onmt.modules
import opts
parser = argparse.ArgumentParser(
description='translate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.translate_opts(parser)
opt = parser.parse_args()
def main():
    """Translate opt.src with a trained model and write n-best outputs.

    Standard OpenNMT-py translation driver: loads the checkpoint named by
    the module-level `opt`, iterates the test set batch by batch, writes
    the n-best predictions to opt.output, then reports PRED (and, when a
    gold target is given, GOLD) average score and perplexity.
    """
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Load the model.
    fields, model, model_opt = \
        onmt.ModelConstructor.load_test_model(opt, dummy_opt.__dict__)
    # File to write sentences to.
    out_file = codecs.open(opt.output, 'w', 'utf-8')
    # Test data
    data = onmt.io.build_dataset(fields, opt.data_type,
                                 opt.src, opt.tgt,
                                 src_dir=opt.src_dir,
                                 sample_rate=opt.sample_rate,
                                 window_size=opt.window_size,
                                 window_stride=opt.window_stride,
                                 window=opt.window,
                                 use_filter_pred=False)
    test_data = onmt.io.OrderedIterator(
        dataset=data, device=opt.gpu,
        batch_size=opt.batch_size, train=False, sort=False,
        shuffle=False)
    # Translator
    scorer = onmt.translate.GNMTGlobalScorer(opt.alpha, opt.beta)
    translator = onmt.translate.Translator(model, fields,
                                           beam_size=opt.beam_size,
                                           n_best=opt.n_best,
                                           global_scorer=scorer,
                                           max_length=opt.max_sent_length,
                                           copy_attn=model_opt.copy_attn,
                                           cuda=opt.cuda,
                                           beam_trace=opt.dump_beam != "")
    builder = onmt.translate.TranslationBuilder(
        data, translator.fields,
        opt.n_best, opt.replace_unk, opt.tgt)
    # Statistics
    counter = count(1)
    pred_score_total, pred_words_total = 0, 0
    gold_score_total, gold_words_total = 0, 0
    for batch in test_data:
        batch_data = translator.translate_batch(batch, data)
        translations = builder.from_batch(batch_data)
        for trans in translations:
            pred_score_total += trans.pred_scores[0]
            pred_words_total += len(trans.pred_sents[0])
            if opt.tgt:
                gold_score_total += trans.gold_score
                gold_words_total += len(trans.gold_sent)
            n_best_preds = [" ".join(pred)
                            for pred in trans.pred_sents[:opt.n_best]]
            out_file.write('\n'.join(n_best_preds))
            out_file.write('\n')
            out_file.flush()
            if opt.verbose:
                sent_number = next(counter)
                output = trans.log(sent_number)
                os.write(1, output.encode('utf-8'))

    def report_score(name, score_total, words_total):
        # Average model score per word and the corresponding perplexity.
        print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
            name, score_total / words_total,
            name, math.exp(-score_total/words_total)))
    report_score('PRED', pred_score_total, pred_words_total)
    if opt.tgt:
        report_score('GOLD', gold_score_total, gold_words_total)
    if opt.dump_beam:
        import json
        json.dump(translator.beam_accum,
                  codecs.open(opt.dump_beam, 'w', 'utf-8'))
if __name__ == "__main__":
main()
| 3,842 | 31.846154 | 74 | py |
Seq2Sick | Seq2Sick-master/train.py | #!/usr/bin/env python
from __future__ import division
import argparse
import glob
import os
import sys
import random
import torch
import torch.nn as nn
from torch import cuda
import onmt
import onmt.io
import onmt.Models
import onmt.ModelConstructor
import onmt.modules
from onmt.Utils import use_gpu
import opts
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# opts.py
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
opt.brnn = (opt.encoder_type == "brnn")
if opt.seed > 0:
random.seed(opt.seed)
torch.manual_seed(opt.seed)
if opt.rnn_type == "SRU" and not opt.gpuid:
raise AssertionError("Using SRU requires -gpuid set.")
if torch.cuda.is_available() and not opt.gpuid:
print("WARNING: You have a CUDA device, should run with -gpuid 0")
if opt.gpuid:
cuda.set_device(opt.gpuid[0])
if opt.seed > 0:
torch.cuda.manual_seed(opt.seed)
if len(opt.gpuid) > 1:
sys.stderr.write("Sorry, multigpu isn't supported yet, coming soon!\n")
sys.exit(1)
# Set up the Crayon logging server.
if opt.exp_host != "":
from pycrayon import CrayonClient
cc = CrayonClient(hostname=opt.exp_host)
experiments = cc.get_experiment_names()
print(experiments)
if opt.exp in experiments:
cc.remove_experiment(opt.exp)
experiment = cc.create_experiment(opt.exp)
def report_func(epoch, batch, num_batches,
                start_time, lr, report_stats):
    """
    This is the user-defined batch-level training progress
    report function.
    Args:
        epoch(int): current epoch count.
        batch(int): current batch count.
        num_batches(int): total number of batches.
        start_time(float): last report time.
        lr(float): current learning rate.
        report_stats(Statistics): old Statistics instance.
    Returns:
        report_stats(Statistics): updated Statistics instance.
    """
    # -1 % N == N-1, so this fires on the last batch of every
    # `report_every`-sized window.
    if batch % opt.report_every == -1 % opt.report_every:
        report_stats.output(epoch, batch+1, num_batches, start_time)
        if opt.exp_host:
            report_stats.log("progress", experiment, lr)
        # Reset the accumulated statistics after each report.
        report_stats = onmt.Statistics()
    return report_stats
def make_train_data_iter(train_dataset, opt):
    """Return the ordered iterator the trainer loops over each train epoch.

    A simple ordered strategy is used here; a more sophisticated scheme
    (e.g. curriculum learning) could be substituted.
    """
    if opt.gpuid:
        target_device = opt.gpuid[0]
    else:
        target_device = -1
    return onmt.io.OrderedIterator(
        dataset=train_dataset,
        batch_size=opt.batch_size,
        device=target_device,
        repeat=False)
def make_valid_data_iter(valid_dataset, opt):
    """Return the ordered iterator used to sweep the validation set.

    Simple ordered iteration: batches are sorted and the iterator is in
    evaluation mode (train=False).
    """
    if opt.gpuid:
        target_device = opt.gpuid[0]
    else:
        target_device = -1
    return onmt.io.OrderedIterator(
        dataset=valid_dataset,
        batch_size=opt.batch_size,
        device=target_device,
        train=False,
        sort=True)
def make_loss_compute(model, tgt_vocab, dataset, opt):
    """
    This returns user-defined LossCompute object, which is used to
    compute loss in train/validate process. You can implement your
    own *LossCompute class, by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        # Copy-attention models need the copy-generator loss.
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, dataset, opt.copy_attn_force)
    else:
        compute = onmt.Loss.NMTLossCompute(model.generator, tgt_vocab,
                                           opt.label_smoothing)
    if use_gpu(opt):
        compute.cuda()
    return compute
def train_model(model, train_dataset, valid_dataset,
                fields, optim, model_opt):
    """Run the full training loop: one train + validate pass per epoch,
    optional remote logging, LR scheduling and checkpointing.

    Hyper-parameters are read from the module-level `opt`.
    """
    train_iter = make_train_data_iter(train_dataset, opt)
    valid_iter = make_valid_data_iter(valid_dataset, opt)
    train_loss = make_loss_compute(model, fields["tgt"].vocab,
                                   train_dataset, opt)
    valid_loss = make_loss_compute(model, fields["tgt"].vocab,
                                   valid_dataset, opt)
    trunc_size = opt.truncated_decoder  # Badly named...
    shard_size = opt.max_generator_batches
    data_type = train_dataset.data_type
    trainer = onmt.Trainer(model, train_iter, valid_iter,
                           train_loss, valid_loss, optim,
                           trunc_size, shard_size, data_type)
    for epoch in range(opt.start_epoch, opt.epochs + 1):
        print('')
        # 1. Train for one epoch on the training set.
        train_stats = trainer.train(epoch, report_func)
        print('Train perplexity: %g' % train_stats.ppl())
        print('Train accuracy: %g' % train_stats.accuracy())
        # 2. Validate on the validation set.
        valid_stats = trainer.validate()
        print('Validation perplexity: %g' % valid_stats.ppl())
        print('Validation accuracy: %g' % valid_stats.accuracy())
        # 3. Log to remote server.
        if opt.exp_host:
            train_stats.log("train", experiment, optim.lr)
            valid_stats.log("valid", experiment, optim.lr)
        # 4. Update the learning rate
        trainer.epoch_step(valid_stats.ppl(), epoch)
        # 5. Drop a checkpoint if needed.
        if epoch >= opt.start_checkpoint_at:
            trainer.drop_checkpoint(model_opt, epoch, fields, valid_stats)
def check_save_model_path():
    """Create the directory that will hold opt.save_model, if missing."""
    target_dir = os.path.dirname(os.path.abspath(opt.save_model))
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
def tally_parameters(model):
    """Print the total, encoder, and decoder parameter counts of *model*.

    Fixes the original condition `'decoder' or 'generator' in name`, which
    is always truthy ('decoder' is a non-empty string), so every
    non-encoder parameter was counted as decoder. The counts are also
    returned (backward-compatible: callers ignoring the return still work).

    Args:
        model: any object exposing parameters() / named_parameters()
            (e.g. a torch.nn.Module).
    Returns:
        (n_params, enc, dec): total, encoder, and decoder+generator counts.
    """
    n_params = sum([p.nelement() for p in model.parameters()])
    print('* number of parameters: %d' % n_params)
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print('encoder: ', enc)
    print('decoder: ', dec)
    return n_params, enc, dec
def load_dataset(data_type):
    """Load the preprocessed dataset(s) saved by preprocess.py.

    Args:
        data_type (str): "train" or "valid".
    Returns:
        the loaded (and, when sharded, coalesced) onmt.io dataset.
    """
    assert data_type in ["train", "valid"]
    print("Loading %s data from '%s'" % (data_type, opt.data))
    pts = glob.glob(opt.data + '.' + data_type + '.[0-9]*.pt')
    if pts:
        # Multiple onmt.io.*Dataset's, coalesce all.
        # torch.load loads them immediately, which might eat up
        # too much memory. A lazy load would be better, but later
        # when we create data iterator, it still requires these
        # data to be loaded. So it seems we don't have a good way
        # to avoid this now.
        datasets = []
        for pt in pts:
            datasets.append(torch.load(pt))
        dataset = onmt.io.ONMTDatasetBase.coalesce_datasets(datasets)
    else:
        # Only one onmt.io.*Dataset, simple!
        dataset = torch.load(opt.data + '.' + data_type + '.pt')
    print(' * number of %s sentences: %d' % (data_type, len(dataset)))
    return dataset
def load_fields(train_dataset, valid_dataset, checkpoint):
    """Restore the torchtext fields (vocabularies) for both datasets.

    Fields come from the vocab file built by preprocess.py, or from the
    checkpoint's vocab when resuming with -train_from.
    """
    data_type = train_dataset.data_type
    fields = onmt.io.load_fields_from_vocab(
        torch.load(opt.data + '.vocab.pt'), data_type)
    # Keep only the fields that are actually present in the examples.
    fields = dict([(k, f) for (k, f) in fields.items()
                   if k in train_dataset.examples[0].__dict__])
    # We save fields in vocab.pt, so assign them back to dataset here.
    train_dataset.fields = fields
    valid_dataset.fields = fields
    if opt.train_from:
        print('Loading vocab from checkpoint at %s.' % opt.train_from)
        fields = onmt.io.load_fields_from_vocab(
            checkpoint['vocab'], data_type)
    if data_type == 'text':
        print(' * vocabulary size. source = %d; target = %d' %
              (len(fields['src'].vocab), len(fields['tgt'].vocab)))
    else:
        print(' * vocabulary size. target = %d' %
              (len(fields['tgt'].vocab)))
    return fields
def collect_report_features(fields):
    """Print the vocabulary size of every source and target feature field."""
    src_feats = onmt.io.collect_features(fields, side='src')
    for idx, feat_name in enumerate(src_feats):
        print(' * src feature %d size = %d' % (idx, len(fields[feat_name].vocab)))
    tgt_feats = onmt.io.collect_features(fields, side='tgt')
    for idx, feat_name in enumerate(tgt_feats):
        print(' * tgt feature %d size = %d' % (idx, len(fields[feat_name].vocab)))
def build_model(model_opt, opt, fields, checkpoint):
    """Construct the NMT model from options (and checkpoint, if resuming).

    Wraps the model in nn.DataParallel when more than one GPU id is given.
    """
    print('Building model...')
    model = onmt.ModelConstructor.make_base_model(model_opt, fields,
                                                  use_gpu(opt), checkpoint)
    if len(opt.gpuid) > 1:
        print('Multi gpu training: ', opt.gpuid)
        model = nn.DataParallel(model, device_ids=opt.gpuid, dim=1)
    print(model)
    return model
def build_optim(model, checkpoint):
    """Create the optimizer, restoring its state when resuming training."""
    if opt.train_from:
        print('Loading optimizer from checkpoint.')
        optim = checkpoint['optim']
        optim.optimizer.load_state_dict(
            checkpoint['optim'].optimizer.state_dict())
    else:
        # what members of opt does Optim need?
        optim = onmt.Optim(
            opt.optim, opt.learning_rate, opt.max_grad_norm,
            lr_decay=opt.learning_rate_decay,
            start_decay_at=opt.start_decay_at,
            beta1=opt.adam_beta1,
            beta2=opt.adam_beta2,
            adagrad_accum=opt.adagrad_accumulator_init,
            opt=opt
        )
    optim.set_parameters(model.parameters())
    return optim
def main():
    """Training entry point: load data and fields, build model/optimizer,
    then run the training loop."""
    # Load train and validate data.
    train_dataset = load_dataset("train")
    valid_dataset = load_dataset("valid")
    print(' * maximum batch size: %d' % opt.batch_size)
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        print('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        # I don't like reassigning attributes of opt: it's not clear.
        opt.start_epoch = checkpoint['epoch'] + 1
    else:
        checkpoint = None
        model_opt = opt
    # Load fields generated from preprocess phase.
    fields = load_fields(train_dataset, valid_dataset, checkpoint)
    # Report src/tgt features.
    collect_report_features(fields)
    # Build model.
    model = build_model(model_opt, opt, fields, checkpoint)
    tally_parameters(model)
    check_save_model_path()
    # Build optimizer.
    optim = build_optim(model, checkpoint)
    # Do training.
    train_model(model, train_dataset, valid_dataset, fields, optim, model_opt)
if __name__ == "__main__":
main()
| 11,059 | 30.781609 | 78 | py |
Seq2Sick | Seq2Sick-master/preprocess.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import codecs
import os
import glob
import sys
import torch
import onmt.io
import opts
def check_existing_pt_files(opt):
    """Exit with status 1 if .pt files from a previous run already exist.

    We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt when
    training, so refuse to run when opt.save_data already has train/valid/
    vocab .pt files, to avoid tampering with or mixing them up.

    Args:
        opt: parsed options; only `save_data` is read.
    """
    for t in ['train', 'valid', 'vocab']:
        pattern = opt.save_data + '.' + t + '*.pt'
        if glob.glob(pattern):
            # Message typo fixed: "exisiting" -> "existing".
            sys.stderr.write("Please backup existing pt file: %s, "
                             "to avoid tampering!\n" % pattern)
            sys.exit(1)
def parse_args():
    """Parse preprocess.py options, seed torch's RNG, and refuse to
    overwrite .pt files left by a previous run."""
    parser = argparse.ArgumentParser(
        description='preprocess.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    opts.add_md_help_argument(parser)
    opts.preprocess_opts(parser)
    opt = parser.parse_args()
    torch.manual_seed(opt.seed)
    check_existing_pt_files(opt)
    return opt
def get_num_features(side, opt):
    """ Peek one line and get number of features of it.
    (All lines must have same number of features).
    """
    assert side in ["src", "tgt"]
    # Only "text" corpus has source-side features.
    if side == "src":
        data_file = opt.train_src if opt.data_type == "text" else None
    else:
        # side == "tgt"
        data_file = opt.train_tgt
    if data_file is not None:
        with codecs.open(data_file, "r", "utf-8") as df:
            f_line = df.readline().strip().split()
            _, _, n_features = onmt.io.extract_features(f_line)
    else:
        n_features = 0
    return n_features
def build_save_text_dataset_in_shards(src_corpus, tgt_corpus, fields,
                                      corpus_type, opt, save=True):
    '''
    Divide the big corpus into shards, and build dataset separately.
    This is currently only for data_type=='text'.
    The reason we do this is to avoid taking up too much memory due
    to sucking in a huge corpus file.
    To tackle this, we only read in part of the corpus file of size
    `max_shard_size`(actually it is multiples of 64 bytes that equals
    or is slightly larger than this size), and process it into dataset,
    then write it to disk along the way. By doing this, we only focus on
    part of the corpus at any moment, thus effectively reducing memory use.
    According to test, this method can reduce memory footprint by ~50%.
    Note! As we process along the shards, previous shards might still
    stay in memory, but since we are done with them, and no more
    reference to them, if there is memory tight situation, the OS could
    easily reclaim these memory.
    If `max_shard_size` is 0 or is larger than the corpus size, it is
    effectively preprocessed into one dataset, i.e. no sharding.
    '''
    corpus_size = os.path.getsize(src_corpus)
    if corpus_size > 10 * (1024**2) and opt.max_shard_size == 0:
        print("Warning! The corpus %s is larger than 10M bytes, you can "
              "set '-max_shard_size' to process it by small shards "
              "to avoid memory hogging problem !!!" % src_corpus)
    ret_list = []
    src_iter = onmt.io.ShardedTextCorpusIterator(
        src_corpus, opt.src_seq_length_trunc,
        "src", opt.max_shard_size)
    # The target iterator advances in lock-step with the source iterator.
    tgt_iter = onmt.io.ShardedTextCorpusIterator(
        tgt_corpus, opt.tgt_seq_length_trunc,
        "tgt", opt.max_shard_size,
        assoc_iter=src_iter)
    index = 0
    while not src_iter.hit_end():
        index += 1
        dataset = onmt.io.TextDataset(
            fields, src_iter, tgt_iter,
            src_iter.num_feats, tgt_iter.num_feats,
            src_seq_length=opt.src_seq_length,
            tgt_seq_length=opt.tgt_seq_length,
            dynamic_dict=opt.dynamic_dict)
        ret_list.append(dataset)
        if save:
            # We save fields in vocab.pt seperately, so make it empty.
            dataset.fields = []
            pt_file = "{:s}.{:s}.{:d}.pt".format(
                opt.save_data, corpus_type, index)
            torch.save(dataset, pt_file)
    # NOTE(review): assumes save=True when a single shard was written;
    # with save=False and one shard, pt_file would be undefined here.
    if index == 1:
        # Only one shard, strip the index in the filename.
        os.rename(pt_file, opt.save_data + '.' + corpus_type + '.pt')
    return ret_list
def build_save_dataset(corpus_type, fields, opt, save=True):
    """Build (and optionally save) the dataset for one corpus split.

    Returns a list of datasets: several shards for text data, otherwise a
    single monolithic dataset.
    """
    assert corpus_type in ['train', 'valid']
    if corpus_type == 'train':
        src_corpus = opt.train_src
        tgt_corpus = opt.train_tgt
    else:
        src_corpus = opt.valid_src
        tgt_corpus = opt.valid_tgt
    # Currently we only do preprocess sharding for corpus: data_type=='text'.
    if opt.data_type == 'text':
        return build_save_text_dataset_in_shards(
            src_corpus, tgt_corpus, fields,
            corpus_type, opt, save)
    # For data_type == 'img' or 'audio', currently we don't do
    # preprocess sharding. We only build a monolithic dataset.
    # But since the interfaces are uniform, it would be not hard
    # to do this should users need this feature.
    dataset = onmt.io.build_dataset(
        fields, opt.data_type, src_corpus, tgt_corpus,
        src_dir=opt.src_dir,
        src_seq_length=opt.src_seq_length,
        tgt_seq_length=opt.tgt_seq_length,
        src_seq_length_trunc=opt.src_seq_length_trunc,
        tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
        dynamic_dict=opt.dynamic_dict,
        sample_rate=opt.sample_rate,
        window_size=opt.window_size,
        window_stride=opt.window_stride,
        window=opt.window)
    if save:
        # We save fields in vocab.pt seperately, so make it empty.
        dataset.fields = []
        pt_file = "{:s}.{:s}.pt".format(opt.save_data, corpus_type)
        torch.save(dataset, pt_file)
    return [dataset]
def build_save_vocab(train_dataset, fields, opt, save=True):
    """Build src/tgt (optionally shared) vocabularies from the training
    shards and save them to <save_data>.vocab.pt."""
    # We've empty'ed each dataset's `fields` attribute
    # when saving datasets, so restore them.
    for train in train_dataset:
        train.fields = fields
    onmt.io.build_vocab(train_dataset, opt.data_type, opt.share_vocab,
                        opt.src_vocab_size,
                        opt.src_words_min_frequency,
                        opt.tgt_vocab_size,
                        opt.tgt_words_min_frequency)
    if save:
        # Can't save fields, so remove/reconstruct at training time.
        vocab_file = opt.save_data + '.vocab.pt'
        torch.save(onmt.io.save_fields_to_vocab(fields), vocab_file)
def main():
    """Preprocess entry point: build and save train data, vocabulary,
    and validation data."""
    opt = parse_args()
    print('Preparing for training ...')
    n_src_features = get_num_features('src', opt)
    n_tgt_features = get_num_features('tgt', opt)
    fields = onmt.io.get_fields(opt.data_type, n_src_features, n_tgt_features)
    print("Building & saving training data...")
    train_datasets = build_save_dataset('train', fields, opt)
    print("Building & saving vocabulary...")
    build_save_vocab(train_datasets, fields, opt)
    print("Building & saving validation data...")
    build_save_dataset('valid', fields, opt)
if __name__ == "__main__":
main()
| 7,222 | 33.070755 | 78 | py |
Seq2Sick | Seq2Sick-master/tools/embeddings_to_torch.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import six
import sys
import numpy as np
import argparse
import torch
parser = argparse.ArgumentParser(description='embeddings_to_torch.py')
parser.add_argument('-emb_file', required=True,
help="Embeddings from this file")
parser.add_argument('-output_file', required=True,
help="Output file for the prepared data")
parser.add_argument('-dict_file', required=True,
help="Dictionary file")
parser.add_argument('-verbose', action="store_true", default=False)
opt = parser.parse_args()
def get_vocabs(dict_file):
    """Load a saved vocab file and return (encoder_vocab, decoder_vocab).

    The file holds a sequence of (name, vocab) pairs; the first entry is
    the encoder side and the last entry the decoder side.
    """
    saved = torch.load(dict_file)
    enc_vocab = saved[0][1]
    dec_vocab = saved[-1][1]
    print("From: %s" % dict_file)
    print("\t* source vocab: %d words" % len(enc_vocab))
    print("\t* target vocab: %d words" % len(dec_vocab))
    return enc_vocab, dec_vocab
def get_embeddings(file):
    """Read a text embedding file ("<token> <v1> <v2> ...") into a dict.

    Two-field lines (the "<vocab_size> <dim>" header written by word2vec)
    and blank lines are skipped.

    Args:
        file (str): path to the embedding file (utf-8 text).
    Returns:
        dict: token -> list[float].
    """
    embs = dict()
    # `with` closes the handle deterministically; the original leaked it.
    with open(file, 'rb') as emb_file:
        for l in emb_file:
            l_split = l.decode('utf8').strip().split()
            # Skip blank lines (would crash below) and the 2-field header.
            if not l_split or len(l_split) == 2:
                continue
            embs[l_split[0]] = [float(em) for em in l_split[1:]]
    print("Got {} embeddings from {}".format(len(embs), file))
    return embs
def match_embeddings(vocab, emb):
    """Build a |vocab| x dim matrix from pretrained embeddings.

    Args:
        vocab: torchtext-style vocab exposing a `stoi` dict and __len__.
        emb (dict): non-empty mapping token -> list[float].
    Returns:
        (torch.Tensor, dict): the filtered embedding matrix (rows stay
        zero for vocab words missing from `emb`) and the match/miss counts
        {'match': n, 'miss': m}.
    """
    # Probe any entry for the dimensionality. Replaces the former
    # six.next(six.itervalues(emb)): the stdlib next(iter(...)) behaves
    # identically on both Python 2 and 3, dropping the six dependency.
    dim = len(next(iter(emb.values())))
    filtered_embeddings = np.zeros((len(vocab), dim))
    count = {"match": 0, "miss": 0}
    for w, w_id in vocab.stoi.items():
        if w in emb:
            filtered_embeddings[w_id] = emb[w]
            count['match'] += 1
        else:
            if opt.verbose:  # module-level CLI options
                print(u"not found:\t{}".format(w), file=sys.stderr)
            count['miss'] += 1
    return torch.Tensor(filtered_embeddings), count
def main():
    """Match pretrained embeddings against the model vocabularies and save
    them as <output_file>.enc.pt / <output_file>.dec.pt tensors."""
    enc_vocab, dec_vocab = get_vocabs(opt.dict_file)
    embeddings = get_embeddings(opt.emb_file)
    filtered_enc_embeddings, enc_count = match_embeddings(enc_vocab,
                                                          embeddings)
    filtered_dec_embeddings, dec_count = match_embeddings(dec_vocab,
                                                          embeddings)
    print("\nMatching: ")
    match_percent = [_['match'] / (_['match'] + _['miss']) * 100
                     for _ in [enc_count, dec_count]]
    print("\t* enc: %d match, %d missing, (%.2f%%)" % (enc_count['match'],
                                                       enc_count['miss'],
                                                       match_percent[0]))
    print("\t* dec: %d match, %d missing, (%.2f%%)" % (dec_count['match'],
                                                       dec_count['miss'],
                                                       match_percent[1]))
    print("\nFiltered embeddings:")
    print("\t* enc: ", filtered_enc_embeddings.size())
    print("\t* dec: ", filtered_dec_embeddings.size())
    enc_output_file = opt.output_file + ".enc.pt"
    dec_output_file = opt.output_file + ".dec.pt"
    print("\nSaving embedding as:\n\t* enc: %s\n\t* dec: %s"
          % (enc_output_file, dec_output_file))
    torch.save(filtered_enc_embeddings, enc_output_file)
    torch.save(filtered_dec_embeddings, dec_output_file)
    print("\nDone.")
main()
| 3,313 | 33.884211 | 74 | py |
Seq2Sick | Seq2Sick-master/tools/extract_embeddings.py | from __future__ import division
import torch
import argparse
import opts
import onmt
import onmt.ModelConstructor
import onmt.io
from onmt.Utils import use_gpu
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-output_dir', default='.',
help="""Path to output the embeddings""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def write_embeddings(filename, dict, embeddings):
    """Write `embeddings` as utf-8 text lines: "<token> <v1> <v2> ...".

    Args:
        filename (str): output path (opened in binary mode).
        dict: vocab-like object with an `itos` list. The parameter name is
            kept for backward compatibility even though it shadows the
            builtin `dict`.
        embeddings: sequence of rows of floats; the row width is taken
            from the first row.
    """
    with open(filename, 'wb') as out_f:
        for i in range(min(len(embeddings), len(dict.itos))):
            # Accumulate each line locally (the original shadowed the
            # builtins `str` and `file` here).
            line = dict.itos[i].encode("utf-8")
            for j in range(len(embeddings[0])):
                line = line + (" %5f" % (embeddings[i][j])).encode("utf-8")
            out_f.write(line + b"\n")
def main():
    """Load a checkpoint and dump its encoder/decoder embedding tables as
    text files in opt.output_dir."""
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)
    # Add in default model arguments, possibly added since training.
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)
    model_opt = checkpoint['opt']
    # NOTE(review): src comes from vocab[1] and tgt from vocab[0] —
    # verify this matches the ordering the checkpoint was saved with.
    src_dict = checkpoint['vocab'][1][1]
    tgt_dict = checkpoint['vocab'][0][1]
    fields = onmt.io.load_fields_from_vocab(checkpoint['vocab'])
    model_opt = checkpoint['opt']
    for arg in dummy_opt.__dict__:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt.__dict__[arg]
    model = onmt.ModelConstructor.make_base_model(
        model_opt, fields, use_gpu(opt), checkpoint)
    encoder = model.encoder
    decoder = model.decoder
    encoder_embeddings = encoder.embeddings.word_lut.weight.data.tolist()
    decoder_embeddings = decoder.embeddings.word_lut.weight.data.tolist()
    print("Writing source embeddings")
    write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
                     encoder_embeddings)
    print("Writing target embeddings")
    write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
                     decoder_embeddings)
    print('... done.')
    print('Converting model...')
if __name__ == "__main__":
main()
| 2,422 | 31.743243 | 73 | py |
Seq2Sick | Seq2Sick-master/onmt/Loss.py | """
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt
import onmt.io
class LossComputeBase(nn.Module):
"""
Class for managing efficient loss computation. Handles
sharding next step predictions and accumulating mutiple
loss computations
Users can implement their own loss computation strategy by making
subclass of this one. Users need to implement the _compute_loss()
and make_shard_state() methods.
Args:
generator (:obj:`nn.Module`) :
module that maps the output of the decoder to a
distribution over the target vocabulary.
tgt_vocab (:obj:`Vocab`) :
torchtext vocab object representing the target output
"""
def __init__(self, generator, tgt_vocab):
super(LossComputeBase, self).__init__()
self.generator = generator
self.tgt_vocab = tgt_vocab
self.padding_idx = tgt_vocab.stoi[onmt.io.PAD_WORD]
def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError
def monolithic_compute_loss(self, batch, output, attns):
"""
Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.Statistics`: loss statistics
"""
range_ = (0, batch.tgt.size(0))
shard_state = self._make_shard_state(batch, output, range_, attns)
_, batch_stats = self._compute_loss(batch, **shard_state)
return batch_stats
def sharded_compute_loss(self, batch, output, attns,
                         cur_trunc, trunc_size, shard_size):
    """Compute the forward loss and backpropagate.  Computation is done
    with shards and optionally truncation for memory efficiency.

    Also supports truncated BPTT for long sequences by taking a
    range in the decoder output sequence to back propagate in.
    Range is from `(cur_trunc, cur_trunc + trunc_size)`.

    Note sharding is an exact efficiency trick to relieve memory
    required for the generation buffers. Truncation is an
    approximate efficiency trick to relieve the memory required
    in the RNN buffers.

    Args:
        batch (batch) : batch of labeled examples
        output (:obj:`FloatTensor`) :
            output of decoder model `[tgt_len x batch x hidden]`
        attns (dict) : dictionary of attention distributions
            `[tgt_len x batch x src_len]`
        cur_trunc (int) : starting position of truncation window
        trunc_size (int) : length of truncation window
        shard_size (int) : maximum number of examples in a shard

    Returns:
        :obj:`onmt.Statistics`: validation loss statistics
    """
    batch_stats = onmt.Statistics()
    range_ = (cur_trunc, cur_trunc + trunc_size)
    shard_state = self._make_shard_state(batch, output, range_, attns)

    # Each shard is a slice of the full state; loss is computed and
    # backpropagated per shard so the generator's buffers never hold
    # the whole sequence at once (gradients accumulate across shards).
    for shard in shards(shard_state, shard_size):
        loss, stats = self._compute_loss(batch, **shard)
        # Normalize by batch size before accumulating gradients.
        loss.div(batch.batch_size).backward()
        batch_stats.update(stats)

    return batch_stats
def _stats(self, loss, scores, target):
    """Build an ``onmt.Statistics`` snapshot for one shard.

    Args:
        loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
        scores (:obj:`FloatTensor`): a score for each possible output
        target (:obj:`FloatTensor`): true targets

    Returns:
        :obj:`Statistics` : statistics for this batch.
    """
    # argmax over the vocabulary dimension.
    predictions = scores.max(1)[1]
    # Padding positions are excluded from both the word count and the
    # correct count.
    non_padding = target.ne(self.padding_idx)
    correct = predictions.eq(target).masked_select(non_padding).sum()
    return onmt.Statistics(loss[0], non_padding.sum(), correct)
def _bottle(self, v):
    # Collapse the (len, batch) leading dims into one so the generator
    # sees a flat 2-D matrix: (len * batch, features).
    feature_dim = v.size(2)
    return v.view(-1, feature_dim)
def _unbottle(self, v, batch_size):
    # Inverse of _bottle: restore a (len, batch, features) layout.
    feature_dim = v.size(1)
    return v.view(-1, batch_size, feature_dim)
class NMTLossCompute(LossComputeBase):
    """
    Standard NMT Loss Computation.

    Uses NLLLoss by default; when ``label_smoothing`` > 0 it instead
    minimizes the KL-divergence against a smoothed target distribution.
    """
    def __init__(self, generator, tgt_vocab, label_smoothing=0.0):
        super(NMTLossCompute, self).__init__(generator, tgt_vocab)
        assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)
        if label_smoothing > 0:
            # When label smoothing is turned on,
            # KL-divergence between q_{smoothed ground truth prob.}(w)
            # and p_{prob. computed by model}(w) is minimized.
            # If label smoothing value is set to zero, the loss
            # is equivalent to NLLLoss or CrossEntropyLoss.
            # All non-true labels are uniformly set to low-confidence.
            # NOTE(review): size_average is a deprecated criterion kwarg in
            # modern PyTorch — this file targets the old Variable-era API.
            self.criterion = nn.KLDivLoss(size_average=False)
            # Template row: the smoothing mass is spread over every token
            # except the true label and padding (hence len(tgt_vocab) - 2).
            one_hot = torch.randn(1, len(tgt_vocab))
            one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))
            one_hot[0][self.padding_idx] = 0
            self.register_buffer('one_hot', one_hot)
        else:
            # Zero weight on padding removes it from the loss entirely.
            weight = torch.ones(len(tgt_vocab))
            weight[self.padding_idx] = 0
            self.criterion = nn.NLLLoss(weight, size_average=False)
        # Probability mass kept on the true label.
        self.confidence = 1.0 - label_smoothing

    def _make_shard_state(self, batch, output, range_, attns=None):
        # Shift by one: the target at decoder step t is gold token t+1.
        return {
            "output": output,
            "target": batch.tgt[range_[0] + 1: range_[1]],
        }

    def _compute_loss(self, batch, output, target):
        """Compute NLL (or label-smoothed KL) loss for one shard.

        Returns:
            (loss, stats): the differentiable loss and an onmt.Statistics
            snapshot built from detached ``.data`` tensors.
        """
        scores = self.generator(self._bottle(output))
        gtruth = target.view(-1)
        if self.confidence < 1:
            # Build one smoothed target distribution row per token.
            tdata = gtruth.data
            mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
            # Gold-token log-likelihood, kept for reporting below.
            likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))
            tmp_ = self.one_hot.repeat(gtruth.size(0), 1)
            # Place `confidence` mass on the true label of each row.
            tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
            if mask.dim() > 0:
                # Zero out rows corresponding to padding positions.
                likelihood.index_fill_(0, mask, 0)
                tmp_.index_fill_(0, mask, 0)
            gtruth = Variable(tmp_, requires_grad=False)
        loss = self.criterion(scores, gtruth)
        if self.confidence < 1:
            # Report the negative gold-token log-likelihood rather than the
            # KL value so that perplexity stays comparable across settings.
            loss_data = - likelihood.sum(0)
        else:
            loss_data = loss.data.clone()
        stats = self._stats(loss_data, scores.data, target.view(-1).data)
        return loss, stats
def filter_shard_state(state):
    """Yield the (key, value) pairs of ``state`` whose values are not None.

    Gradient-requiring Variables are re-wrapped as fresh leaf Variables so
    that shards() can later read their accumulated ``.grad`` back.
    """
    for key, value in state.items():
        if value is None:
            continue
        if isinstance(value, Variable) and value.requires_grad:
            value = Variable(value.data, requires_grad=True, volatile=False)
        yield key, value
def shards(state, shard_size, eval=False):
    """
    Args:
        state: A dictionary which corresponds to the output of
               *LossCompute._make_shard_state(). The values for
               those keys are Tensor-like or None.
        shard_size: The maximum size of the shards yielded by the model.
        eval: If True, only yield the state, nothing else.
              Otherwise, yield shards.

    Yields:
        Each yielded shard is a dict.

    Side effect:
        After the last shard, this function does back-propagation.
    """
    if eval:
        yield state
    else:
        # non_none: the subdict of the state dictionary where the values
        # are not None.
        non_none = dict(filter_shard_state(state))

        # Now, the iteration:
        # state is a dictionary of sequences of tensor-like but we
        # want a sequence of dictionaries of tensors.
        # First, unzip the dictionary into a sequence of keys and a
        # sequence of tensor-like sequences.
        keys, values = zip(*((k, torch.split(v, shard_size))
                             for k, v in non_none.items()))

        # Now, yield a dictionary for each shard. The keys are always
        # the same. values is a sequence of length #keys where each
        # element is a sequence of length #shards. We want to iterate
        # over the shards, not over the keys: therefore, the values need
        # to be re-zipped by shard and then each shard can be paired
        # with the keys.
        for shard_tensors in zip(*values):
            yield dict(zip(keys, shard_tensors))

        # Assumed backprop'd
        # NOTE(review): this runs only after the caller has exhausted the
        # generator. Gradients collected on the re-wrapped leaf Variables
        # (from filter_shard_state) are pushed back into the original
        # graph in a single torch.autograd.backward call.
        variables = ((state[k], v.grad.data) for k, v in non_none.items()
                     if isinstance(v, Variable) and v.grad is not None)
        inputs, grads = zip(*variables)
        torch.autograd.backward(inputs, grads)
| 9,843 | 36.716475 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/Utils.py | import torch
def aeq(*args):
    """
    Assert all arguments have the same value
    """
    remaining = iter(args)
    reference = next(remaining)
    assert all(value == reference for value in remaining), \
        "Not all arguments have the same value: " + str(args)
def sequence_mask(lengths, max_len=None):
    """
    Creates a boolean mask from sequence lengths.

    Entry (b, t) is True iff t < lengths[b].
    """
    batch_size = lengths.numel()
    max_len = max_len or lengths.max()
    # One row of positions 0..max_len-1 per batch element, compared
    # against that element's length.
    positions = torch.arange(0, max_len).type_as(lengths)
    grid = positions.repeat(batch_size, 1)
    return grid.lt(lengths.unsqueeze(1))
def use_gpu(opt):
    """Return True when the option object requests GPU execution.

    Supports both the training convention (``gpuid``: a non-empty list)
    and the translation convention (``gpu``: an index >= 0).
    """
    if hasattr(opt, 'gpuid') and len(opt.gpuid) > 0:
        return True
    return hasattr(opt, 'gpu') and opt.gpu > -1
| 726 | 24.068966 | 62 | py |
Seq2Sick | Seq2Sick-master/onmt/ModelConstructor.py | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import torch
import torch.nn as nn
import onmt
import onmt.io
import onmt.Models
import onmt.modules
from onmt.Models import NMTModel, MeanEncoder, RNNEncoder, \
StdRNNDecoder, InputFeedRNNDecoder
from onmt.modules import Embeddings, ImageEncoder, CopyGenerator, \
TransformerEncoder, TransformerDecoder, \
CNNEncoder, CNNDecoder, AudioEncoder
from onmt.Utils import use_gpu
def make_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
    """
    Make an Embeddings instance.

    Args:
        opt: the option in current environment.
        word_dict(Vocab): words dictionary.
        feature_dicts([Vocab], optional): a list of feature dictionary.
        for_encoder(bool): make Embeddings for encoder or decoder?
    """
    # Encoder and decoder embeddings may have different sizes.
    embedding_dim = (opt.src_word_vec_size if for_encoder
                     else opt.tgt_word_vec_size)

    word_padding_idx = word_dict.stoi[onmt.io.PAD_WORD]
    num_word_embeddings = len(word_dict)

    # Per-feature padding indices and vocabulary sizes.
    feats_padding_idx = [fd.stoi[onmt.io.PAD_WORD] for fd in feature_dicts]
    num_feat_embeddings = [len(fd) for fd in feature_dicts]

    return Embeddings(embedding_dim,
                      opt.position_encoding,
                      opt.feat_merge,
                      opt.feat_vec_exponent,
                      opt.feat_vec_size,
                      opt.dropout,
                      word_padding_idx,
                      feats_padding_idx,
                      num_word_embeddings,
                      num_feat_embeddings)
def make_encoder(opt, embeddings):
    """
    Various encoder dispatcher function.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    enc_type = opt.encoder_type
    if enc_type == "transformer":
        return TransformerEncoder(opt.enc_layers, opt.rnn_size,
                                  opt.dropout, embeddings)
    if enc_type == "cnn":
        return CNNEncoder(opt.enc_layers, opt.rnn_size,
                          opt.cnn_kernel_width,
                          opt.dropout, embeddings)
    if enc_type == "mean":
        return MeanEncoder(opt.enc_layers, embeddings)
    # Default: unidirectional ("rnn") or bidirectional ("brnn") RNN.
    return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
                      opt.rnn_size, opt.dropout, embeddings)
def make_decoder(opt, embeddings):
    """
    Various decoder dispatcher function.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    if opt.decoder_type == "transformer":
        return TransformerDecoder(opt.dec_layers, opt.rnn_size,
                                  opt.global_attention, opt.copy_attn,
                                  opt.dropout, embeddings)
    if opt.decoder_type == "cnn":
        return CNNDecoder(opt.dec_layers, opt.rnn_size,
                          opt.global_attention, opt.copy_attn,
                          opt.cnn_kernel_width, opt.dropout,
                          embeddings)
    # Both RNN decoder variants share the same constructor signature;
    # input feeding only changes which class is instantiated.
    decoder_cls = InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder
    return decoder_cls(opt.rnn_type, opt.brnn,
                       opt.dec_layers, opt.rnn_size,
                       opt.global_attention,
                       opt.coverage_attn,
                       opt.context_gate,
                       opt.copy_attn,
                       opt.dropout,
                       embeddings)
def load_test_model(opt, dummy_opt):
    """Load a trained checkpoint for inference.

    Returns:
        (fields, model, model_opt): the vocabulary fields, the model in
        eval mode, and the options the model was trained with.
    """
    # map_location keeps GPU-trained checkpoints loadable on CPU.
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)
    fields = onmt.io.load_fields_from_vocab(
        checkpoint['vocab'], data_type=opt.data_type)

    model_opt = checkpoint['opt']
    # Backfill options that were introduced after this checkpoint was saved.
    for key in dummy_opt:
        if key not in model_opt:
            model_opt.__dict__[key] = dummy_opt[key]

    model = make_base_model(model_opt, fields,
                            use_gpu(opt), checkpoint)
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
def make_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu(bool): whether to use gpu.
        checkpoint: the model generated by train phase, or a resumed snapshot
                    model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        ("Unsupported model type %s" % (model_opt.model_type))

    # Make encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = onmt.io.collect_feature_vocabs(fields, 'src')
        src_embeddings = make_embeddings(model_opt, src_dict,
                                         feature_dicts)
        encoder = make_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.rnn_size,
                               model_opt.dropout)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.rnn_size,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Make decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = make_embeddings(model_opt, tgt_dict,
                                     feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    # NOTE(review): src_dict/src_embeddings are only bound in the "text"
    # branch above, so enabling share_embeddings for img/audio models
    # would raise NameError here — confirm callers only set it for text.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')

        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = make_decoder(model_opt, tgt_embeddings)

    # Make NMTModel(= encoder + decoder).
    model = NMTModel(encoder, decoder)
    model.model_type = model_opt.model_type

    # Make Generator: maps decoder hidden states to log-probabilities
    # over the target vocabulary (or a copy-attention generator).
    if not model_opt.copy_attn:
        generator = nn.Sequential(
            nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
            nn.LogSoftmax())
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt, fields["src"].vocab,
                                  fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        print('Loading model parameters.')
        model.load_state_dict(checkpoint['model'])
        generator.load_state_dict(checkpoint['generator'])
    else:
        # Fresh model: uniform init, then optionally load pretrained
        # word vectors on top of it.
        if model_opt.param_init != 0.0:
            print('Intializing model parameters.')
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator

    # Make the whole model leverage GPU if indicated to do so.
    if gpu:
        model.cuda()
    else:
        model.cpu()

    return model
| 8,731 | 37.808889 | 77 | py |
Seq2Sick | Seq2Sick-master/onmt/Trainer.py | from __future__ import division
"""
This is the loadable seq2seq trainer library that is
in charge of training details, loss compute, and statistics.
See train.py for a use case of this library.
Note!!! To make this a general library, we implement *only*
mechanism things here(i.e. what to do), and leave the strategy
things to users(i.e. how to do it). Also see train.py(one of the
users of this library) for the strategy things we do.
"""
import time
import sys
import math
import torch
import torch.nn as nn
import onmt
import onmt.io
import onmt.modules
class Statistics(object):
    """
    Accumulator for loss statistics.
    Currently calculates:

    * accuracy
    * perplexity
    * elapsed time
    """
    def __init__(self, loss=0, n_words=0, n_correct=0):
        self.loss = loss
        self.n_words = n_words
        self.n_correct = n_correct
        self.n_src_words = 0
        self.start_time = time.time()

    def update(self, stat):
        """Fold another Statistics object into this accumulator."""
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct

    def accuracy(self):
        """Token-level accuracy, as a percentage."""
        return 100 * (self.n_correct / self.n_words)

    def ppl(self):
        """Perplexity; the per-word loss is clamped at 100 before exp."""
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        """Seconds since this accumulator was created."""
        return time.time() - self.start_time

    def output(self, epoch, batch, n_batches, start):
        """Write out statistics to stdout.

        Args:
           epoch (int): current epoch
           batch (int): current batch
           n_batch (int): total batches
           start (int): start time of epoch.
        """
        t = self.elapsed_time()
        fmt = ("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " +
               "%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed")
        fields = (epoch, batch, n_batches,
                  self.accuracy(),
                  self.ppl(),
                  self.n_src_words / (t + 1e-5),
                  self.n_words / (t + 1e-5),
                  time.time() - start)
        print(fmt % fields)
        sys.stdout.flush()

    def log(self, prefix, experiment, lr):
        """Push the current metrics to an experiment tracker."""
        t = self.elapsed_time()
        scalars = (
            ("_ppl", self.ppl()),
            ("_accuracy", self.accuracy()),
            ("_tgtper", self.n_words / t),
            ("_lr", lr),
        )
        for suffix, value in scalars:
            experiment.add_scalar_value(prefix + suffix, value)
class Trainer(object):
    """
    Class that controls the training process.

    Args:
            model(:py:class:`onmt.Model.NMTModel`): translation model to train
            train_iter: training data iterator
            valid_iter: validate data iterator
            train_loss(:obj:`onmt.Loss.LossComputeBase`):
               training loss computation
            valid_loss(:obj:`onmt.Loss.LossComputeBase`):
               validation loss computation
            optim(:obj:`onmt.Optim.Optim`):
               the optimizer responsible for update
            trunc_size(int): length of truncated back propagation through time
            shard_size(int): compute loss in shards of this size for efficiency
            data_type(string): type of the source input: [text|img|audio]
    """

    def __init__(self, model, train_iter, valid_iter,
                 train_loss, valid_loss, optim,
                 trunc_size, shard_size, data_type='text'):
        # Basic attributes.
        self.model = model
        self.train_iter = train_iter
        self.valid_iter = valid_iter
        self.train_loss = train_loss
        self.valid_loss = valid_loss
        self.optim = optim
        self.trunc_size = trunc_size
        self.shard_size = shard_size
        self.model_type = self.model.model_type
        self.data_type = data_type

        # Set model in training mode.
        self.model.train()

    def train(self, epoch, report_func=None):
        """ Train next epoch.
        Args:
            epoch(int): the epoch number
            report_func(fn): function for logging

        Returns:
            stats (:obj:`onmt.Statistics`): epoch loss statistics
        """
        total_stats = Statistics()
        report_stats = Statistics()

        for i, batch in enumerate(self.train_iter):
            target_size = batch.tgt.size(0)
            # Truncated BPTT: when trunc_size is unset, the whole target
            # sequence is one window.
            trunc_size = self.trunc_size if self.trunc_size else target_size
            dec_state = None
            src = onmt.io.make_features(batch, 'src', self.data_type)
            if self.data_type == 'text':
                _, src_lengths = batch.src
                report_stats.n_src_words += src_lengths.sum()
            else:
                src_lengths = None

            tgt_outer = onmt.io.make_features(batch, 'tgt')

            for j in range(0, target_size-1, trunc_size):
                # 1. Create truncated target.
                tgt = tgt_outer[j: j + trunc_size]

                # 2. F-prop all but generator.
                self.model.zero_grad()
                outputs, attns, dec_state = \
                    self.model(src, tgt, src_lengths, dec_state)

                # 3. Compute loss in shards for memory efficiency.
                # sharded_compute_loss also calls backward(), so
                # gradients are ready for the optimizer step below.
                batch_stats = self.train_loss.sharded_compute_loss(
                        batch, outputs, attns, j,
                        trunc_size, self.shard_size)

                # 4. Update the parameters and statistics.
                self.optim.step()
                total_stats.update(batch_stats)
                report_stats.update(batch_stats)

                # If truncated, don't backprop fully.
                if dec_state is not None:
                    dec_state.detach()

            if report_func is not None:
                # NOTE(review): report_func returns the (possibly reset)
                # report_stats — confirm its contract with callers.
                report_stats = report_func(
                        epoch, i, len(self.train_iter),
                        total_stats.start_time, self.optim.lr, report_stats)

        return total_stats

    def validate(self):
        """ Validate model.

        Returns:
            :obj:`onmt.Statistics`: validation loss statistics
        """
        # Set model in validating mode.
        self.model.eval()

        stats = Statistics()

        for batch in self.valid_iter:
            src = onmt.io.make_features(batch, 'src', self.data_type)
            if self.data_type == 'text':
                _, src_lengths = batch.src
            else:
                src_lengths = None

            tgt = onmt.io.make_features(batch, 'tgt')

            # F-prop through the model.
            outputs, attns, _ = self.model(src, tgt, src_lengths)

            # Compute loss (no sharding/backward needed for validation).
            batch_stats = self.valid_loss.monolithic_compute_loss(
                    batch, outputs, attns)

            # Update statistics.
            stats.update(batch_stats)

        # Set model back to training mode.
        self.model.train()

        return stats

    def epoch_step(self, ppl, epoch):
        # Delegate learning-rate decay decisions to the optimizer wrapper.
        return self.optim.update_learning_rate(ppl, epoch)

    def drop_checkpoint(self, opt, epoch, fields, valid_stats):
        """ Save a resumable checkpoint.

        Args:
            opt (dict): option object
            epoch (int): epoch number
            fields (dict): fields and vocabulary
            valid_stats : statistics of last validation run
        """
        # Unwrap DataParallel containers so the saved state_dict carries
        # plain parameter names.
        real_model = (self.model.module
                      if isinstance(self.model, nn.DataParallel)
                      else self.model)
        real_generator = (real_model.generator.module
                          if isinstance(real_model.generator, nn.DataParallel)
                          else real_model.generator)

        model_state_dict = real_model.state_dict()
        # Generator weights are stored under their own checkpoint key.
        model_state_dict = {k: v for k, v in model_state_dict.items()
                            if 'generator' not in k}
        generator_state_dict = real_generator.state_dict()
        checkpoint = {
            'model': model_state_dict,
            'generator': generator_state_dict,
            'vocab': onmt.io.save_fields_to_vocab(fields),
            'opt': opt,
            'epoch': epoch,
            'optim': self.optim,
        }
        torch.save(checkpoint,
                   '%s_acc_%.2f_ppl_%.2f_e%d.pt'
                   % (opt.save_model, valid_stats.accuracy(),
                      valid_stats.ppl(), epoch))
| 8,234 | 32.612245 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/Optim.py | import torch.optim as optim
from torch.nn.utils import clip_grad_norm
class Optim(object):
    """
    Controller class for optimization. Mostly a thin
    wrapper for `optim`, but also useful for implementing
    rate scheduling beyond what is currently available.
    Also implements necessary methods for training RNNs such
    as grad manipulations.

    Args:
      method (:obj:`str`): one of [sgd, adagrad, adadelta, adam]
      lr (float): learning rate
      max_grad_norm (float): clip gradients to this global norm (0 disables)
      lr_decay (float, optional): learning rate decay multiplier
      start_decay_at (int, optional): epoch to start learning rate decay
      beta1, beta2 (float, optional): parameters for adam
      adagrad_accum (float, optional): initialization parameter for adagrad
      opt (optional): full options namespace; step() reads
          decay_method/learning_rate/rnn_size/warmup_steps from it for
          "noam" scheduling
    """
    # We use the default parameters for Adam that are suggested by
    # the original paper https://arxiv.org/pdf/1412.6980.pdf
    # These values are also used by other established implementations,
    # e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
    # https://keras.io/optimizers/
    # Recently there are slightly different values used in the paper
    # "Attention is all you need"
    # https://arxiv.org/pdf/1706.03762.pdf, particularly the value beta2=0.98
    # was used there however, beta2=0.999 is still arguably the more
    # established value, so we use that here as well
    def __init__(self, method, lr, max_grad_norm,
                 lr_decay=1, start_decay_at=None,
                 beta1=0.9, beta2=0.999,
                 adagrad_accum=0.0,
                 opt=None):
        self.last_ppl = None
        self.lr = lr
        self.max_grad_norm = max_grad_norm
        self.method = method
        self.lr_decay = lr_decay
        self.start_decay_at = start_decay_at
        self.start_decay = False
        self._step = 0
        self.betas = [beta1, beta2]
        self.adagrad_accum = adagrad_accum
        self.opt = opt

    def set_parameters(self, params):
        """Attach the trainable parameters and build the torch optimizer."""
        self.params = [p for p in params if p.requires_grad]
        if self.method == 'sgd':
            self.optimizer = optim.SGD(self.params, lr=self.lr)
        elif self.method == 'adagrad':
            self.optimizer = optim.Adagrad(self.params, lr=self.lr)
            # Seed the accumulator so early updates aren't dominated by
            # tiny denominators.
            for group in self.optimizer.param_groups:
                for p in group['params']:
                    self.optimizer.state[p]['sum'] = self.optimizer\
                        .state[p]['sum'].fill_(self.adagrad_accum)
        elif self.method == 'adadelta':
            self.optimizer = optim.Adadelta(self.params, lr=self.lr)
        elif self.method == 'adam':
            self.optimizer = optim.Adam(self.params, lr=self.lr,
                                        betas=self.betas, eps=1e-9)
        else:
            raise RuntimeError("Invalid optim method: " + self.method)

    def _set_rate(self, lr):
        # Keep our cached lr and the underlying optimizer's lr in sync.
        self.lr = lr
        self.optimizer.param_groups[0]['lr'] = self.lr

    def step(self):
        """Update the model parameters based on current gradients.

        Optionally, will employ gradient modification or update learning
        rate.
        """
        self._step += 1

        # Decay method used in tensor2tensor.
        if self.opt.__dict__.get("decay_method", "") == "noam":
            # Bug fix: this previously called self._setRate, a method
            # that does not exist (it is named _set_rate), so "noam"
            # decay always crashed with AttributeError.
            self._set_rate(
                self.opt.learning_rate *
                (self.opt.rnn_size ** (-0.5) *
                 min(self._step ** (-0.5),
                     self._step * self.opt.warmup_steps**(-1.5))))

        if self.max_grad_norm:
            clip_grad_norm(self.params, self.max_grad_norm)
        self.optimizer.step()

    def update_learning_rate(self, ppl, epoch):
        """
        Decay learning rate if val perf does not improve
        or we hit the start_decay_at limit.
        """
        if self.start_decay_at is not None and epoch >= self.start_decay_at:
            self.start_decay = True
        if self.last_ppl is not None and ppl > self.last_ppl:
            self.start_decay = True

        if self.start_decay:
            self.lr = self.lr * self.lr_decay
            print("Decaying learning rate to %g" % self.lr)

        self.last_ppl = ppl
        self.optimizer.param_groups[0]['lr'] = self.lr
Seq2Sick | Seq2Sick-master/onmt/Models.py | from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import onmt
from onmt.Utils import aeq
class EncoderBase(nn.Module):
    """
    Base encoder class. Specifies the interface used by different encoder
    types and required by :obj:`onmt.Models.NMTModel`: consume a padded
    source batch, produce a final state plus per-position contexts.
    """
    def _check_args(self, input, lengths=None, hidden=None):
        # Sanity check: the lengths vector (if given) must match the
        # batch dimension of the input.
        _, n_batch, _ = input.size()
        if lengths is not None:
            n_batch_, = lengths.size()
            aeq(n_batch, n_batch_)

    def forward(self, input, lengths=None, hidden=None):
        """
        Args:
            input (:obj:`LongTensor`):
               padded sequences of sparse indices `[src_len x batch x nfeat]`
            lengths (:obj:`LongTensor`): length of each sequence `[batch]`
            hidden (class specific):
               initial hidden state.

        Returns:
            (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):
                * final encoder state, used to initialize decoder
                  `[layers x batch x hidden]`
                * contexts for attention, `[src_len x batch x hidden]`
        """
        raise NotImplementedError
class MeanEncoder(EncoderBase):
    """A trivial non-recurrent encoder. Simply applies mean pooling.

    Args:
       num_layers (int): number of replicated layers
       embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use
    """
    def __init__(self, num_layers, embeddings):
        super(MeanEncoder, self).__init__()
        self.embeddings = embeddings
        self.num_layers = num_layers

    def forward(self, input, lengths=None, hidden=None):
        "See :obj:`EncoderBase.forward()`"
        self._check_args(input, lengths, hidden)

        emb = self.embeddings(input)
        _, batch, emb_dim = emb.size()
        # Mean over the time dimension, replicated once per layer so the
        # decoder sees a (layers x batch x dim) "final state".
        pooled = emb.mean(0).expand(self.num_layers, batch, emb_dim)
        return (pooled, pooled), emb
class RNNEncoder(EncoderBase):
    """ A generic recurrent neural network encoder.

    Args:
       rnn_type (:obj:`str`):
          style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
       bidirectional (bool) : use a bidirectional RNN
       num_layers (int) : number of stacked layers
       hidden_size (int) : hidden size of each layer
       dropout (float) : dropout value for :obj:`nn.Dropout`
       embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use
    """
    def __init__(self, rnn_type, bidirectional, num_layers,
                 hidden_size, dropout, embeddings):
        super(RNNEncoder, self).__init__()

        # Split the hidden size across directions so the concatenated
        # output still has `hidden_size` features.
        directions = 2 if bidirectional else 1
        assert hidden_size % directions == 0
        hidden_size = hidden_size // directions
        self.embeddings = embeddings
        self.no_pack_padded_seq = False

        if rnn_type == "SRU":
            # SRU doesn't support PackedSequence.
            self.no_pack_padded_seq = True
            self.rnn = onmt.modules.SRU(
                    input_size=embeddings.embedding_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    dropout=dropout,
                    bidirectional=bidirectional)
        else:
            # Use the pytorch implementation (nn.RNN / nn.LSTM / nn.GRU).
            self.rnn = getattr(nn, rnn_type)(
                    input_size=embeddings.embedding_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    dropout=dropout,
                    bidirectional=bidirectional)

    def forward(self, input, lengths=None, hidden=None, FLAG=True):
        "See :obj:`EncoderBase.forward()`"
        self._check_args(input, lengths, hidden)

        # FLAG=False lets callers pass pre-computed embeddings directly
        # instead of token indices (input is then used as-is).
        if FLAG:
            emb = self.embeddings(input)
            s_len, batch, emb_dim = emb.size()
        else:
            emb = input

        rnn_input = emb
        if lengths is not None and not self.no_pack_padded_seq:
            # Lengths data is wrapped inside a Variable.
            lengths = lengths.view(-1).tolist()
            rnn_input = pack(emb, lengths)

        outputs, final_state = self.rnn(rnn_input, hidden)

        if lengths is not None and not self.no_pack_padded_seq:
            outputs = unpack(outputs)[0]

        return final_state, outputs

    def get_embedding(self, input):
        """Return the embeddings for ``input`` without running the RNN."""
        return self.embeddings(input)
class RNNDecoderBase(nn.Module):
    """
    Base recurrent attention-based decoder class.
    Specifies the interface used by different decoder types
    and required by :obj:`onmt.Models.NMTModel`.

    Args:
       rnn_type (:obj:`str`):
          style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
       bidirectional_encoder (bool) : use with a bidirectional encoder
       num_layers (int) : number of stacked layers
       hidden_size (int) : hidden size of each layer
       attn_type (str) : see :obj:`onmt.modules.GlobalAttention`
       coverage_attn (str): see :obj:`onmt.modules.GlobalAttention`
       context_gate (str): see :obj:`onmt.modules.ContextGate`
       copy_attn (bool): setup a separate copy attention mechanism
       dropout (float) : dropout value for :obj:`nn.Dropout`
       embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use
    """
    def __init__(self, rnn_type, bidirectional_encoder, num_layers,
                 hidden_size, attn_type, coverage_attn, context_gate,
                 copy_attn, dropout, embeddings):
        super(RNNDecoderBase, self).__init__()

        # Basic attributes.
        self.decoder_type = 'rnn'
        self.bidirectional_encoder = bidirectional_encoder
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embeddings = embeddings
        self.dropout = nn.Dropout(dropout)

        # Build the RNN. _input_size is subclass-defined: input feeding
        # enlarges the per-step feature size.
        self.rnn = self._build_rnn(rnn_type, self._input_size, hidden_size,
                                   num_layers, dropout)

        # Set up the context gate.
        self.context_gate = None
        if context_gate is not None:
            self.context_gate = onmt.modules.context_gate_factory(
                context_gate, self._input_size,
                hidden_size, hidden_size, hidden_size
            )

        # Set up the standard attention.
        self._coverage = coverage_attn
        self.attn = onmt.modules.GlobalAttention(
            hidden_size, coverage=coverage_attn,
            attn_type=attn_type
        )

        # Set up a separated copy attention layer, if needed.
        self._copy = False
        if copy_attn:
            self.copy_attn = onmt.modules.GlobalAttention(
                hidden_size, attn_type=attn_type
            )
            self._copy = True

    def forward(self, input, context, state, context_lengths=None):
        """
        Args:
            input (`LongTensor`): sequences of padded tokens
                                `[tgt_len x batch x nfeats]`.
            context (`FloatTensor`): vectors from the encoder
                                 `[src_len x batch x hidden]`.
            state (:obj:`onmt.Models.DecoderState`):
                                 decoder state object to initialize the decoder
            context_lengths (`LongTensor`): the padded source lengths
                `[batch]`.
        Returns:
            (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
                * outputs: output from the decoder
                         `[tgt_len x batch x hidden]`.
                * state: final hidden state from the decoder
                * attns: distribution over src at each tgt
                        `[tgt_len x batch x src_len]`.
        """
        # Args Check
        assert isinstance(state, RNNDecoderState)
        input_len, input_batch, _ = input.size()
        contxt_len, contxt_batch, _ = context.size()
        aeq(input_batch, contxt_batch)
        # END Args Check

        # Run the forward pass of the RNN (subclass-specific).
        hidden, outputs, attns, coverage = self._run_forward_pass(
            input, context, state, context_lengths=context_lengths)

        # Update the state with the result.
        final_output = outputs[-1]
        state.update_state(hidden, final_output.unsqueeze(0),
                           coverage.unsqueeze(0)
                           if coverage is not None else None)

        # Concatenates sequence of tensors along a new dimension.
        outputs = torch.stack(outputs)
        for k in attns:
            attns[k] = torch.stack(attns[k])

        return outputs, state, attns

    def _fix_enc_hidden(self, h):
        """
        The encoder hidden is  (layers*directions) x batch x dim.
        We need to convert it to layers x batch x (directions*dim).
        """
        if self.bidirectional_encoder:
            # Interleaved forward/backward states are concatenated along
            # the feature dimension.
            h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
        return h

    def init_decoder_state(self, src, context, enc_hidden):
        # Build the initial RNNDecoderState from the encoder's final
        # state; LSTM yields an (h, c) tuple, GRU a single tensor.
        if isinstance(enc_hidden, tuple):  # LSTM
            return RNNDecoderState(context, self.hidden_size,
                                   tuple([self._fix_enc_hidden(enc_hidden[i])
                                         for i in range(len(enc_hidden))]))
        else:  # GRU
            return RNNDecoderState(context, self.hidden_size,
                                   self._fix_enc_hidden(enc_hidden))
class StdRNNDecoder(RNNDecoderBase):
    """
    Standard fully batched RNN decoder with attention.
    Faster implementation, uses CuDNN for implementation.
    See :obj:`RNNDecoderBase` for options.

    Based around the approach from
    "Neural Machine Translation By Jointly Learning To Align and Translate"
    :cite:`Bahdanau2015`

    Implemented without input_feeding and currently with no `coverage_attn`
    or `copy_attn` support.
    """
    def _run_forward_pass(self, input, context, state, context_lengths=None):
        """
        Private helper for running the specific RNN forward pass.
        Must be overriden by all subclasses.
        Args:
            input (LongTensor): a sequence of input tokens tensors
                                of size (len x batch x nfeats).
            context (FloatTensor): output(tensor sequence) from the encoder
                        RNN of size (src_len x batch x hidden_size).
            state (FloatTensor): hidden state from the encoder RNN for
                                 initializing the decoder.
            context_lengths (LongTensor): the source context lengths.
        Returns:
            hidden (Variable): final hidden state from the decoder.
            outputs ([FloatTensor]): an array of output of every time
                                     step from the decoder.
            attns (dict of (str, [FloatTensor]): a dictionary of different
                            type of attention Tensor array of every time
                            step from the decoder.
            coverage (FloatTensor, optional): coverage from the decoder.
        """
        assert not self._copy  # TODO, no support yet.
        assert not self._coverage  # TODO, no support yet.

        # Initialize local and return variables.
        outputs = []
        attns = {"std": []}
        coverage = None

        emb = self.embeddings(input)

        # Run the forward pass of the RNN.
        # GRU state is a single tensor; LSTM state is an (h, c) pair.
        if isinstance(self.rnn, nn.GRU):
            rnn_output, hidden = self.rnn(emb, state.hidden[0])
        else:
            rnn_output, hidden = self.rnn(emb, state.hidden)
        # Result Check
        input_len, input_batch, _ = input.size()
        output_len, output_batch, _ = rnn_output.size()
        aeq(input_len, output_len)
        aeq(input_batch, output_batch)
        # END Result Check

        # Calculate the attention over the whole sequence in one batch.
        attn_outputs, attn_scores = self.attn(
            rnn_output.transpose(0, 1).contiguous(),  # (output_len, batch, d)
            context.transpose(0, 1),                  # (contxt_len, batch, d)
            context_lengths=context_lengths
        )
        attns["std"] = attn_scores

        # Calculate the context gate.
        if self.context_gate is not None:
            outputs = self.context_gate(
                emb.view(-1, emb.size(2)),
                rnn_output.view(-1, rnn_output.size(2)),
                attn_outputs.view(-1, attn_outputs.size(2))
            )
            outputs = outputs.view(input_len, input_batch, self.hidden_size)
            outputs = self.dropout(outputs)
        else:
            outputs = self.dropout(attn_outputs)    # (input_len, batch, d)

        # Return result.
        return hidden, outputs, attns, coverage

    def _build_rnn(self, rnn_type, input_size,
                   hidden_size, num_layers, dropout):
        """
        Private helper for building standard decoder RNN.
        """
        # Use pytorch version when available.
        if rnn_type == "SRU":
            return onmt.modules.SRU(
                    input_size, hidden_size,
                    num_layers=num_layers,
                    dropout=dropout)

        return getattr(nn, rnn_type)(
            input_size, hidden_size,
            num_layers=num_layers,
            dropout=dropout)

    @property
    def _input_size(self):
        """
        Private helper returning the number of expected features.
        """
        return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
    """
    Input feeding based decoder. See :obj:`RNNDecoderBase` for options.
    Based around the input feeding approach from
    "Effective Approaches to Attention-based Neural Machine Translation"
    :cite:`Luong2015`

    Each time step consumes the word embedding concatenated with the
    previous step's attentional output, so decoding must advance one
    step at a time (hence the stacked-cell RNN below).
    .. mermaid::
       graph BT
          A[Input n-1]
          AB[Input n]
          subgraph RNN
            E[Pos n-1]
            F[Pos n]
            E --> F
          end
          G[Encoder]
          H[Context n-1]
          A --> E
          AB --> F
          E --> H
          G --> H
    """
    def _run_forward_pass(self, input, context, state, context_lengths=None):
        """
        See StdRNNDecoder._run_forward_pass() for description
        of arguments and return values.

        Unlike the standard decoder, this loops over time steps in Python
        because step t's RNN input depends on step t-1's attention output.
        """
        # Additional args check.
        # `output` starts as the stored input-feed vector (zeros on the
        # first call) and is replaced by each step's attentional output.
        output = state.input_feed.squeeze(0)
        output_batch, _ = output.size()
        input_len, input_batch, _ = input.size()
        aeq(input_batch, output_batch)
        # END Additional args check.
        # Initialize local and return variables.
        outputs = []
        attns = {"std": []}
        if self._copy:
            attns["copy"] = []
        if self._coverage:
            attns["coverage"] = []
        emb = self.embeddings(input)
        assert emb.dim() == 3  # len x batch x embedding_dim
        hidden = state.hidden
        coverage = state.coverage.squeeze(0) \
            if state.coverage is not None else None
        # Input feed concatenates hidden state with
        # input at every time step.
        for i, emb_t in enumerate(emb.split(1)):
            emb_t = emb_t.squeeze(0)
            # Input feeding: append the previous attentional output.
            emb_t = torch.cat([emb_t, output], 1)
            rnn_output, hidden = self.rnn(emb_t, hidden)
            attn_output, attn = self.attn(
                rnn_output,
                context.transpose(0, 1),
                context_lengths=context_lengths)
            if self.context_gate is not None:
                # Gate between source (attention) and target (RNN) context.
                output = self.context_gate(
                    emb_t, rnn_output, attn_output
                )
                output = self.dropout(output)
            else:
                output = self.dropout(attn_output)
            outputs += [output]
            attns["std"] += [attn]
            # Update the coverage attention (running sum of attention).
            if self._coverage:
                coverage = coverage + attn \
                    if coverage is not None else attn
                attns["coverage"] += [coverage]
            # Run the forward pass of the copy attention layer.
            if self._copy:
                _, copy_attn = self.copy_attn(output,
                                              context.transpose(0, 1))
                attns["copy"] += [copy_attn]
        # Return result.
        return hidden, outputs, attns, coverage
    def _build_rnn(self, rnn_type, input_size,
                   hidden_size, num_layers, dropout):
        # Input feeding needs single-step cells, so use the stacked-cell
        # implementations rather than cuDNN's full-sequence modules.
        assert not rnn_type == "SRU", "SRU doesn't support input feed! " \
                                      "Please set -input_feed 0!"
        if rnn_type == "LSTM":
            stacked_cell = onmt.modules.StackedLSTM
        else:
            stacked_cell = onmt.modules.StackedGRU
        return stacked_cell(num_layers, input_size,
                            hidden_size, dropout)
    @property
    def _input_size(self):
        """
        Using input feed by concatenating input with attention vectors.
        """
        return self.embeddings.embedding_size + self.hidden_size
class NMTModel(nn.Module):
    """
    Core trainable object in OpenNMT: wires a generic encoder and decoder
    together behind a single trainable interface.

    Args:
        encoder (:obj:`EncoderBase`): an encoder object
        decoder (:obj:`RNNDecoderBase`): a decoder object
        multigpu (bool): setup for multigpu support
    """
    def __init__(self, encoder, decoder, multigpu=False):
        self.multigpu = multigpu
        super(NMTModel, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
    def forward(self, src, tgt, lengths, dec_state=None):
        """Forward propagate a `src` and `tgt` pair for training.
        Possibly initialized with a beginning decoder state.

        Args:
            src (:obj:`Tensor`): source sequence passed to the encoder;
                typically a padded :obj:`LongTensor` of size
                `[len x batch x features]`, but may be an image or other
                generic input depending on the encoder.
            tgt (:obj:`LongTensor`): target sequence `[tgt_len x batch]`.
            lengths (:obj:`LongTensor`): src lengths, pre-padding `[batch]`.
            dec_state (:obj:`DecoderState`, optional): initial decoder state.
        Returns:
            (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
            decoder output `[tgt_len x batch x hidden]`, attention dists of
            `[tgt_len x batch x src_len]`, and the final decoder state.
        """
        # The final target token is never fed to the decoder (teacher
        # forcing predicts it instead).
        dec_input = tgt[:-1]
        enc_hidden, context = self.encoder(src, lengths)
        enc_state = self.decoder.init_decoder_state(src, context, enc_hidden)
        initial_state = enc_state if dec_state is None else dec_state
        out, dec_state, attns = self.decoder(dec_input, context,
                                             initial_state)
        if self.multigpu:
            # State/attention gathering not yet supported on multi-gpu.
            dec_state = None
            attns = None
        return out, attns, dec_state
class DecoderState(object):
    """Interface for grouping together the current state of a recurrent
    decoder. In the simplest case just represents the hidden state of
    the model. But can also be used for implementing various forms of
    input_feeding and non-recurrent models.
    Modules need to implement this to utilize beam search decoding.
    """
    def detach(self):
        """Detach every non-None state tensor from the graph, in place."""
        for state in self._all:
            if state is not None:
                state.detach_()
    def beam_update(self, idx, positions, beam_size):
        """Reorder the beam dimension of every state tensor for batch
        entry ``idx`` according to ``positions`` (in place)."""
        for state in self._all:
            depth, beam_x_batch, dim = state.size()
            # Split the flat (beam*batch) dimension and pick one sentence.
            sent_view = state.view(depth, beam_size,
                                   beam_x_batch // beam_size, dim)[:, :, idx]
            sent_view.data.copy_(
                sent_view.data.index_select(1, positions))
class RNNDecoderState(DecoderState):
    """State for the standard / input-feed RNN decoders: the RNN hidden
    state(s), the input-feed vector, and an optional coverage vector."""
    def __init__(self, context, hidden_size, rnnstate):
        """
        Args:
            context (FloatTensor): output from the encoder of size
                len x batch x rnn_size.
            hidden_size (int): the size of hidden layer of the decoder.
            rnnstate (Variable): final hidden state from the encoder,
                transformed to shape: layers x batch x (directions*dim).
        """
        self.hidden = rnnstate if isinstance(rnnstate, tuple) else (rnnstate,)
        self.coverage = None
        # Initialize input feeding with a zero vector shaped like one
        # decoder output step: (1, batch, hidden_size).
        batch_size = context.size(1)
        zeros = context.data.new(batch_size, hidden_size).zero_()
        self.input_feed = Variable(zeros, requires_grad=False).unsqueeze(0)
    @property
    def _all(self):
        """All tensors beam search must be able to reorder/detach."""
        return self.hidden + (self.input_feed,)
    def update_state(self, rnnstate, input_feed, coverage):
        """Replace the stored state after a decoder forward pass."""
        self.hidden = rnnstate if isinstance(rnnstate, tuple) else (rnnstate,)
        self.input_feed = input_feed
        self.coverage = coverage
    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        tiled = [Variable(t.data.repeat(1, beam_size, 1), volatile=False)
                 for t in self._all]
        self.hidden = tuple(tiled[:-1])
        self.input_feed = tiled[-1]
| 22,339 | 34.801282 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/ConvMultiStepAttention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.Utils import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def seq_linear(linear, x):
    """Apply ``linear`` position-wise to a (batch, hidden, length, 1) tensor,
    returning a tensor of the same shape."""
    batch, hidden_size, length, _ = x.size()
    # Flatten (batch, length) so the linear layer sees plain row vectors.
    flat = torch.transpose(x, 1, 2).contiguous().view(
        batch * length, hidden_size)
    projected = linear(flat).view(batch, length, hidden_size, 1)
    return torch.transpose(projected, 1, 2)
class ConvMultiStepAttention(nn.Module):
    """Multi-step (per-decoder-layer) attention for the convolutional
    decoder, in the style of Luong attention."""
    def __init__(self, input_size):
        super(ConvMultiStepAttention, self).__init__()
        self.linear_in = nn.Linear(input_size, input_size)
        self.mask = None
    def apply_mask(self, mask):
        """Store the (batch x tgt_len x src_len) byte mask used by forward;
        masked positions receive -inf scores before the softmax."""
        self.mask = mask
    def forward(self, base_target_emb, input, encoder_out_top,
                encoder_out_combine):
        """
        It's like Luong Attetion.
        Conv attention takes a key matrix, a value matrix and a query vector.
        Attention weight is calculated by key matrix with the query vector
        and sum on the value matrix. And the same operation is applied
        in each decode conv layer.
        Args:
            base_target_emb: target emb tensor
            input: output of decode conv
            encoder_out_top: the key matrix for calculation of attetion
                weight, which is the top output of encode conv
            encoder_out_combine: the value matrix for the attention-weighted
                sum, which is the combination of base emb and top output of
                encode
        Returns:
            (context_output, attn): attended values and attention weights.
        """
        # checks
        batch, channel, height, width = base_target_emb.size()
        batch_, channel_, height_, width_ = input.size()
        aeq(batch, batch_)
        aeq(height, height_)
        enc_batch, enc_channel, enc_height = encoder_out_top.size()
        enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
        aeq(enc_batch, enc_batch_)
        aeq(enc_height, enc_height_)
        # Residual-style combination of embedding and conv output, scaled
        # to keep variance stable (SCALE_WEIGHT = sqrt(0.5)).
        preatt = seq_linear(self.linear_in, input)
        target = (base_target_emb + preatt) * SCALE_WEIGHT
        target = torch.squeeze(target, 3)
        target = torch.transpose(target, 1, 2)
        pre_attn = torch.bmm(target, encoder_out_top)
        if self.mask is not None:
            pre_attn.data.masked_fill_(self.mask, -float('inf'))
        pre_attn = pre_attn.transpose(0, 2)
        # FIX: make the softmax dim explicit. After the transpose above the
        # source-length axis is dim 0, which is exactly what the old
        # implicit F.softmax picked for 3-D inputs — behavior is unchanged,
        # but the deprecated implicit-dim call is gone.
        attn = F.softmax(pre_attn, dim=0)
        attn = attn.transpose(0, 2).contiguous()
        context_output = torch.bmm(
            attn, torch.transpose(encoder_out_combine, 1, 2))
        context_output = torch.transpose(
            torch.unsqueeze(context_output, 3), 1, 2)
        return context_output, attn
| 2,611 | 34.780822 | 77 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/Transformer.py | """
Implementation of "Attention is All You Need"
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import onmt
from onmt.Models import EncoderBase
from onmt.Models import DecoderState
from onmt.Utils import aeq
MAX_SIZE = 5000
class PositionwiseFeedForward(nn.Module):
    """ A two-layer Feed-Forward-Network with a residual connection,
    applied position-wise (pre-norm variant)."""
    def __init__(self, size, hidden_size, dropout=0.1):
        """
        Args:
            size(int): input (and output) dimensionality of the FFN.
            hidden_size(int): width of the inner (second) layer.
            dropout(float): dropout probability (0-1.0).
        """
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = onmt.modules.BottleLinear(size, hidden_size)
        self.w_2 = onmt.modules.BottleLinear(hidden_size, size)
        self.layer_norm = onmt.modules.BottleLayerNorm(size)
        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
    def forward(self, x):
        """LayerNorm -> Linear -> ReLU -> Linear, then add the residual."""
        normed = self.layer_norm(x)
        hidden = self.dropout_1(self.relu(self.w_1(normed)))
        projected = self.dropout_2(self.w_2(hidden))
        return projected + x
class TransformerEncoderLayer(nn.Module):
    """One pre-norm Transformer encoder block: self-attention over the
    normalized input followed by a position-wise feed-forward network,
    each with a residual connection."""
    def __init__(self, size, dropout,
                 head_count=8, hidden_size=2048):
        """
        Args:
            size(int): dimension of keys/values/queries in
                MultiHeadedAttention, and FFN input size.
            dropout(float): dropout probability (0-1.0).
            head_count(int): number of attention heads.
            hidden_size(int): inner width of the feed-forward network.
        """
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = onmt.modules.MultiHeadedAttention(
            head_count, size, p=dropout)
        self.feed_forward = PositionwiseFeedForward(size,
                                                    hidden_size,
                                                    dropout)
        self.layer_norm = onmt.modules.BottleLayerNorm(size)
    def forward(self, input, mask):
        """Self-attend over the normalized input, add the residual, then
        run the feed-forward sublayer (which adds its own residual)."""
        normed = self.layer_norm(input)
        attended, _ = self.self_attn(normed, normed, normed, mask=mask)
        return self.feed_forward(attended + input)
class TransformerEncoder(EncoderBase):
    """
    The Transformer encoder from "Attention is All You Need":
    a stack of self-attention layers over the embedded source.
    """
    def __init__(self, num_layers, hidden_size,
                 dropout, embeddings):
        super(TransformerEncoder, self).__init__()
        self.num_layers = num_layers
        self.embeddings = embeddings
        self.transformer = nn.ModuleList(
            [TransformerEncoderLayer(hidden_size, dropout)
             for _ in range(num_layers)])
        self.layer_norm = onmt.modules.BottleLayerNorm(hidden_size)
    def forward(self, input, lengths=None, hidden=None):
        """ See EncoderBase.forward() for description of args and returns."""
        self._check_args(input, lengths, hidden)
        emb = self.embeddings(input)
        s_len, n_batch, emb_dim = emb.size()
        hidden_states = emb.transpose(0, 1).contiguous()
        word_ids = input[:, :, 0].transpose(0, 1)
        # CHECKS
        out_batch, out_len, _ = hidden_states.size()
        w_batch, w_len = word_ids.size()
        aeq(out_batch, w_batch)
        aeq(out_len, w_len)
        # END CHECKS
        # Self-attention mask: true wherever the source token is padding.
        padding_idx = self.embeddings.word_padding_idx
        mask = word_ids.data.eq(padding_idx).unsqueeze(1) \
            .expand(w_batch, w_len, w_len)
        # Run the forward pass of every layer of the tranformer.
        for layer in self.transformer:
            hidden_states = layer(hidden_states, mask)
        hidden_states = self.layer_norm(hidden_states)
        return Variable(emb.data), hidden_states.transpose(0, 1).contiguous()
class TransformerDecoderLayer(nn.Module):
    """One Transformer decoder block: masked self-attention over the
    target prefix, attention over the encoder context, then a
    position-wise feed-forward network (with residual connections).
    """
    def __init__(self, size, dropout,
                 head_count=8, hidden_size=2048):
        """
        Args:
            size(int): the dimension of keys/values/queries in
                       MultiHeadedAttention, also the input size of
                       the first-layer of the PositionwiseFeedForward.
            dropout(float): dropout probability(0-1.0).
            head_count(int): the number of head for MultiHeadedAttention.
            hidden_size(int): the second-layer of the PositionwiseFeedForward.
        """
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = onmt.modules.MultiHeadedAttention(
                head_count, size, p=dropout)
        self.context_attn = onmt.modules.MultiHeadedAttention(
                head_count, size, p=dropout)
        self.feed_forward = PositionwiseFeedForward(size,
                                                    hidden_size,
                                                    dropout)
        self.layer_norm_1 = onmt.modules.BottleLayerNorm(size)
        self.layer_norm_2 = onmt.modules.BottleLayerNorm(size)
        self.dropout = dropout
        # Triangular "no peeking ahead" mask, precomputed up to MAX_SIZE
        # target positions and sliced to the actual length in forward().
        mask = self._get_attn_subsequent_mask(MAX_SIZE)
        # Register self.mask as a buffer in TransformerDecoderLayer, so
        # it gets TransformerDecoderLayer's cuda behavior automatically.
        self.register_buffer('mask', mask)
    def forward(self, input, context, src_pad_mask, tgt_pad_mask):
        """Run one decoder block.

        Args:
            input: (batch, tgt_len, size) decoder states.
            context: (batch, src_len, size) encoder output.
            src_pad_mask: (batch, tgt_len, src_len) source padding mask.
            tgt_pad_mask: (batch, tgt_len, tgt_len) target padding mask.
        Returns:
            (output, attn): transformed states and the context attention.
        """
        # Args Checks
        input_batch, input_len, _ = input.size()
        contxt_batch, contxt_len, _ = context.size()
        aeq(input_batch, contxt_batch)
        src_batch, t_len, s_len = src_pad_mask.size()
        tgt_batch, t_len_, t_len__ = tgt_pad_mask.size()
        aeq(input_batch, contxt_batch, src_batch, tgt_batch)
        aeq(t_len, t_len_, t_len__, input_len)
        aeq(s_len, contxt_len)
        # END Args Checks
        # Combine the padding mask with the subsequent-position mask:
        # a position is masked if either of the two masks flags it.
        dec_mask = torch.gt(tgt_pad_mask + self.mask[:, :tgt_pad_mask.size(1),
                            :tgt_pad_mask.size(1)]
                            .expand_as(tgt_pad_mask), 0)
        input_norm = self.layer_norm_1(input)
        query, attn = self.self_attn(input_norm, input_norm, input_norm,
                                     mask=dec_mask)
        query_norm = self.layer_norm_2(query+input)
        mid, attn = self.context_attn(context, context, query_norm,
                                      mask=src_pad_mask)
        # Feed-forward over the sum of all sublayer outputs (residuals).
        output = self.feed_forward(mid+query+input)
        # CHECKS
        output_batch, output_len, _ = output.size()
        aeq(input_len, output_len)
        aeq(contxt_batch, output_batch)
        n_batch_, t_len_, s_len_ = attn.size()
        aeq(input_batch, n_batch_)
        aeq(contxt_len, s_len_)
        aeq(input_len, t_len_)
        # END CHECKS
        return output, attn
    def _get_attn_subsequent_mask(self, size):
        ''' Get an attention mask to avoid using the subsequent info.
        Returns a (1, size, size) uint8 tensor with ones strictly above
        the diagonal (future positions). '''
        attn_shape = (1, size, size)
        subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
        subsequent_mask = torch.from_numpy(subsequent_mask)
        return subsequent_mask
class TransformerDecoder(nn.Module):
    """
    The Transformer decoder from "Attention is All You Need".
    Re-reads the full decoded prefix on every forward pass (the prefix is
    carried in TransformerDecoderState) instead of an RNN hidden state.
    """
    def __init__(self, num_layers, hidden_size, attn_type,
                 copy_attn, dropout, embeddings):
        super(TransformerDecoder, self).__init__()
        # Basic attributes.
        self.decoder_type = 'transformer'
        self.num_layers = num_layers
        self.embeddings = embeddings
        # Build TransformerDecoder.
        self.transformer_layers = nn.ModuleList(
            [TransformerDecoderLayer(hidden_size, dropout)
             for _ in range(num_layers)])
        # TransformerDecoder has its own attention mechanism.
        # Set up a separated copy attention layer, if needed.
        self._copy = False
        if copy_attn:
            self.copy_attn = onmt.modules.GlobalAttention(
                hidden_size, attn_type=attn_type)
            self._copy = True
        self.layer_norm = onmt.modules.BottleLayerNorm(hidden_size)
    def forward(self, input, context, state, context_lengths=None):
        """
        Forward through the TransformerDecoder.
        Args:
            input (LongTensor): a sequence of input tokens tensors
                of size (len x batch x nfeats).
            context (FloatTensor): output(tensor sequence) from the encoder
                of size (src_len x batch x hidden_size).
            state (FloatTensor): hidden state from the encoder RNN for
                initializing the decoder.
            context_lengths (LongTensor): the source context lengths, this is
                not used for TransformerDecoder, but kept
                for interface compatibility.
        Returns:
            outputs (FloatTensor): a Tensor sequence of output from the
                decoder of shape (len x batch x hidden_size).
            state (FloatTensor): final hidden state from the decoder.
            attns (dict of (str, FloatTensor)): a dictionary of different
                type of attention Tensor from the decoder
                of shape (src_len x batch).
        """
        # CHECKS
        assert isinstance(state, TransformerDecoderState)
        input_len, input_batch, _ = input.size()
        contxt_len, contxt_batch, _ = context.size()
        aeq(input_batch, contxt_batch)
        # Incremental decoding: re-attach all previously decoded inputs so
        # every layer attends over the whole prefix.
        if state.previous_input is not None:
            input = torch.cat([state.previous_input, input], 0)
        src = state.src
        src_words = src[:, :, 0].transpose(0, 1)
        tgt_words = input[:, :, 0].transpose(0, 1)
        src_batch, src_len = src_words.size()
        tgt_batch, tgt_len = tgt_words.size()
        aeq(input_batch, contxt_batch, src_batch, tgt_batch)
        aeq(contxt_len, src_len)
        # aeq(input_len, tgt_len)
        # END CHECKS
        # Initialize return variables.
        outputs = []
        attns = {"std": []}
        if self._copy:
            attns["copy"] = []
        # Run the forward pass of the TransformerDecoder.
        emb = self.embeddings(input)
        assert emb.dim() == 3  # len x batch x embedding_dim
        output = emb.transpose(0, 1).contiguous()
        src_context = context.transpose(0, 1).contiguous()
        padding_idx = self.embeddings.word_padding_idx
        src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \
            .expand(src_batch, tgt_len, src_len)
        tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \
            .expand(tgt_batch, tgt_len, tgt_len)
        for i in range(self.num_layers):
            output, attn \
                = self.transformer_layers[i](output, src_context,
                                             src_pad_mask, tgt_pad_mask)
        output = self.layer_norm(output)
        # Process the result and update the attentions.
        # Only the positions for the *new* input are returned; the prefix
        # part of outputs/attn is sliced away.
        outputs = output.transpose(0, 1).contiguous()
        if state.previous_input is not None:
            outputs = outputs[state.previous_input.size(0):]
            attn = attn[:, state.previous_input.size(0):].squeeze()
            attn = torch.stack([attn])
        attns["std"] = attn
        if self._copy:
            # NOTE(review): the copy attention reuses the last layer's
            # context attention; self.copy_attn is built in __init__ but
            # never called here — confirm this is intended.
            attns["copy"] = attn
        # Update the state.
        state.update_state(input)
        return outputs, state, attns
    def init_decoder_state(self, src, context, enc_hidden):
        # The Transformer state only needs the source; enc_hidden/context
        # are accepted for interface compatibility with RNN decoders.
        return TransformerDecoderState(src)
class TransformerDecoderState(DecoderState):
    """Decoder state for the Transformer: the source sequence plus the
    full decoder input prefix seen so far (the Transformer re-reads its
    prefix each step instead of carrying an RNN hidden state)."""
    def __init__(self, src):
        """
        Args:
            src (FloatTensor): a sequence of source words tensors
                with optional feature tensors, of size (len x batch).
        """
        self.previous_input = None
        self.src = src
    @property
    def _all(self):
        """
        Contains attributes that need to be updated in self.beam_update().
        """
        return self.previous_input, self.src
    def update_state(self, input):
        """ Called for every decoder forward pass; remembers the prefix. """
        self.previous_input = input
    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        repeated = self.src.data.repeat(1, beam_size, 1)
        self.src = Variable(repeated, volatile=True)
| 12,494 | 37.684211 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/Embeddings.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from onmt.modules import BottleLinear, Elementwise
from onmt.Utils import aeq
class PositionalEncoding(nn.Module):
    """Adds sinusoidal position information to a (len, batch, dim)
    embedding tensor, following "Attention is All You Need"."""
    def __init__(self, dropout, dim, max_len=5000):
        # Precompute the (max_len, 1, dim) table once; even columns get
        # sin, odd columns get cos, with geometrically spaced frequencies.
        positions = torch.arange(0, max_len).unsqueeze(1).expand(max_len, dim)
        inv_freq = 1 / torch.pow(10000, torch.arange(0, dim * 2, 2) / dim)
        table = positions * inv_freq.expand_as(positions)
        table[:, 0::2] = torch.sin(table[:, 0::2])
        table[:, 1::2] = torch.cos(table[:, 1::2])
        table = table.unsqueeze(1)
        super(PositionalEncoding, self).__init__()
        # Buffer (not a Parameter): moves with the module, never trained.
        self.register_buffer('pe', table)
        self.dropout = nn.Dropout(p=dropout)
    def forward(self, emb):
        """Return dropout(emb + positional table).

        The table slice is wrapped in a Variable (not unwrapped via
        emb.data) so the addition stays on the autograd graph.
        """
        pe_slice = self.pe[:emb.size(0), :1, :emb.size(2)].expand_as(emb)
        emb = emb + Variable(pe_slice, requires_grad=False)
        return self.dropout(emb)
class Embeddings(nn.Module):
    """
    Words embeddings for encoder/decoder.
    Additionally includes ability to add sparse input features
    based on "Linguistic Input Features Improve Neural Machine Translation"
    :cite:`sennrich2016linguistic`.
    .. mermaid::
       graph LR
          A[Input]
          C[Feature 1 Lookup]
          A-->B[Word Lookup]
          A-->C
          A-->D[Feature N Lookup]
          B-->E[MLP/Concat]
          C-->E
          D-->E
          E-->F[Output]
    Args:
        word_vec_size (int): size of the dictionary of embeddings.
        position_encoding (bool): use a sin to mark relative words positions.
        feat_merge (string): merge action for the features embeddings:
            concat, sum or mlp.
        feat_vec_exponent (float): when using `-feat_merge concat`, feature
            embedding size is N^feat_dim_exponent, where N is the
            number of values of feature takes.
        feat_vec_size (int): embedding dimension for features when using
            `-feat_merge mlp`
        dropout (float): dropout probability.
        word_padding_idx (int): padding index for words in the embeddings.
        feat_padding_idx (list of int): padding index for a list of features
            in the embeddings.
        word_vocab_size (int): size of dictionary of embeddings for words.
        feat_vocab_sizes ([int], optional): list of size of dictionary
            of embeddings for each feature.
    """
    def __init__(self, word_vec_size, position_encoding, feat_merge,
                 feat_vec_exponent, feat_vec_size, dropout,
                 word_padding_idx, feat_padding_idx,
                 word_vocab_size, feat_vocab_sizes=[]):
        # NOTE(review): the mutable default for feat_vocab_sizes is safe
        # here because the list is only read, never mutated.
        self.word_padding_idx = word_padding_idx
        # Dimensions and padding for constructing the word embedding matrix
        vocab_sizes = [word_vocab_size]
        emb_dims = [word_vec_size]
        pad_indices = [word_padding_idx]
        # Dimensions and padding for feature embedding matrices
        # (these have no effect if feat_vocab_sizes is empty)
        if feat_merge == 'sum':
            feat_dims = [word_vec_size] * len(feat_vocab_sizes)
        elif feat_vec_size > 0:
            feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
        else:
            # Default: feature dim grows with its vocabulary size.
            feat_dims = [int(vocab ** feat_vec_exponent)
                         for vocab in feat_vocab_sizes]
        vocab_sizes.extend(feat_vocab_sizes)
        emb_dims.extend(feat_dims)
        pad_indices.extend(feat_padding_idx)
        # The embedding matrix look-up tables. The first look-up table
        # is for words. Subsequent ones are for features, if any exist.
        emb_params = zip(vocab_sizes, emb_dims, pad_indices)
        embeddings = [nn.Embedding(vocab, dim, padding_idx=pad)
                      for vocab, dim, pad in emb_params]
        emb_luts = Elementwise(feat_merge, embeddings)
        # The final output size of word + feature vectors. This can vary
        # from the word vector size if and only if features are defined.
        # This is the attribute you should access if you need to know
        # how big your embeddings are going to be.
        self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'
                               else word_vec_size)
        # The sequence of operations that converts the input sequence
        # into a sequence of embeddings. At minimum this consists of
        # looking up the embeddings for each word and feature in the
        # input. Model parameters may require the sequence to contain
        # additional operations as well.
        super(Embeddings, self).__init__()
        self.make_embedding = nn.Sequential()
        self.make_embedding.add_module('emb_luts', emb_luts)
        if feat_merge == 'mlp':
            # Project the concatenated word+feature vectors back down to
            # word_vec_size through a small MLP.
            in_dim = sum(emb_dims)
            out_dim = word_vec_size
            mlp = nn.Sequential(BottleLinear(in_dim, out_dim), nn.ReLU())
            self.make_embedding.add_module('mlp', mlp)
        if position_encoding:
            pe = PositionalEncoding(dropout, self.embedding_size)
            self.make_embedding.add_module('pe', pe)
    @property
    def word_lut(self):
        # The word look-up table is the first table of the first stage.
        return self.make_embedding[0][0]
    @property
    def emb_luts(self):
        # All look-up tables (words first, then features).
        return self.make_embedding[0]
    def load_pretrained_vectors(self, emb_file, fixed):
        """Load in pretrained embeddings.
        Args:
          emb_file (str) : path to torch serialized embeddings
          fixed (bool) : if true, embeddings are not updated
        """
        if emb_file:
            pretrained = torch.load(emb_file)
            self.word_lut.weight.data.copy_(pretrained)
            if fixed:
                # Freeze the word embeddings.
                self.word_lut.weight.requires_grad = False
    def forward(self, input):
        """
        Computes the embeddings for words and features.
        Args:
            input (`LongTensor`): index tensor `[len x batch x nfeat]`
        Return:
            `FloatTensor`: word embeddings `[len x batch x embedding_size]`
        """
        in_length, in_batch, nfeat = input.size()
        aeq(nfeat, len(self.emb_luts))
        emb = self.make_embedding(input)
        out_length, out_batch, emb_size = emb.size()
        aeq(in_length, out_length)
        aeq(in_batch, out_batch)
        aeq(emb_size, self.embedding_size)
        return emb
| 6,674 | 37.142857 | 77 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/CopyGenerator.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
import onmt
import onmt.io
from onmt.Utils import aeq
class CopyGenerator(nn.Module):
    """
    Generator module that additionally considers copying
    words directly from the source.

    Produces a distribution over the target vocabulary extended with the
    batch-specific copy vocabulary:
        p(w) = p(z=0) * p_softmax(w) + p(z=1) * p_copy(w)
    where z is a per-position binary copy decision.
    """
    def __init__(self, opt, src_dict, tgt_dict):
        super(CopyGenerator, self).__init__()
        self.linear = nn.Linear(opt.rnn_size, len(tgt_dict))
        self.linear_copy = nn.Linear(opt.rnn_size, 1)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
    def forward(self, hidden, attn, src_map):
        """
        Computes p(w) = p(z=1) p_{copy}(w|z=0) + p(z=0) * p_{softmax}(w|z=0)

        Args:
            hidden (FloatTensor): decoder output, ((batch*tlen) x rnn_size).
            attn (FloatTensor): copy attention, ((batch*tlen) x slen).
            src_map (FloatTensor): maps source positions onto the copy
                vocabulary, (slen x batch x cvocab).
        Returns:
            FloatTensor of shape ((batch*tlen) x (tvocab + cvocab)).
        """
        # CHECKS
        batch_by_tlen, _ = hidden.size()
        batch_by_tlen_, slen = attn.size()
        slen_, batch, cvocab = src_map.size()
        aeq(batch_by_tlen, batch_by_tlen_)
        aeq(slen, slen_)
        # Original probabilities; PAD can never be generated.
        logits = self.linear(hidden)
        logits[:, self.tgt_dict.stoi[onmt.io.PAD_WORD]] = -float('inf')
        # FIX: explicit dim (the deprecated implicit F.softmax used dim 1
        # for 2-D input, so behavior is unchanged).
        prob = F.softmax(logits, dim=1)
        # Probability of copying p(z=1) batch.
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid.
        copy = torch.sigmoid(self.linear_copy(hidden))
        # Probibility of not copying: p_{word}(w) * (1 - p(z))
        out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
        mul_attn = torch.mul(attn, copy.expand_as(attn))
        # Project attention over source positions onto the copy vocabulary.
        copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)
                              .transpose(0, 1),
                              src_map.transpose(0, 1)).transpose(0, 1)
        copy_prob = copy_prob.contiguous().view(-1, cvocab)
        return torch.cat([out_prob, copy_prob], 1)
class CopyGeneratorCriterion(object):
    """Negative log-likelihood over the extended (target + copy) vocabulary.

    Copy-vocabulary entries live at indices >= vocab_size, so alignment
    indices are shifted by ``offset`` before gathering.
    """
    def __init__(self, vocab_size, force_copy, pad, eps=1e-20):
        self.force_copy = force_copy
        self.eps = eps          # keeps log() finite when a prob is zero
        self.offset = vocab_size
        self.pad = pad
    def __call__(self, scores, align, target):
        align = align.view(-1)
        # Probability mass on the aligned copy token, zeroed wherever
        # there is no alignment (align == 0).
        copy_p = scores.gather(1, align.view(-1, 1) + self.offset).view(-1)
        copy_p = copy_p.mul(align.ne(0).float())
        gen_p = scores.gather(1, target.view(-1, 1)).view(-1)
        if not self.force_copy:
            # Also credit generating the word itself (non-unk targets) and
            # unk targets that could not be copied.
            likelihood = copy_p + self.eps + gen_p.mul(target.ne(0).float()) + \
                gen_p.mul(align.eq(0).float()).mul(target.eq(0).float())
        else:
            # Forced copy: generation prob only counts when unaligned.
            likelihood = copy_p + self.eps + gen_p.mul(align.eq(0).float())
        # Padding positions contribute nothing to the loss.
        return -likelihood.log().mul(target.ne(self.pad).float()).sum()
class CopyGeneratorLossCompute(onmt.Loss.LossComputeBase):
    """
    Copy Generator Loss Computation: wires CopyGenerator scores through
    CopyGeneratorCriterion and produces training statistics.
    """
    def __init__(self, generator, tgt_vocab, dataset,
                 force_copy, eps=1e-20):
        super(CopyGeneratorLossCompute, self).__init__(generator, tgt_vocab)
        self.dataset = dataset
        self.force_copy = force_copy
        # self.padding_idx comes from the LossComputeBase initialization.
        self.criterion = CopyGeneratorCriterion(len(tgt_vocab), force_copy,
                                                self.padding_idx)
    def _make_shard_state(self, batch, output, range_, attns):
        """ See base class for args description. """
        # Copy losses need the source/target alignment produced by the
        # -dynamic_dict preprocessing option; fail loudly if absent.
        if getattr(batch, "alignment", None) is None:
            raise AssertionError("using -copy_attn you need to pass in "
                                 "-dynamic_dict during preprocess stage.")
        return {
            "output": output,
            "target": batch.tgt[range_[0] + 1: range_[1]],
            "copy_attn": attns.get("copy"),
            "align": batch.alignment[range_[0] + 1: range_[1]]
        }
    def _compute_loss(self, batch, output, target, copy_attn, align):
        """
        Compute the loss. The args must match self._make_shard_state().
        Args:
            batch: the current batch.
            output: the predict output from the model.
            target: the validate target to compare output with.
            copy_attn: the copy attention value.
            align: the align info.
        Returns:
            (loss, stats): the differentiable loss and accuracy statistics.
        """
        target = target.view(-1)
        align = align.view(-1)
        scores = self.generator(self._bottle(output),
                                self._bottle(copy_attn),
                                batch.src_map)
        loss = self.criterion(scores, align, target)
        # Statistics are computed on a detached copy of the scores with
        # the per-batch copy vocabulary collapsed back onto target tokens.
        scores_data = scores.data.clone()
        scores_data = self.dataset.collapse_copy_scores(
            self._unbottle(scores_data, batch.batch_size),
            batch, self.tgt_vocab)
        scores_data = self._bottle(scores_data)
        # Correct target copy token instead of <unk>
        # tgt[i] = align[i] + len(tgt_vocab)
        # for i such that tgt[i] == 0 and align[i] != 0
        target_data = target.data.clone()
        correct_mask = target_data.eq(0) * align.data.ne(0)
        correct_copy = (align.data + len(self.tgt_vocab)) * correct_mask.long()
        target_data = target_data + correct_copy
        # Detach the loss value before computing statistics so the stats
        # do not keep the autograd graph alive.
        loss_data = loss.data.clone()
        stats = self._stats(loss_data, scores_data, target_data)
        return loss, stats
| 5,166 | 34.881944 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/StackedRNN.py | import torch
import torch.nn as nn
class StackedLSTM(nn.Module):
    """
    Stack of LSTMCells. Needed (instead of nn.LSTM) for the decoder,
    because input feeding advances a single time step at a time.
    """
    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedLSTM, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for _ in range(num_layers):
            self.layers.append(nn.LSTMCell(input_size, rnn_size))
            # Every layer after the first consumes the previous layer's h.
            input_size = rnn_size
    def forward(self, input, hidden):
        """One time step through all layers.

        Returns (top-layer h, (h_n, c_n)) where h_n/c_n stack all layers.
        """
        prev_h, prev_c = hidden
        new_h, new_c = [], []
        top = self.num_layers - 1
        for depth, cell in enumerate(self.layers):
            h_i, c_i = cell(input, (prev_h[depth], prev_c[depth]))
            new_h.append(h_i)
            new_c.append(c_i)
            # Inter-layer dropout everywhere except after the top layer.
            input = h_i if depth == top else self.dropout(h_i)
        return input, (torch.stack(new_h), torch.stack(new_c))
class StackedGRU(nn.Module):
    """Stack of GRUCells, mirroring StackedLSTM for input-feed decoding."""
    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedGRU, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for _ in range(num_layers):
            self.layers.append(nn.GRUCell(input_size, rnn_size))
            # Every layer after the first consumes the previous layer's h.
            input_size = rnn_size
    def forward(self, input, hidden):
        """One time step; ``hidden`` is a 1-tuple of (layers, batch, rnn_size).

        Returns (top-layer h, (h_n,)) to match the LSTM interface shape.
        """
        new_h = []
        top = self.num_layers - 1
        for depth, cell in enumerate(self.layers):
            h_i = cell(input, hidden[0][depth])
            new_h.append(h_i)
            # Inter-layer dropout everywhere except after the top layer.
            input = h_i if depth == top else self.dropout(h_i)
        return input, (torch.stack(new_h),)
| 1,755 | 28.266667 | 66 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/MultiHeadedAttn.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from onmt.Utils import aeq
from onmt.modules.UtilClass import BottleLinear, BottleSoftmax
class MultiHeadedAttention(nn.Module):
    ''' Multi-Head Attention module from
    "Attention is All You Need".
    '''
    def __init__(self, head_count, model_dim, p=0.1):
        """
        Args:
            head_count(int): number of parallel heads.
            model_dim(int): the dimension of keys/values/queries in this
                MultiHeadedAttention, must be divisible by head_count.
            p(float): dropout probability (0-1.0).
        """
        assert model_dim % head_count == 0
        self.dim_per_head = model_dim // head_count
        self.model_dim = model_dim
        super(MultiHeadedAttention, self).__init__()
        self.head_count = head_count
        self.linear_keys = BottleLinear(model_dim,
                                        head_count * self.dim_per_head,
                                        bias=False)
        self.linear_values = BottleLinear(model_dim,
                                          head_count * self.dim_per_head,
                                          bias=False)
        self.linear_query = BottleLinear(model_dim,
                                         head_count * self.dim_per_head,
                                         bias=False)
        self.sm = BottleSoftmax()
        # NOTE: unused, but kept so existing checkpoints / module layout
        # stay compatible.
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(p)
        self.res_dropout = nn.Dropout(p)
    def forward(self, key, value, query, mask=None):
        """
        Scaled dot-product attention over head_count parallel heads.

        Args:
            key, value (FloatTensor): (batch, k_len, model_dim).
            query (FloatTensor): (batch, q_len, model_dim).
            mask (ByteTensor, optional): (batch, q_len, k_len); nonzero
                positions are excluded from attention.
        Returns:
            (output, top_attn): attended values (batch, q_len, model_dim)
            and the first head's attention weights (for visualization).
        """
        # CHECKS
        batch, k_len, d = key.size()
        batch_, k_len_, d_ = value.size()
        aeq(batch, batch_)
        aeq(k_len, k_len_)
        aeq(d, d_)
        batch_, q_len, d_ = query.size()
        aeq(batch, batch_)
        aeq(d, d_)
        # FIX: was `aeq(self.model_dim % 8, 0)` — the intent is divisibility
        # by the (configurable) number of heads, as asserted in __init__.
        aeq(self.model_dim % self.head_count, 0)
        if mask is not None:
            batch_, q_len_, k_len_ = mask.size()
            aeq(batch_, batch)
            aeq(k_len_, k_len)
            # BUG FIX: was `aeq(q_len_ == q_len)`, which passed aeq a single
            # boolean and therefore never actually checked anything.
            aeq(q_len_, q_len)
        # END CHECKS
        def shape_projection(x):
            # (b, l, d) -> (b*heads, l, dim_per_head)
            b, l, d = x.size()
            return x.view(b, l, self.head_count, self.dim_per_head) \
                .transpose(1, 2).contiguous() \
                .view(b * self.head_count, l, self.dim_per_head)
        def unshape_projection(x, q):
            # (b*heads, l, dim_per_head) -> (b, l, heads*dim_per_head)
            b, l, d = q.size()
            return x.view(b, self.head_count, l, self.dim_per_head) \
                    .transpose(1, 2).contiguous() \
                    .view(b, l, self.head_count * self.dim_per_head)
        residual = query
        key_up = shape_projection(self.linear_keys(key))
        value_up = shape_projection(self.linear_values(value))
        query_up = shape_projection(self.linear_query(query))
        # Scaled dot-product scores: (batch*heads, q_len, k_len).
        scaled = torch.bmm(query_up, key_up.transpose(1, 2))
        scaled = scaled / math.sqrt(self.dim_per_head)
        # FIX: the third dim of `scaled` is the key length, not
        # dim_per_head; the old local name was misleading.
        bh, l, score_k_len = scaled.size()
        b = bh // self.head_count
        if mask is not None:
            scaled = scaled.view(b, self.head_count, l, score_k_len)
            mask = mask.unsqueeze(1).expand_as(scaled)
            scaled = scaled.masked_fill(Variable(mask), -float('inf')) \
                           .view(bh, l, score_k_len)
        attn = self.sm(scaled)
        # Return one attn (first head only) for inspection.
        top_attn = attn \
            .view(b, self.head_count, l, score_k_len)[:, 0, :, :] \
            .contiguous()
        # FIX: reuse `attn` rather than recomputing the softmax a second
        # time — the result is identical, one softmax pass cheaper.
        drop_attn = self.dropout(attn)
        # values : (batch * heads) x q_len x dim_per_head
        out = unshape_projection(torch.bmm(drop_attn, value_up), residual)
        # Residual dropout on the merged heads.
        ret = self.res_dropout(out)
        # CHECK
        batch_, q_len_, d_ = ret.size()
        aeq(q_len, q_len_)
        aeq(batch, batch_)
        aeq(d, d_)
        # END CHECK
        return ret, top_attn
| 3,828 | 34.453704 | 74 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/Gate.py | """
Context gate is a decoder module that takes as input the previous word
embedding, the current decoder state and the attention state, and produces a
gate.
The gate can be used to select the input from the target side context
(decoder state), from the source context (attention state) or both.
"""
import torch
import torch.nn as nn
def context_gate_factory(type, embeddings_size, decoder_size,
                         attention_size, output_size):
    """Returns the correct ContextGate class"""
    mapping = {
        'source': SourceContextGate,
        'target': TargetContextGate,
        'both': BothContextGate,
    }
    assert type in mapping, "Not valid ContextGate type: {0}".format(type)
    gate_cls = mapping[type]
    return gate_cls(embeddings_size, decoder_size, attention_size,
                    output_size)
class ContextGate(nn.Module):
    """Computes the sigmoid gate `z` plus the projected source and target
    contexts; subclasses decide how the three are combined."""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(ContextGate, self).__init__()
        combined_size = embeddings_size + decoder_size + attention_size
        self.gate = nn.Linear(combined_size, output_size, bias=True)
        self.sig = nn.Sigmoid()
        self.source_proj = nn.Linear(attention_size, output_size)
        self.target_proj = nn.Linear(embeddings_size + decoder_size,
                                     output_size)

    def forward(self, prev_emb, dec_state, attn_state):
        # Gate is conditioned on all three inputs concatenated together.
        everything = torch.cat((prev_emb, dec_state, attn_state), dim=1)
        z = self.sig(self.gate(everything))
        proj_source = self.source_proj(attn_state)
        proj_target = self.target_proj(
            torch.cat((prev_emb, dec_state), dim=1))
        return z, proj_source, proj_target
class SourceContextGate(nn.Module):
    """Apply the context gate only to the source context"""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(SourceContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        # Only the source projection is scaled by the gate.
        z, src, tgt = self.context_gate(prev_emb, dec_state, attn_state)
        return self.tanh(tgt + z * src)
class TargetContextGate(nn.Module):
    """Apply the context gate only to the target context"""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(TargetContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        # Only the target projection is scaled by the gate.
        z, src, tgt = self.context_gate(prev_emb, dec_state, attn_state)
        return self.tanh(z * tgt + src)
class BothContextGate(nn.Module):
    """Apply the context gate to both contexts"""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(BothContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        # Convex-style mix: target weighted by (1 - z), source by z.
        z, src, tgt = self.context_gate(prev_emb, dec_state, attn_state)
        return self.tanh((1. - z) * tgt + z * src)
| 3,600 | 38.571429 | 78 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/UtilClass.py | import torch
import torch.nn as nn
class Bottle(nn.Module):
    """Mixin that lets a 2-D module accept 3-D input: the first two
    dimensions are flattened before the parent forward and restored on
    the result."""

    def forward(self, input):
        if input.dim() <= 2:
            return super(Bottle, self).forward(input)
        b, t = input.size(0), input.size(1)
        flat_out = super(Bottle, self).forward(input.view(b * t, -1))
        return flat_out.contiguous().view(b, t, -1)
class Bottle2(nn.Module):
    """Mixin that lets a 3-D module accept 4-D input: the first two
    dimensions are flattened before the parent forward and restored on
    the result."""

    def forward(self, input):
        if input.dim() <= 3:
            return super(Bottle2, self).forward(input)
        s0, s1, s2, s3 = input.size()
        flat_out = super(Bottle2, self).forward(input.view(s0 * s1, s2, s3))
        return flat_out.contiguous().view(s0, s1, s2, s3)
class LayerNorm(nn.Module):
    ''' Layer normalization module '''

    def __init__(self, d_hid, eps=1e-3):
        super(LayerNorm, self).__init__()
        self.eps = eps
        # Learnable gain (a_2) and bias (b_2), initialized to identity.
        self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
        self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)

    def forward(self, z):
        # A single feature cannot be normalized; pass it through untouched.
        if z.size(1) == 1:
            return z
        mean = torch.mean(z, dim=1)
        std = torch.std(z, dim=1)
        # HACK. PyTorch is changing behavior: newer versions drop the
        # reduced dim, older ones keep it; re-add it when missing.
        if mean.dim() == 1:
            mean = mean.unsqueeze(1)
            std = std.unsqueeze(1)
        normed = (z - mean.expand_as(z)) / (std.expand_as(z) + self.eps)
        return normed * self.a_2.expand_as(normed) \
            + self.b_2.expand_as(normed)
class BottleLinear(Bottle, nn.Linear):
    """``nn.Linear`` that also accepts 3-D ``(batch, len, feat)`` input."""
    pass


class BottleLayerNorm(Bottle, LayerNorm):
    """``LayerNorm`` that also accepts 3-D input via the Bottle mixin."""
    pass


class BottleSoftmax(Bottle, nn.Softmax):
    """``nn.Softmax`` that also accepts 3-D input via the Bottle mixin."""
    pass
class Elementwise(nn.ModuleList):
    """
    A simple network container.
    Parameters are a list of modules.
    Inputs are a 3d Variable whose last dimension is the same length
    as the list.
    Outputs are the result of applying modules to inputs elementwise.
    An optional merge parameter allows the outputs to be reduced to a
    single Variable.
    """

    def __init__(self, merge=None, *args):
        assert merge in [None, 'first', 'concat', 'sum', 'mlp']
        self.merge = merge
        super(Elementwise, self).__init__(*args)

    def forward(self, input):
        # One (batch, len) slice per module, taken along the last dim.
        slices = [piece.squeeze(2) for piece in input.split(1, dim=2)]
        assert len(self) == len(slices)
        results = [module(piece) for module, piece in zip(self, slices)]
        if self.merge == 'first':
            return results[0]
        if self.merge in ('concat', 'mlp'):
            return torch.cat(results, 2)
        if self.merge == 'sum':
            return sum(results)
        return results
| 2,769 | 30.123596 | 78 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/StructuredAttention.py | import torch.nn as nn
import torch
import torch.cuda
from torch.autograd import Variable
class MatrixTree(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations."
    :cite:`DBLP:journals/corr/LiuL17d`
    """
    def __init__(self, eps=1e-5):
        # eps: additive smoothing keeping exp-scores strictly positive
        # so the Laplacian below stays invertible.
        self.eps = eps
        super(MatrixTree, self).__init__()

    def forward(self, input):
        # input: (batch, n, n) raw scores. NOTE(review): this hard-codes
        # .cuda(), so a CUDA device is required — confirm callers.
        laplacian = input.exp() + self.eps
        output = input.clone()
        for b in range(input.size(0)):
            # Zero the diagonal, then form the graph Laplacian L = D - A.
            lap = laplacian[b].masked_fill(
                Variable(torch.eye(input.size(1)).cuda().ne(0)), 0)
            lap = -lap + torch.diag(lap.sum(0))
            # store roots on diagonal
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()

            factor = inv_laplacian.diag().unsqueeze(1)\
                                         .expand_as(input[b]).transpose(0, 1)
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            # First column / row carry the root scores, handled separately.
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            roots_output = input[b].diag().exp().mul(
                inv_laplacian.transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output
if __name__ == "__main__":
    # Smoke test (requires a CUDA device): marginals over each column
    # should sum to 1.
    dtree = MatrixTree()
    q = torch.rand(1, 5, 5).cuda()
    marg = dtree.forward(Variable(q))
    print(marg.sum(1))
| 1,597 | 32.291667 | 77 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/Conv2Conv.py | """
Implementation of "Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import onmt.modules
from onmt.modules.WeightNorm import WeightNormConv2d
from onmt.Models import EncoderBase
from onmt.Models import DecoderState
from onmt.Utils import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def shape_transform(x):
    """Reshape a (batch, len, dim) tensor to (batch, dim, len, 1) so it
    fits the conv layers' expected input layout."""
    transposed = torch.transpose(x, 1, 2)
    return torch.unsqueeze(transposed, 3)
class GatedConv(nn.Module):
    """Gated linear unit over a weight-normalized (width x 1) convolution:
    the conv emits 2x channels, half of which gate the other half."""

    def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
        super(GatedConv, self).__init__()
        pad_rows = width // 2 * (1 - nopad)
        self.conv = WeightNormConv2d(input_size, 2 * input_size,
                                     kernel_size=(width, 1), stride=(1, 1),
                                     padding=(pad_rows, 0))
        gain = (4 * (1 - dropout)) ** 0.5
        init.xavier_uniform(self.conv.weight, gain=gain)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x_var, hidden=None):
        dropped = self.dropout(x_var)
        convolved = self.conv(dropped)
        half = int(convolved.size(1) / 2)
        out, gate = convolved.split(half, 1)
        return out * F.sigmoid(gate)
class StackedCNN(nn.Module):
    """A stack of residual GatedConv layers; each layer's output is added
    to its input and rescaled by SCALE_WEIGHT."""

    def __init__(self, num_layers, input_size, cnn_kernel_width=3,
                 dropout=0.2):
        super(StackedCNN, self).__init__()
        self.dropout = dropout
        self.num_layers = num_layers
        self.layers = nn.ModuleList(
            GatedConv(input_size, cnn_kernel_width, dropout)
            for _ in range(num_layers))

    def forward(self, x, hidden=None):
        for conv in self.layers:
            x = x + conv(x)
            x *= SCALE_WEIGHT
        return x
class CNNEncoder(EncoderBase):
    """
    Encoder built on CNN.
    """

    def __init__(self, num_layers, hidden_size,
                 cnn_kernel_width, dropout, embeddings):
        super(CNNEncoder, self).__init__()
        self.embeddings = embeddings
        emb_dim = embeddings.embedding_size
        self.linear = nn.Linear(emb_dim, hidden_size)
        self.cnn = StackedCNN(num_layers, hidden_size,
                              cnn_kernel_width, dropout)

    def forward(self, input, lengths=None, hidden=None):
        """ See EncoderBase.forward() for description of args and returns."""
        self._check_args(input, lengths, hidden)

        emb = self.embeddings(input)
        # (len, batch, dim) -> (batch, len, dim)
        emb = emb.transpose(0, 1).contiguous()
        batch, s_len, emb_dim = emb.size()

        remapped = self.linear(emb.view(batch * s_len, -1))
        remapped = remapped.view(batch, s_len, -1)
        remapped = shape_transform(remapped)

        out = self.cnn(remapped)

        return (remapped.squeeze(3).transpose(0, 1).contiguous(),
                out.squeeze(3).transpose(0, 1).contiguous())
class CNNDecoder(nn.Module):
    """
    Decoder built on CNN, which consists of residual convolutional
    layers, with ConvMultiStepAttention.
    """
    def __init__(self, num_layers, hidden_size, attn_type,
                 copy_attn, cnn_kernel_width, dropout, embeddings):
        super(CNNDecoder, self).__init__()

        # Basic attributes.
        self.decoder_type = 'cnn'
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.cnn_kernel_width = cnn_kernel_width
        self.embeddings = embeddings
        self.dropout = dropout

        # Build the CNN: one gated conv (nopad=True) and one multi-step
        # attention layer per decoder layer.
        input_size = self.embeddings.embedding_size
        self.linear = nn.Linear(input_size, self.hidden_size)
        self.conv_layers = nn.ModuleList()
        for i in range(self.num_layers):
            self.conv_layers.append(
                GatedConv(self.hidden_size, self.cnn_kernel_width,
                          self.dropout, True))

        self.attn_layers = nn.ModuleList()
        for i in range(self.num_layers):
            self.attn_layers.append(
                onmt.modules.ConvMultiStepAttention(self.hidden_size))

        # CNNDecoder has its own attention mechanism.
        # Set up a separated copy attention layer, if needed.
        self._copy = False
        if copy_attn:
            self.copy_attn = onmt.modules.GlobalAttention(
                hidden_size, attn_type=attn_type)
            self._copy = True

    def forward(self, input, context, state, context_lengths=None):
        """
        Forward through the CNNDecoder.

        Args:
            input (LongTensor): a sequence of input tokens tensors
                of size (len x batch x nfeats).
            context (FloatTensor): output(tensor sequence) from the encoder
                CNN of size (src_len x batch x hidden_size).
            state (FloatTensor): hidden state from the encoder CNN for
                initializing the decoder.
            context_lengths (LongTensor): the source context lengths, this is
                not used for CNNDecoder, but for interface compatibility.

        Returns:
            outputs (FloatTensor): a Tensor sequence of output from the
                decoder of shape (len x batch x hidden_size).
            state (FloatTensor): final hidden state from the decoder.
            attns (dict of (str, FloatTensor)): a dictionary of different
                type of attention Tensor from the decoder
                of shape (src_len x batch).
        """
        # CHECKS
        assert isinstance(state, CNNDecoderState)
        input_len, input_batch, _ = input.size()
        contxt_len, contxt_batch, _ = context.size()
        aeq(input_batch, contxt_batch)
        # END CHECKS

        # Prepend the already-decoded tokens so the convolutions see the
        # whole history (needed for incremental, step-wise decoding).
        if state.previous_input is not None:
            input = torch.cat([state.previous_input, input], 0)

        # Initialize return variables.
        outputs = []
        attns = {"std": []}
        assert not self._copy, "Copy mechanism not yet tested in conv2conv"
        if self._copy:
            attns["copy"] = []

        emb = self.embeddings(input)
        assert emb.dim() == 3  # len x batch x embedding_dim

        tgt_emb = emb.transpose(0, 1).contiguous()
        # The output of CNNEncoder.
        src_context_t = context.transpose(0, 1).contiguous()
        # The combination of output of CNNEncoder and source embeddings.
        src_context_c = state.init_src.transpose(0, 1).contiguous()

        # Run the forward pass of the CNNDecoder.
        emb_reshape = tgt_emb.contiguous().view(
            tgt_emb.size(0) * tgt_emb.size(1), -1)
        linear_out = self.linear(emb_reshape)
        x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
        x = shape_transform(x)

        # Left-pad with zeros so each position convolves only over
        # earlier positions (the convs are built with nopad=True).
        pad = Variable(torch.zeros(x.size(0), x.size(1),
                                   self.cnn_kernel_width - 1, 1))
        pad = pad.type_as(x)
        base_target_emb = x

        for conv, attention in zip(self.conv_layers, self.attn_layers):
            new_target_input = torch.cat([pad, x], 2)
            out = conv(new_target_input)
            c, attn = attention(base_target_emb, out,
                                src_context_t, src_context_c)
            x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
        output = x.squeeze(3).transpose(1, 2)

        # Process the result and update the attentions.
        outputs = output.transpose(0, 1).contiguous()
        if state.previous_input is not None:
            # Return only the freshly decoded positions.
            outputs = outputs[state.previous_input.size(0):]
            attn = attn[:, state.previous_input.size(0):].squeeze()
            attn = torch.stack([attn])
        attns["std"] = attn
        if self._copy:
            attns["copy"] = attn

        # Update the state.
        state.update_state(input)

        return outputs, state, attns

    def init_decoder_state(self, src, context, enc_hidden):
        # src is unused; the signature matches the other decoder types.
        return CNNDecoderState(context, enc_hidden)
class CNNDecoderState(DecoderState):
    """Decoder state for the CNN decoder: the blended source representation
    plus the tokens fed so far."""

    def __init__(self, context, enc_hidden):
        self.init_src = (context + enc_hidden) * SCALE_WEIGHT
        self.previous_input = None

    @property
    def _all(self):
        """
        Contains attributes that need to be updated in self.beam_update().
        """
        return (self.previous_input,)

    def update_state(self, input):
        """ Called for every decoder forward pass. """
        self.previous_input = input

    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        tiled = self.init_src.data.repeat(1, beam_size, 1)
        self.init_src = Variable(tiled, volatile=True)
| 8,735 | 36.016949 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/GlobalAttention.py | import torch
import torch.nn as nn
from onmt.modules.UtilClass import BottleLinear
from onmt.Utils import aeq, sequence_mask
class GlobalAttention(nn.Module):
    """
    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

    Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`, to an output
    of size `dim`.

    .. mermaid::

       graph BT
          A[Query]
          subgraph RNN
            C[H 1]
            D[H 2]
            E[H N]
          end
          F[Attn]
          G[Output]
          A --> F
          C --> F
          D --> F
          E --> F
          C -.-> G
          D -.-> G
          E -.-> G
          F --> G

    All models compute the output as
    :math:`c = \sum_{j=1}^{SeqLength} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    Then they apply a projection layer to [q, c].

    However they
    differ on how they compute the attention score.

    * Luong Attention (dot, general):
       * dot: :math:`score(H_j,q) = H_j^T q`
       * general: :math:`score(H_j, q) = H_j^T W_a q`

    * Bahdanau Attention (mlp):
       * :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`

    Args:
       dim (int): dimensionality of query and key
       coverage (bool): use coverage term
       attn_type (str): type of attention to use, options [dot,general,mlp]
    """
    def __init__(self, dim, coverage=False, attn_type="dot"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        self.attn_type = attn_type
        assert (self.attn_type in ["dot", "general", "mlp"]), (
            "Please select a valid attention type.")

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = BottleLinear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = BottleLinear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)

        # NOTE(review): nn.Softmax() without an explicit dim relies on the
        # legacy 2-D default; forward always feeds it a 2-D view.
        self.sm = nn.Softmax()
        self.tanh = nn.Tanh()

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """
        Args:
          h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
          h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`

        Returns:
          :obj:`FloatTensor`:
           raw attention scores (unnormalized) for each src index
          `[batch x tgt_len x src_len]`
        """

        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)

        if self.attn_type in ["general", "dot"]:
            if self.attn_type == "general":
                h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
            return torch.bmm(h_t, h_s_)
        else:
            # Bahdanau-style mlp scoring.
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)

            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)

            # (batch, t_len, s_len, d)
            wquh = self.tanh(wq + uh)

            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, input, context, context_lengths=None, coverage=None):
        """
        Args:
          input (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
          context (`FloatTensor`): source vectors `[batch x src_len x dim]`
          context_lengths (`LongTensor`): the source context lengths `[batch]`
          coverage (`FloatTensor`): None (not supported yet)

        Returns:
          (`FloatTensor`, `FloatTensor`):

          * Computed vector `[tgt_len x batch x dim]`
          * Attention distribtutions for each query
            `[tgt_len x batch x src_len]`
        """

        # one step input
        if input.dim() == 2:
            one_step = True
            input = input.unsqueeze(1)
        else:
            one_step = False

        batch, sourceL, dim = context.size()
        batch_, targetL, dim_ = input.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, sourceL_ = coverage.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)

        if coverage is not None:
            # Fold the running coverage vector into the source context.
            cover = coverage.view(-1).unsqueeze(1)
            context += self.linear_cover(cover).view_as(context)
            context = self.tanh(context)

        # compute attention scores, as in Luong et al.
        align = self.score(input, context)

        if context_lengths is not None:
            mask = sequence_mask(context_lengths)
            mask = mask.unsqueeze(1)  # Make it broadcastable.
            # Positions past each source length get -inf before softmax.
            align.data.masked_fill_(1 - mask, -float('inf'))

        # Softmax to normalize attention weights
        align_vectors = self.sm(align.view(batch*targetL, sourceL))
        align_vectors = align_vectors.view(batch, targetL, sourceL)

        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, context)

        # concatenate
        concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)
        attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
        if self.attn_type in ["general", "dot"]:
            attn_h = self.tanh(attn_h)

        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)

            # Check output sizes
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, sourceL_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()

            # Check output sizes
            targetL_, batch_, dim_ = attn_h.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            targetL_, batch_, sourceL_ = align_vectors.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)

        return attn_h, align_vectors
| 7,024 | 31.224771 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/SRU.py | """
Implementation of "Training RNNs as Fast as CNNs".
TODO: turn to pytorch's implementation when it is available.
This implementation is adpoted from the author of the paper:
https://github.com/taolei87/sru/blob/master/cuda_functional.py.
"""
# flake8: noqa
import subprocess
import platform
import os
import re
import argparse
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from collections import namedtuple
# For command-line option parsing
class CheckSRU(argparse.Action):
    """argparse action that gates the 'SRU' choice on its runtime deps.

    When the parsed value equals 'SRU', the cupy/pynvrtc/CUDA stack is
    verified (aborting on failure) before the value is stored.
    """

    def __init__(self, option_strings, dest, **kwargs):
        super(CheckSRU, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        if values == 'SRU':
            # Raises AssertionError when the prerequisites are missing.
            check_sru_requirement(abort=True)
        # Check passed (or not needed): store the value as usual.
        setattr(namespace, self.dest, values)
# This SRU version implements its own cuda-level optimization,
# so it requires that:
# 1. `cupy` and `pynvrtc` python package installed.
# 2. pytorch is built with cuda support.
# 3. library path set: export LD_LIBRARY_PATH=<cuda lib path>.
def check_sru_requirement(abort=False):
    """Verify that the custom SRU CUDA kernel can run here.

    Three conditions are checked: (1) the 'cupy' and 'pynvrtc' packages
    are pip-installed, (2) pytorch has CUDA available, and (3)
    LD_LIBRARY_PATH points at a cuda lib directory.

    Returns True when all checks pass; otherwise returns False, or
    raises AssertionError when ``abort`` is True.
    """
    # Check 1: required python packages.
    try:
        if platform.system() == 'Windows':
            subprocess.check_output('pip freeze | findstr cupy', shell=True)
            subprocess.check_output('pip freeze | findstr pynvrtc',
                                    shell=True)
        else:  # Unix-like systems
            subprocess.check_output('pip freeze | grep -w cupy', shell=True)
            subprocess.check_output('pip freeze | grep -w pynvrtc',
                                    shell=True)
    except subprocess.CalledProcessError:
        if abort:
            raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
                                 "python packages installed.")
        return False

    # Check 2: CUDA-enabled pytorch.
    if torch.cuda.is_available() is False:
        if abort:
            raise AssertionError("Using SRU requires pytorch built with cuda.")
        return False

    # Check 3: cuda library path visible to the dynamic loader.
    ld_path = os.getenv('LD_LIBRARY_PATH', "")
    if re.compile(".*cuda/lib.*").match(ld_path) is None:
        if abort:
            raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
                                 "export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
        return False

    return True
SRU_CODE = """
extern "C" {
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
}
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
}
__global__ void sru_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len,
const int batch, const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len, const int batch,
const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1)?(*(cp-ncols_)):(*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
}
"""
if check_sru_requirement():
    from cupy.cuda import function
    from pynvrtc.compiler import Program

    # This cuda() is important, it sets up device to use.
    tmp_ = torch.rand(1, 1).cuda()

    # JIT-compile the raw CUDA source above to PTX and load the four
    # kernels as callables.
    sru_prog = Program(SRU_CODE.encode('utf-8'),
                       'sru_prog.cu'.encode('utf-8'))
    sru_ptx = sru_prog.compile()
    sru_mod = function.Module()
    sru_mod.load(bytes(sru_ptx.encode()))
    SRU_FWD_FUNC = sru_mod.get_function('sru_fwd')
    SRU_BWD_FUNC = sru_mod.get_function('sru_bwd')
    SRU_BiFWD_FUNC = sru_mod.get_function('sru_bi_fwd')
    SRU_BiBWD_FUNC = sru_mod.get_function('sru_bi_bwd')

    stream = namedtuple('Stream', ['ptr'])
    # Hand torch's current CUDA stream to the raw kernel launches.
    SRU_STREAM = stream(ptr=torch.cuda.current_stream().cuda_stream)
class SRU_Compute(Function):
    """Legacy (stateful, pre-0.4 style) autograd Function that launches
    the JIT-compiled SRU CUDA kernels.

    Configured per call with the activation type, output width and
    directionality; ``forward``/``backward`` marshal raw data pointers
    into the uni- or bi-directional kernel as appropriate.
    """

    def __init__(self, activation_type, d_out, bidirectional=False):
        # activation_type: 0 = identity, 1 = tanh, 2 = ReLU — mirrors
        # the ternary chain inside the CUDA source above.
        super(SRU_Compute, self).__init__()
        self.activation_type = activation_type
        self.d_out = d_out
        self.bidirectional = bidirectional

    def forward(self, u, x, bias, init=None, mask_h=None):
        bidir = 2 if self.bidirectional else 1
        # x may be (len, batch, n_in) or a single step (batch, n_in).
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        d = self.d_out
        # k = number of projections per output unit packed into u
        # (per direction when bidirectional).
        k = u.size(-1) // d
        k_ = k // 2 if self.bidirectional else k
        # One CUDA thread per (batch, dim, direction) column.
        ncols = batch * d * bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1) // thread_per_block+1

        init_ = x.new(ncols).zero_() if init is None else init
        size = (length, batch, d*bidir) if x.dim() == 3 else (batch, d*bidir)
        c = x.new(*size)
        h = x.new(*size)

        FUNC = SRU_FWD_FUNC if not self.bidirectional else SRU_BiFWD_FUNC
        FUNC(args=[
            u.contiguous().data_ptr(),
            # kernel reads x directly only when k_ == 3 (highway path
            # reuses the raw input); pass a null pointer otherwise.
            x.contiguous().data_ptr() if k_ == 3 else 0,
            bias.data_ptr(),
            init_.contiguous().data_ptr(),
            mask_h.data_ptr() if mask_h is not None else 0,
            length,
            batch,
            d,
            k_,
            h.data_ptr(),
            c.data_ptr(),
            self.activation_type],
            block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
            stream=SRU_STREAM
        )
        # Stateful Function API: stash tensors for backward on self.
        self.save_for_backward(u, x, bias, init, mask_h)
        self.intermediate = c
        if x.dim() == 2:
            last_hidden = c
        elif self.bidirectional:
            # -> directions x batch x dim
            last_hidden = torch.stack((c[-1, :, :d], c[0, :, d:]))
        else:
            last_hidden = c[-1]
        return h, last_hidden

    def backward(self, grad_h, grad_last):
        if self.bidirectional:
            grad_last = torch.cat((grad_last[0], grad_last[1]), 1)
        bidir = 2 if self.bidirectional else 1
        u, x, bias, init, mask_h = self.saved_tensors
        c = self.intermediate
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        d = self.d_out
        k = u.size(-1) // d
        k_ = k//2 if self.bidirectional else k
        ncols = batch*d*bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1) // thread_per_block+1

        init_ = x.new(ncols).zero_() if init is None else init
        # Output gradient buffers filled by the kernel.
        grad_u = u.new(*u.size())
        grad_bias = x.new(2, batch, d*bidir)
        grad_init = x.new(batch, d*bidir)

        # For DEBUG
        # size = (length, batch, x.size(-1)) \
        #         if x.dim() == 3 else (batch, x.size(-1))
        # grad_x = x.new(*x.size()) if k_ == 3 else x.new(*size).zero_()

        # Normal use
        grad_x = x.new(*x.size()) if k_ == 3 else None
        FUNC = SRU_BWD_FUNC if not self.bidirectional else SRU_BiBWD_FUNC
        FUNC(args=[
            u.contiguous().data_ptr(),
            x.contiguous().data_ptr() if k_ == 3 else 0,
            bias.data_ptr(),
            init_.contiguous().data_ptr(),
            mask_h.data_ptr() if mask_h is not None else 0,
            c.data_ptr(),
            grad_h.contiguous().data_ptr(),
            grad_last.contiguous().data_ptr(),
            length,
            batch,
            d,
            k_,
            grad_u.data_ptr(),
            grad_x.data_ptr() if k_ == 3 else 0,
            grad_bias.data_ptr(),
            grad_init.data_ptr(),
            self.activation_type],
            block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
            stream=SRU_STREAM
        )
        # Per-batch bias grads are summed here; one None per non-tensor
        # forward argument (mask_h gets no gradient).
        return grad_u, grad_x, grad_bias.sum(1).view(-1), grad_init, None
class SRUCell(nn.Module):
    """One SRU layer: a single input projection (``weight``) feeding the
    CUDA-side recurrence implemented by ``SRU_Compute``.

    ``activation_type`` encodes the cell nonlinearity: 2 = ReLU,
    1 = tanh, 0 = identity.
    """

    def __init__(self, n_in, n_out, dropout=0, rnn_dropout=0,
                 bidirectional=False, use_tanh=1, use_relu=0):
        super(SRUCell, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.rnn_dropout = rnn_dropout
        self.dropout = dropout
        self.bidirectional = bidirectional
        if use_relu:
            self.activation_type = 2
        elif use_tanh:
            self.activation_type = 1
        else:
            self.activation_type = 0

        n_dir = 2 if bidirectional else 1
        # A fourth projection is needed only when input and output
        # widths differ (the highway path cannot reuse x directly).
        n_proj = 3 if n_in == n_out * n_dir else 4
        self.size_per_dir = n_out * n_proj
        self.weight = nn.Parameter(
            torch.Tensor(n_in, self.size_per_dir * n_dir))
        # Two bias vectors (forget + reset gates) per direction.
        self.bias = nn.Parameter(torch.Tensor(n_out * 2 * n_dir))
        self.init_weight()

    def init_weight(self):
        """Uniform init of the projection; biases start at zero."""
        bound = (3.0 / self.n_in) ** 0.5
        self.weight.data.uniform_(-bound, bound)
        self.bias.data.zero_()

    def set_bias(self, bias_val=0):
        """Overwrite the second half of the bias vector with *bias_val*."""
        half = self.n_out * 2 if self.bidirectional else self.n_out
        self.bias.data[half:].zero_().add_(bias_val)

    def forward(self, input, c0=None):
        """Run the layer over *input* ((len, batch, n_in) or (batch, n_in)).

        Returns (h, c): hidden sequence and last cell state.
        """
        assert input.dim() == 2 or input.dim() == 3
        n_in, n_out = self.n_in, self.n_out
        batch = input.size(-2)
        if c0 is None:
            width = n_out * 2 if self.bidirectional else n_out
            c0 = Variable(input.data.new(batch, width).zero_())

        if self.training and (self.rnn_dropout > 0):
            # Variational-style dropout on the projection input only;
            # the raw input handed to the kernel stays undropped.
            mask = self.get_dropout_mask_((batch, n_in), self.rnn_dropout)
            x = input * mask.expand_as(input)
        else:
            x = input

        x_2d = x if x.dim() == 2 else x.contiguous().view(-1, n_in)
        u = x_2d.mm(self.weight)

        compute = SRU_Compute(self.activation_type, n_out,
                              self.bidirectional)
        if self.training and (self.dropout > 0):
            bidir = 2 if self.bidirectional else 1
            mask_h = self.get_dropout_mask_((batch, n_out * bidir),
                                            self.dropout)
            h, c = compute(u, input, self.bias, c0, mask_h)
        else:
            h, c = compute(u, input, self.bias, c0)
        return h, c

    def get_dropout_mask_(self, size, p):
        """Inverted-dropout mask with the weight's dtype/device."""
        keep = 1 - p
        w = self.weight.data
        return Variable(w.new(*size).bernoulli_(keep).div_(keep))
class SRU(nn.Module):
    """Stack of ``SRUCell`` layers with LSTM-like (output, hidden)
    forward semantics. Requires the CUDA/cupy toolchain checked by
    ``check_sru_requirement``.
    """

    def __init__(self, input_size, hidden_size,
                 num_layers=2, dropout=0, rnn_dropout=0,
                 bidirectional=False, use_tanh=1, use_relu=0):
        # An entry check here, will catch on train side and translate side
        # if requirements are not satisfied.
        check_sru_requirement(abort=True)
        super(SRU, self).__init__()
        self.n_in = input_size
        self.n_out = hidden_size
        self.depth = num_layers
        self.dropout = dropout
        self.rnn_dropout = rnn_dropout
        self.rnn_lst = nn.ModuleList()
        self.bidirectional = bidirectional
        self.out_size = hidden_size*2 if bidirectional else hidden_size

        for i in range(num_layers):
            sru_cell = SRUCell(
                n_in=self.n_in if i == 0 else self.out_size,
                n_out=self.n_out,
                # no inter-layer dropout after the topmost layer
                dropout=dropout if i+1 != num_layers else 0,
                rnn_dropout=rnn_dropout,
                bidirectional=bidirectional,
                use_tanh=use_tanh,
                use_relu=use_relu,
            )
            self.rnn_lst.append(sru_cell)

    def set_bias(self, bias_val=0):
        # Forward the gate-bias override to every layer.
        for l in self.rnn_lst:
            l.set_bias(bias_val)

    def forward(self, input, c0=None, return_hidden=True):
        """Run the stack over *input* (len, batch, n_in).

        c0 may be None (zeros), a (depth, batch, dir*n_out) tensor, or a
        tuple whose first element is such a tensor.
        """
        assert input.dim() == 3    # (len, batch, n_in)
        dir_ = 2 if self.bidirectional else 1
        if c0 is None:
            zeros = Variable(input.data.new(
                input.size(1), self.n_out*dir_
            ).zero_())
            c0 = [zeros for i in range(self.depth)]
        else:
            if isinstance(c0, tuple):
                # RNNDecoderState wraps hidden as a tuple.
                c0 = c0[0]
            assert c0.dim() == 3    # (depth, batch, dir_*n_out)
            # one (batch, dir_*n_out) slice per layer
            c0 = [h.squeeze(0) for h in c0.chunk(self.depth, 0)]

        prevx = input
        lstc = []
        for i, rnn in enumerate(self.rnn_lst):
            h, c = rnn(prevx, c0[i])
            prevx = h
            lstc.append(c)

        if self.bidirectional:
            # fh -> (layers*directions) x batch x dim
            fh = torch.cat(lstc)
        else:
            fh = torch.stack(lstc)

        if return_hidden:
            return prevx, fh
        else:
            return prevx
| 23,334 | 36.57649 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/WeightNorm.py | """
Implementation of "Weight Normalization: A Simple Reparameterization
to Accelerate Training of Deep Neural Networks"
As a reparameterization method, weight normalization is same
as BatchNormalization, but it doesn't depend on minibatch.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch.autograd import Variable
def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
    """Fetch parameter *var_name* from *namespace*, refreshing its
    Polyak (exponential moving) average in place.

    The running buffer ``<var_name>_avg`` is updated on every call; the
    raw parameter is returned while training, the wrapped average
    otherwise.
    """
    param = getattr(namespace, var_name)
    running = getattr(namespace, var_name + '_avg')
    # EMA step: running <- decay * running + (1 - decay) * param
    running.sub_((1 - polyak_decay) * (running - param.data))
    return param if training else Variable(running)
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
    """Apply :func:`get_var_maybe_avg` to every name in *var_names* and
    return the results in the same order."""
    return [get_var_maybe_avg(namespace, name, training, polyak_decay)
            for name in var_names]
class WeightNormLinear(nn.Linear):
    """Linear layer with weight normalization (w = g * V / ||V||) and
    Polyak-averaged parameters.

    Call ``forward(x, init=True)`` once on a data batch to perform the
    data-dependent initialization of g and b.
    """

    def __init__(self, in_features, out_features,
                 init_scale=1., polyak_decay=0.9995):
        super(WeightNormLinear, self).__init__(
            in_features, out_features, bias=True)

        # Aliases: V is the direction matrix, g the per-output gain,
        # b the bias — V/b share storage with nn.Linear's weight/bias.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_features))
        self.b = self.bias

        # Polyak-average buffers, updated by get_vars_maybe_avg().
        self.register_buffer(
            'V_avg', torch.zeros(out_features, in_features))
        self.register_buffer('g_avg', torch.zeros(out_features))
        self.register_buffer('b_avg', torch.zeros(out_features))

        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()

    def reset_parameters(self):
        # Deliberate no-op: real initialization is data-dependent and
        # happens in forward(init=True).
        return

    def forward(self, x, init=False):
        if init is True:
            # Data-dependent init: random directions, then choose g/b so
            # the pre-activation has zero mean / unit-ish variance on x.
            # out_features * in_features
            self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
                self.V.data) * 0.05)
            # norm is out_features * 1
            v_norm = self.V.data / \
                self.V.data.norm(2, 1).expand_as(self.V.data)
            # batch_size * out_features
            x_init = F.linear(x, Variable(v_norm)).data
            # out_features
            m_init, v_init = x_init.mean(0).squeeze(
                0), x_init.var(0).squeeze(0)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            x_init = scale_init.view(1, -1).expand_as(x_init) \
                * (x_init - m_init.view(1, -1).expand_as(x_init))
            # Seed the Polyak averages with the initial values.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return Variable(x_init)
        else:
            # Averaged params at eval time, raw params while training.
            V, g, b = get_vars_maybe_avg(self, ['V', 'g', 'b'],
                                         self.training,
                                         polyak_decay=self.polyak_decay)
            # batch_size * out_features
            x = F.linear(x, V)
            scalar = g / torch.norm(V, 2, 1).squeeze(1)
            x = scalar.view(1, -1).expand_as(x) * x + \
                b.view(1, -1).expand_as(x)
            return x
class WeightNormConv2d(nn.Conv2d):
    """Conv2d with weight normalization and Polyak-averaged parameters.

    Call ``forward(x, init=True)`` once on a data batch for the
    data-dependent initialization of g and b.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, init_scale=1.,
                 polyak_decay=0.9995):
        super(WeightNormConv2d, self).__init__(in_channels, out_channels,
                                               kernel_size, stride, padding,
                                               dilation, groups)
        # V is the direction tensor (aliases conv weight); g the
        # per-channel gain; b the bias.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias

        # Polyak-average buffers consumed by get_vars_maybe_avg().
        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))

        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()

    def reset_parameters(self):
        # Deliberate no-op: init happens data-dependently in
        # forward(init=True).
        return

    def forward(self, x, init=False):
        if init is True:
            # out_channels, in_channels // groups, * kernel_size
            self.V.data.copy_(torch.randn(self.V.data.size()
                                          ).type_as(self.V.data) * 0.05)
            # Normalize each output filter to unit L2 norm.
            v_norm = self.V.data / self.V.data.view(self.out_channels, -1)\
                .norm(2, 1).view(self.out_channels, *(
                    [1] * (len(self.kernel_size) + 1))).expand_as(self.V.data)
            x_init = F.conv2d(x, Variable(v_norm), None, self.stride,
                              self.padding, self.dilation, self.groups).data
            # Per-channel statistics over batch and spatial positions.
            t_x_init = x_init.transpose(0, 1).contiguous().view(
                self.out_channels, -1)
            m_init, v_init = t_x_init.mean(1).squeeze(
                1), t_x_init.var(1).squeeze(1)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            scale_init_shape = scale_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            x_init = scale_init_shape.expand_as(
                x_init) * (x_init - m_init_shape.expand_as(x_init))
            # Seed the Polyak averages.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return Variable(x_init)
        else:
            v, g, b = get_vars_maybe_avg(
                self, ['V', 'g', 'b'], self.training,
                polyak_decay=self.polyak_decay)
            scalar = torch.norm(v.view(self.out_channels, -1), 2, 1)
            # Handle both 1-D and keepdim norm results.
            if len(scalar.size()) == 2:
                scalar = g / scalar.squeeze(1)
            else:
                scalar = g / scalar

            # Effective kernel w = (g / ||V||) * V, per output channel.
            w = scalar.view(self.out_channels, *
                            ([1] * (len(v.size()) - 1))).expand_as(v) * v

            x = F.conv2d(x, w, b, self.stride,
                         self.padding, self.dilation, self.groups)
            return x
class WeightNormConvTranspose2d(nn.ConvTranspose2d):
    """ConvTranspose2d with weight normalization and Polyak-averaged
    parameters.

    Call ``forward(x, init=True)`` once on a data batch for the
    data-dependent initialization of g and b.

    Bug fix: the init path called ``x_init.tranpose(0, 1)`` (typo),
    which raised AttributeError the first time ``init=True`` ran; it is
    now ``transpose``, matching WeightNormConv2d.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, init_scale=1.,
                 polyak_decay=0.9995):
        super(WeightNormConvTranspose2d, self).__init__(
            in_channels, out_channels,
            kernel_size, stride,
            padding, output_padding,
            groups)
        # in_channels, out_channels, *kernel_size
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias

        # Polyak-average buffers consumed by get_vars_maybe_avg().
        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))

        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()

    def reset_parameters(self):
        # Deliberate no-op: init happens data-dependently in
        # forward(init=True).
        return

    def forward(self, x, init=False):
        if init is True:
            # in_channels, out_channels, *kernel_size
            self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
                self.V.data) * 0.05)
            # Normalize per *output* channel (dim 1 of the transposed
            # kernel layout).
            v_norm = self.V.data / self.V.data.transpose(0, 1).contiguous() \
                .view(self.out_channels, -1).norm(2, 1).view(
                    self.in_channels, self.out_channels,
                    *([1] * len(self.kernel_size))).expand_as(self.V.data)
            x_init = F.conv_transpose2d(
                x, Variable(v_norm), None, self.stride,
                self.padding, self.output_padding, self.groups).data
            # self.out_channels, 1
            # FIX: was x_init.tranpose(0, 1) — AttributeError at runtime.
            t_x_init = x_init.transpose(0, 1).contiguous().view(
                self.out_channels, -1)
            # out_features
            m_init, v_init = t_x_init.mean(1).squeeze(
                1), t_x_init.var(1).squeeze(1)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            scale_init_shape = scale_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))

            x_init = scale_init_shape.expand_as(x_init)\
                * (x_init - m_init_shape.expand_as(x_init))
            # Seed the Polyak averages.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return Variable(x_init)
        else:
            V, g, b = get_vars_maybe_avg(
                self, ['V', 'g', 'b'], self.training,
                polyak_decay=self.polyak_decay)
            scalar = g / \
                torch.norm(V.transpose(0, 1).contiguous().view(
                    self.out_channels, -1), 2, 1).squeeze(1)
            # Effective kernel w = (g / ||V||) * V, broadcast over the
            # spatial dims.
            w = scalar.view(self.in_channels, self.out_channels,
                            *([1] * (len(V.size()) - 2))).expand_as(V) * V

            x = F.conv_transpose2d(x, w, b, self.stride,
                                   self.padding, self.output_padding,
                                   self.groups)
            return x
| 9,574 | 39.231092 | 78 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/AudioEncoder.py | import math
import torch.nn as nn
import torch.nn.functional as F
class AudioEncoder(nn.Module):
    """Encoder for audio spectrograms: two strided conv blocks followed
    by an LSTM over the time axis.
    """

    def __init__(self, num_layers, bidirectional, rnn_size, dropout,
                 sample_rate, window_size):
        """
        Args:
            num_layers (int): number of encoder layers.
            bidirectional (bool): bidirectional encoder.
            rnn_size (int): size of hidden states of the rnn.
            dropout (float): dropout probablity.
            sample_rate: audio sample rate used to size the STFT.
            window_size: STFT window length in seconds.
        """
        super(AudioEncoder, self).__init__()
        self.num_layers = num_layers
        self.num_directions = 2 if bidirectional else 1
        self.hidden_size = rnn_size

        self.layer1 = nn.Conv2d(1, 32, kernel_size=(41, 11),
                                padding=(0, 10), stride=(2, 2))
        self.batch_norm1 = nn.BatchNorm2d(32)
        self.layer2 = nn.Conv2d(32, 32, kernel_size=(21, 11),
                                padding=(0, 0), stride=(2, 1))
        self.batch_norm2 = nn.BatchNorm2d(32)

        # Frequency bins after the STFT, then after each conv's
        # stride-2 reduction along the frequency axis; times 32 channels
        # this is the per-step feature width fed to the LSTM.
        feat_size = int(math.floor((sample_rate * window_size) / 2) + 1)
        feat_size = int(math.floor(feat_size - 41) / 2 + 1)
        feat_size = int(math.floor(feat_size - 21) / 2 + 1)
        feat_size *= 32
        self.rnn = nn.LSTM(feat_size, rnn_size,
                           num_layers=num_layers,
                           dropout=dropout,
                           bidirectional=bidirectional)

    def load_pretrained_vectors(self, opt):
        # Pass in needed options only when modify function definition.
        pass

    def forward(self, input, lengths=None):
        """Encode (batch, 1, nfft, t) spectrograms.

        Returns (hidden, output) with output shaped (t', batch, rnn_size
        * num_directions).
        """
        # conv block 1: halves both frequency and time axes
        out = self.batch_norm1(self.layer1(input[:, :, :, :]))
        out = F.hardtanh(out, 0, 20, inplace=True)
        # conv block 2: halves the frequency axis again
        out = self.batch_norm2(self.layer2(out))
        out = F.hardtanh(out, 0, 20, inplace=True)

        n_batch = out.size(0)
        t_len = out.size(3)
        # Fold channels x frequency into one feature axis, then move
        # time to the front for the (seq, batch, feat) LSTM layout.
        out = out.view(n_batch, -1, t_len)
        out = out.transpose(0, 2).transpose(1, 2)

        output, hidden = self.rnn(out)
        return hidden, output
| 2,327 | 33.746269 | 73 | py |
Seq2Sick | Seq2Sick-master/onmt/modules/ImageEncoder.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
class ImageEncoder(nn.Module):
    """
    Encoder recurrent neural network for Images.

    A CNN stack reduces the image to a (batch, 512, H, W) feature map;
    each feature-map row, prefixed with a learned row-position
    embedding, is then run through an LSTM and the per-row outputs are
    concatenated along the sequence axis.
    """

    def __init__(self, num_layers, bidirectional, rnn_size, dropout):
        """
        Args:
            num_layers (int): number of encoder layers.
            bidirectional (bool): bidirectional encoder.
            rnn_size (int): size of hidden states of the rnn.
            dropout (float): dropout probablity.
        """
        super(ImageEncoder, self).__init__()
        self.num_layers = num_layers
        self.num_directions = 2 if bidirectional else 1
        self.hidden_size = rnn_size

        # VGG-style conv stack: 3 -> 64 -> 128 -> 256 -> 256 -> 512 -> 512
        self.layer1 = nn.Conv2d(3, 64, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer2 = nn.Conv2d(64, 128, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer3 = nn.Conv2d(128, 256, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer4 = nn.Conv2d(256, 256, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer5 = nn.Conv2d(256, 512, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer6 = nn.Conv2d(512, 512, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))

        self.batch_norm1 = nn.BatchNorm2d(256)
        self.batch_norm2 = nn.BatchNorm2d(512)
        self.batch_norm3 = nn.BatchNorm2d(512)

        input_size = 512
        self.rnn = nn.LSTM(input_size, rnn_size,
                           num_layers=num_layers,
                           dropout=dropout,
                           bidirectional=bidirectional)
        # Learned embedding of the feature-map row index, prepended to
        # each row's sequence (supports up to 1000 rows).
        self.pos_lut = nn.Embedding(1000, input_size)

    def load_pretrained_vectors(self, opt):
        # Pass in needed options only when modify function definition.
        pass

    def forward(self, input, lengths=None):
        batch_size = input.size(0)
        # (batch_size, 64, imgH, imgW)
        # layer 1: input is shifted to be roughly zero-centered (-0.5)
        input = F.relu(self.layer1(input[:, :, :, :]-0.5), True)

        # (batch_size, 64, imgH/2, imgW/2)
        input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))

        # (batch_size, 128, imgH/2, imgW/2)
        # layer 2
        input = F.relu(self.layer2(input), True)

        # (batch_size, 128, imgH/2/2, imgW/2/2)
        input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))

        #  (batch_size, 256, imgH/2/2, imgW/2/2)
        # layer 3
        # batch norm 1
        input = F.relu(self.batch_norm1(self.layer3(input)), True)

        # (batch_size, 256, imgH/2/2, imgW/2/2)
        # layer4
        input = F.relu(self.layer4(input), True)

        # (batch_size, 256, imgH/2/2/2, imgW/2/2)
        # width-only pooling here...
        input = F.max_pool2d(input, kernel_size=(1, 2), stride=(1, 2))

        # (batch_size, 512, imgH/2/2/2, imgW/2/2)
        # layer 5
        # batch norm 2
        input = F.relu(self.batch_norm2(self.layer5(input)), True)

        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        # ...and height-only pooling here.
        input = F.max_pool2d(input, kernel_size=(2, 1), stride=(2, 1))

        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        input = F.relu(self.batch_norm3(self.layer6(input)), True)

        # # (batch_size, 512, H, W)
        # Run the LSTM over each feature-map row independently, with a
        # row-position embedding prepended as the first time step.
        all_outputs = []
        for row in range(input.size(2)):
            inp = input[:, :, row, :].transpose(0, 2)\
                                     .transpose(1, 2)
            row_vec = torch.Tensor(batch_size).type_as(inp.data)\
                                              .long().fill_(row)
            pos_emb = self.pos_lut(Variable(row_vec))
            with_pos = torch.cat(
                (pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
            outputs, hidden_t = self.rnn(with_pos)
            all_outputs.append(outputs)
        out = torch.cat(all_outputs, 0)
        # NOTE: hidden_t is the final state of the *last* row only.
        return hidden_t, out
| 4,023 | 36.962264 | 76 | py |
Seq2Sick | Seq2Sick-master/onmt/io/IO.py | # -*- coding: utf-8 -*-
import os
import codecs
from collections import Counter, defaultdict
from itertools import chain, count
import torch
import torchtext.data
import torchtext.vocab
from onmt.Utils import aeq
PAD_WORD = '<blank>'
UNK = 0
BOS_WORD = '<s>'
EOS_WORD = '</s>'
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
# Patch pickling onto torchtext's Vocab: its stoi defaultdict (lambda
# factory) cannot be pickled, so it is converted to a plain dict on the
# way out and re-wrapped on the way in.
torchtext.vocab.Vocab.__getstate__ = _getstate
torchtext.vocab.Vocab.__setstate__ = _setstate
def get_fields(data_type, n_src_features, n_tgt_features):
    """
    Args:
        data_type: type of the source input. Options are [text|img|audio].
        n_src_features: the number of source features to create Field for.
        n_tgt_features: the number of target features to create Field for.

    Returns:
        A dictionary whose keys are strings and whose values are the
        corresponding Field objects.
    """
    fields = {}

    if data_type == 'text':
        # include_lengths=True makes batches carry (data, lengths) tuples.
        fields["src"] = torchtext.data.Field(
            pad_token=PAD_WORD,
            include_lengths=True)

    elif data_type == 'img':
        def make_img(data, _):
            # Zero-pad every image up to the batch's max height/width.
            c = data[0].size(0)
            h = max([t.size(1) for t in data])
            w = max([t.size(2) for t in data])
            imgs = torch.zeros(len(data), c, h, w)
            for i, img in enumerate(data):
                imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
            return imgs

        fields["src"] = torchtext.data.Field(
            use_vocab=False, tensor_type=torch.FloatTensor,
            postprocessing=make_img, sequential=False)

    elif data_type == 'audio':
        def make_audio(data, _):
            # Zero-pad every spectrogram up to the batch's max length.
            nfft = data[0].size(0)
            t = max([t.size(1) for t in data])
            sounds = torch.zeros(len(data), 1, nfft, t)
            for i, spect in enumerate(data):
                sounds[i, :, :, 0:spect.size(1)] = spect
            return sounds

        fields["src"] = torchtext.data.Field(
            use_vocab=False, tensor_type=torch.FloatTensor,
            postprocessing=make_audio, sequential=False)

    for j in range(n_src_features):
        fields["src_feat_"+str(j)] = \
            torchtext.data.Field(pad_token=PAD_WORD)

    fields["tgt"] = torchtext.data.Field(
        init_token=BOS_WORD, eos_token=EOS_WORD,
        pad_token=PAD_WORD)

    for j in range(n_tgt_features):
        fields["tgt_feat_"+str(j)] = \
            torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
                                 pad_token=PAD_WORD)

    def make_src(data, _):
        # One-hot source map over the batch's extended (copy) vocab:
        # alignment[j, i, t] = 1 iff word j of sentence i has src id t.
        src_size = max([t.size(0) for t in data])
        src_vocab_size = max([t.max() for t in data]) + 1
        alignment = torch.zeros(src_size, len(data), src_vocab_size)
        for i, sent in enumerate(data):
            for j, t in enumerate(sent):
                alignment[j, i, t] = 1
        return alignment

    fields["src_map"] = torchtext.data.Field(
        use_vocab=False, tensor_type=torch.FloatTensor,
        postprocessing=make_src, sequential=False)

    def make_tgt(data, _):
        # Pad per-sentence target-copy indices into a (len, batch) matrix.
        tgt_size = max([t.size(0) for t in data])
        alignment = torch.zeros(tgt_size, len(data)).long()
        for i, sent in enumerate(data):
            alignment[:sent.size(0), i] = sent
        return alignment

    fields["alignment"] = torchtext.data.Field(
        use_vocab=False, tensor_type=torch.LongTensor,
        postprocessing=make_tgt, sequential=False)

    # Original example position, used to restore corpus order later.
    fields["indices"] = torchtext.data.Field(
        use_vocab=False, tensor_type=torch.LongTensor,
        sequential=False)

    return fields
def load_fields_from_vocab(vocab, data_type="text"):
    """
    Load Field objects from `vocab.pt` file.

    `vocab` is the (name, Vocab) pair list written by
    save_fields_to_vocab(); the feature counts are recovered from the
    saved keys so get_fields() can rebuild a matching Field dict.
    """
    vocab = dict(vocab)
    n_src_features = len(collect_features(vocab, 'src'))
    n_tgt_features = len(collect_features(vocab, 'tgt'))
    fields = get_fields(data_type, n_src_features, n_tgt_features)

    for k, v in vocab.items():
        # Hack. Can't pickle defaultdict :(
        v.stoi = defaultdict(lambda: 0, v.stoi)
        fields[k].vocab = v

    return fields
def save_fields_to_vocab(fields):
    """Collect picklable (name, Vocab) pairs from a Field dict.

    Fields that never built a vocab are skipped; each kept vocab's
    ``stoi`` defaultdict is flattened to a plain dict first.
    """
    vocabs = []
    for name, field in fields.items():
        if 'vocab' not in field.__dict__:
            continue
        field.vocab.stoi = dict(field.vocab.stoi)
        vocabs.append((name, field.vocab))
    return vocabs
def merge_vocabs(vocabs, vocab_size=None):
    """
    Merge individual vocabularies (assumed to be generated from disjoint
    documents) into a larger vocabulary.

    Args:
        vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
        vocab_size: `int` the final vocabulary size. `None` for no limit.
    Return:
        `torchtext.vocab.Vocab`
    """
    # Counters add frequency-wise, so shared tokens accumulate counts.
    merged = sum([vocab.freqs for vocab in vocabs], Counter())
    return torchtext.vocab.Vocab(merged,
                                 specials=[PAD_WORD, BOS_WORD, EOS_WORD],
                                 max_size=vocab_size)
def make_features(batch, side, data_type='text'):
    """
    Args:
        batch (Variable): a batch of source or target data.
        side (str): for source or for target.
        data_type (str): type of the source input. Options are [text|img].
    Returns:
        A sequence of src/tgt tensors with optional feature tensors
        of size (len x batch).
    """
    assert side in ['src', 'tgt']
    value = batch.__dict__[side]
    # Fields built with include_lengths carry (data, lengths) tuples;
    # keep only the data part.
    data = value[0] if isinstance(value, tuple) else value

    marker = side + "_feat_"
    feat_keys = sorted(k for k in batch.__dict__ if marker in k)
    levels = [data] + [batch.__dict__[k] for k in feat_keys]

    if data_type != 'text':
        # img/audio sources carry no word features
        return levels[0]
    return torch.cat([lvl.unsqueeze(2) for lvl in levels], 2)
def extract_features(tokens):
    """Split ``word│feat1│feat2``-style tokens into words and feature
    columns.

    Args:
        tokens: list of tokens, each a word optionally followed by
            u"│"-delimited features.
    Returns:
        (words, feature_columns, n_features); empty input yields
        ([], [], -1).
    """
    if not tokens:
        return [], [], -1

    pieces = [tok.split(u"│") for tok in tokens]
    # drop tokens whose word part is empty
    pieces = [tok for tok in pieces if tok[0]]

    width = len(pieces[0])
    assert all(len(tok) == width for tok in pieces), \
        "all words must have the same number of features"

    columns = list(zip(*pieces))
    return columns[0], columns[1:], width - 1
def collect_features(fields, side="src"):
    """List the ``<side>_feat_N`` keys present in *fields*, in N order,
    stopping at the first gap."""
    assert side in ["src", "tgt"]
    feats = []
    j = 0
    while True:
        key = side + "_feat_" + str(j)
        if key not in fields:
            return feats
        feats.append(key)
        j += 1
def collect_feature_vocabs(fields, side):
    """Gather the vocab of every ``<side>_feat_N`` field, in N order,
    stopping at the first gap."""
    assert side in ['src', 'tgt']
    vocabs = []
    j = 0
    while True:
        key = side + '_feat_' + str(j)
        if key not in fields:
            return vocabs
        vocabs.append(fields[key].vocab)
        j += 1
def build_dataset(fields, data_type, src_path, tgt_path, src_dir=None,
                  src_seq_length=0, tgt_seq_length=0,
                  src_seq_length_trunc=0, tgt_seq_length_trunc=0,
                  dynamic_dict=True, sample_rate=0,
                  window_size=0, window_stride=0, window=None,
                  normalize_audio=True, use_filter_pred=True):
    """Build a Text/Image/Audio dataset from src/tgt corpus files.

    The src side is read according to `data_type`; the tgt side is
    always text. Audio-specific arguments (sample_rate .. window) are
    only used when data_type == 'audio'.
    """
    # Hide this import inside to avoid circular dependency problem.
    from onmt.io import TextDataset, ImageDataset, AudioDataset

    # Build src/tgt examples iterator from corpus files, also extract
    # number of features. For all data types, the tgt side corpus is
    # in form of text.
    src_examples_iter, num_src_feats = \
        _make_examples_nfeats_tpl(data_type, src_path, src_dir,
                                  src_seq_length_trunc, sample_rate,
                                  window_size, window_stride,
                                  window, normalize_audio)

    tgt_examples_iter, num_tgt_feats = \
        _make_text_examples_nfeats_tpl(tgt_path, tgt_seq_length_trunc, "tgt")

    if data_type == 'text':
        dataset = TextDataset(fields, src_examples_iter, tgt_examples_iter,
                              num_src_feats, num_tgt_feats,
                              src_seq_length=src_seq_length,
                              tgt_seq_length=tgt_seq_length,
                              dynamic_dict=dynamic_dict,
                              use_filter_pred=use_filter_pred)

    elif data_type == 'img':
        dataset = ImageDataset(fields, src_examples_iter, tgt_examples_iter,
                               num_src_feats, num_tgt_feats,
                               tgt_seq_length=tgt_seq_length,
                               use_filter_pred=use_filter_pred)

    elif data_type == 'audio':
        dataset = AudioDataset(fields, src_examples_iter, tgt_examples_iter,
                               num_src_feats, num_tgt_feats,
                               tgt_seq_length=tgt_seq_length,
                               sample_rate=sample_rate,
                               window_size=window_size,
                               window_stride=window_stride,
                               window=window,
                               normalize_audio=normalize_audio,
                               use_filter_pred=use_filter_pred)

    return dataset
def build_vocab(train_datasets, data_type, share_vocab,
                src_vocab_size, src_words_min_frequency,
                tgt_vocab_size, tgt_words_min_frequency):
    """
    Build and attach vocabularies to the datasets' shared Field objects
    (mutates the fields in place; returns nothing).

    Args:
        train_datasets: a list of train dataset.
        data_type: "text", "img" or "audio"?
        share_vocab(bool): share source and target vocabulary?
        src_vocab_size(int): size of the source vocabulary.
        src_words_min_frequency(int): the minimum frequency needed to
                include a source word in the vocabulary.
        tgt_vocab_size(int): size of the target vocabulary.
        tgt_words_min_frequency(int): the minimum frequency needed to
                include a target word in the vocabulary.
    """
    # All datasets have same fields, get the first one is OK.
    fields = train_datasets[0].fields

    fields["tgt"].build_vocab(*train_datasets, max_size=tgt_vocab_size,
                              min_freq=tgt_words_min_frequency)
    for j in range(train_datasets[0].n_tgt_feats):
        fields["tgt_feat_" + str(j)].build_vocab(*train_datasets)

    if data_type == 'text':
        # Only text sources have a word-level vocabulary to build.
        fields["src"].build_vocab(*train_datasets, max_size=src_vocab_size,
                                  min_freq=src_words_min_frequency)
        for j in range(train_datasets[0].n_src_feats):
            fields["src_feat_" + str(j)].build_vocab(*train_datasets)

        # Merge the input and output vocabularies.
        if share_vocab:
            # `tgt_vocab_size` is ignored when sharing vocabularies
            merged_vocab = merge_vocabs(
                [fields["src"].vocab, fields["tgt"].vocab],
                vocab_size=src_vocab_size)
            fields["src"].vocab = merged_vocab
            fields["tgt"].vocab = merged_vocab
def _join_dicts(*args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args]))
def _peek(seq):
"""
Args:
seq: an iterator.
Returns:
the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator.
"""
first = next(seq)
return first, chain([first], seq)
def _construct_example_fromlist(data, fields):
    """Build a torchtext Example from parallel (name, field) / value
    lists; values whose field is None are attached raw."""
    ex = torchtext.data.Example()
    for (name, field), val in zip(fields, data):
        if field is not None:
            setattr(ex, name, field.preprocess(val))
        else:
            setattr(ex, name, val)
    return ex
def _read_text_file(path, truncate, side):
    """
    Args:
        path: location of a src or tgt file.
        truncate: maximum sequence length (0 for unlimited).
        side: "src" or "tgt", used as the example-dict key prefix.

    Yields:
        (example_dict, n_feats) pairs for each line, where example_dict
        maps side -> words, "indices" -> line number, and one
        "<side>_feat_N" entry per word feature.
    """
    with codecs.open(path, "r", "utf-8") as corpus_file:
        for i, line in enumerate(corpus_file):
            line = line.strip().split()
            if truncate:
                line = line[:truncate]

            words, feats, n_feats = extract_features(line)
            example_dict = {side: words, "indices": i}
            if feats:
                prefix = side + "_feat_"
                example_dict.update((prefix + str(j), f)
                                    for j, f in enumerate(feats))
            yield example_dict, n_feats
def _read_img_file(path, src_dir, side, truncate=None):
    """
    Args:
        path: location of a src file containing image paths
        src_dir: location of source images
        side: 'src' or 'tgt'
        truncate: maximum img size ((0,0) or None for unlimited)

    Yields:
        a dictionary containing image data, path and index for each line.
    """
    assert (src_dir is not None) and os.path.exists(src_dir),\
        'src_dir must be a valid directory if data_type is img'

    # Lazy, module-global imports so PIL/torchvision are only required
    # when image data is actually used.
    global Image, transforms
    from PIL import Image
    from torchvision import transforms

    with codecs.open(path, "r", "utf-8") as corpus_file:
        index = 0
        for line in corpus_file:
            # Prefer src_dir-relative path; fall back to the raw line.
            img_path = os.path.join(src_dir, line.strip())
            if not os.path.exists(img_path):
                img_path = line

            assert os.path.exists(img_path), \
                'img path %s not found' % (line.strip())

            img = transforms.ToTensor()(Image.open(img_path))
            # Oversized images are silently skipped (index not advanced).
            if truncate and truncate != (0, 0):
                if not (img.size(1) <= truncate[0]
                        and img.size(2) <= truncate[1]):
                    continue

            example_dict = {side: img,
                            side+'_path': line.strip(),
                            'indices': index}
            index += 1
            yield example_dict
def _read_audio_file(path, src_dir, side, sample_rate, window_size,
                     window_stride, window, normalize_audio, truncate=None):
    """
    Args:
        path: location of a src file containing audio paths.
        src_dir: location of source audio files.
        side: 'src' or 'tgt'.
        sample_rate: expected sample rate; files that differ are rejected.
        window_size: window size for spectrogram in seconds.
        window_stride: window stride for spectrogram in seconds.
        window: window type for spectrogram generation.
        normalize_audio: subtract spectrogram by mean and divide by std or not
        truncate: maximum audio length (0 or None for unlimited).

    Yields:
        a dictionary containing audio data for each line.
    """
    assert (src_dir is not None) and os.path.exists(src_dir),\
        "src_dir must be a valid directory if data_type is audio"

    # Lazy, module-global imports so the audio stack is only required
    # when audio data is actually used.
    global torchaudio, librosa, np
    import torchaudio
    import librosa
    import numpy as np

    with codecs.open(path, "r", "utf-8") as corpus_file:
        index = 0
        for line in corpus_file:
            # Prefer src_dir-relative path; fall back to the raw line.
            audio_path = os.path.join(src_dir, line.strip())
            if not os.path.exists(audio_path):
                audio_path = line

            assert os.path.exists(audio_path), \
                'audio path %s not found' % (line.strip())

            sound, sample_rate_ = torchaudio.load(audio_path)
            # Overlong clips are silently skipped (index not advanced).
            if truncate and truncate > 0:
                if sound.size(0) > truncate:
                    continue

            # Bug fix: the loaded rate previously shadowed the expected
            # `sample_rate` argument, making this assert a tautology
            # (`sample_rate == sample_rate`); mismatched files now fail.
            assert sample_rate_ == sample_rate, \
                'Sample rate of %s != -sample_rate (%d vs %d)' \
                % (audio_path, sample_rate_, sample_rate)

            sound = sound.numpy()
            if len(sound.shape) > 1:
                if sound.shape[1] == 1:
                    sound = sound.squeeze()
                else:
                    sound = sound.mean(axis=1)  # average multiple channels

            n_fft = int(sample_rate * window_size)
            win_length = n_fft
            hop_length = int(sample_rate * window_stride)
            # STFT -> log-magnitude spectrogram
            d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                             win_length=win_length, window=window)
            spect, _ = librosa.magphase(d)
            spect = np.log1p(spect)
            spect = torch.FloatTensor(spect)
            if normalize_audio:
                mean = spect.mean()
                std = spect.std()
                spect.add_(-mean)
                spect.div_(std)

            example_dict = {side: spect,
                            side + '_path': line.strip(),
                            'indices': index}
            index += 1
            yield example_dict
def _make_text_examples_nfeats_tpl(path, truncate, side):
    """
    Turn a text corpus into an ``(example_dict iterator, num_feats)`` pair.
    """
    assert side in ['src', 'tgt']
    if path is None:
        return (None, 0)
    # Every example carries the same number of features, so peeking at the
    # first one is enough to learn num_feats without consuming the stream.
    pair_iter = _read_text_file(path, truncate, side)
    (_, num_feats), pair_iter = _peek(pair_iter)
    examples_iter = (example for example, _nfeats in pair_iter)
    return (examples_iter, num_feats)
def _make_examples_nfeats_tpl(data_type, src_path, src_dir,
                              src_seq_length_trunc, sample_rate,
                              window_size, window_stride,
                              window, normalize_audio):
    """
    Process the corpus into (example_dict iterator, num_feats) tuple
    on source side for different 'data_type'.

    Raises:
        ValueError: if `data_type` is not one of 'text', 'img', 'audio'
            (the original fell through to an UnboundLocalError).
    """
    if data_type == 'text':
        src_examples_iter, num_src_feats = _make_text_examples_nfeats_tpl(
            src_path, src_seq_length_trunc, "src")
    elif data_type == 'img':
        src_examples_iter = _read_img_file(src_path, src_dir, "src")
        num_src_feats = 0  # Source side(img) has no features.
    elif data_type == 'audio':
        src_examples_iter = _read_audio_file(src_path, src_dir, "src",
                                             sample_rate, window_size,
                                             window_stride, window,
                                             normalize_audio)
        num_src_feats = 0  # Source side(audio) has no features.
    else:
        raise ValueError("unsupported data_type: %s" % data_type)
    return src_examples_iter, num_src_feats
class OrderedIterator(torchtext.data.Iterator):
    """Iterator that keeps every minibatch sorted by the dataset sort key."""
    def create_batches(self):
        if self.train:
            # Training: length-bucketed pooling with random shuffling.
            self.batches = torchtext.data.pool(
                self.data(), self.batch_size,
                self.sort_key, self.batch_size_fn,
                random_shuffler=self.random_shuffler)
        else:
            # Evaluation: plain batching, each batch sorted deterministically.
            raw_batches = torchtext.data.batch(
                self.data(), self.batch_size, self.batch_size_fn)
            self.batches = [sorted(minibatch, key=self.sort_key)
                            for minibatch in raw_batches]
class ONMTDatasetBase(torchtext.data.Dataset):
    """
    A dataset basically supports iteration over all the examples
    it contains. We currently have 3 datasets inheriting this base
    for 3 types of corpus respectively: "text", "img", "audio".

    Internally it initializes an `torchtext.data.Dataset` object with
    the following attributes:

    `examples`: a sequence of `torchtext.data.Example` objects.
    `fields`: a dictionary associating str keys with Field objects. Does not
        necessarily have the same keys as the input fields.
    """
    def __init__(self, *args, **kwargs):
        # Subclasses implement _process_corpus and return the triple
        # expected by the torchtext Dataset constructor.
        examples, fields, filter_pred = self._process_corpus(*args, **kwargs)
        super(ONMTDatasetBase, self).__init__(
            examples, fields, filter_pred
        )
    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, d):
        self.__dict__.update(d)
    def __reduce_ex__(self, proto):
        "This is a hack. Something is broken with torch pickle."
        # BUGFIX: the pickle protocol argument must be forwarded; calling
        # __reduce_ex__() with no argument raises a TypeError.
        return super(ONMTDatasetBase, self).__reduce_ex__(proto)
    def collapse_copy_scores(self, scores, batch, tgt_vocab):
        """
        Given scores from an expanded dictionary
        corresponding to a batch, sums together copies,
        with a dictionary word when it is ambiguous.
        """
        offset = len(tgt_vocab)
        for b in range(batch.batch_size):
            index = batch.indices.data[b]
            src_vocab = self.src_vocabs[index]
            # Fold each copy-vocab entry's score into the matching shared
            # vocab entry (when one exists), leaving a tiny epsilon behind.
            for i in range(1, len(src_vocab)):
                sw = src_vocab.itos[i]
                ti = tgt_vocab.stoi[sw]
                if ti != 0:
                    scores[:, b, ti] += scores[:, b, offset + i]
                    scores[:, b, offset + i].fill_(1e-20)
        return scores
    @staticmethod
    def coalesce_datasets(datasets):
        """Coalesce all dataset instances. """
        final = datasets[0]
        for d in datasets[1:]:
            # `src_vocabs` is a list of `torchtext.vocab.Vocab`.
            # Each sentence transforms into one Vocab.
            # Coalesce them into one big list.
            final.src_vocabs += d.src_vocabs
            # All datasets have same number of features.
            aeq(final.n_src_feats, d.n_src_feats)
            aeq(final.n_tgt_feats, d.n_tgt_feats)
            # `examples` is a list of `torchtext.data.Example`.
            # Coalesce them into one big list.
            final.examples += d.examples
            # All datasets have same fields, no need to update.
        return final
| 21,995 | 34.477419 | 78 | py |
Seq2Sick | Seq2Sick-master/onmt/io/TextDataset.py | # -*- coding: utf-8 -*-
from collections import Counter
import io
import sys
import torch
import torchtext
from onmt.Utils import aeq
from onmt.io.IO import ONMTDatasetBase, _join_dicts, _peek,\
_construct_example_fromlist, extract_features
class TextDataset(ONMTDatasetBase):
    """ Dataset for data_type=='text' """
    def sort_key(self, ex):
        "Sort using the size of source example."
        # Negated so an ascending sort yields longest-first batches.
        return -len(ex.src)
    def _process_corpus(self, fields, src_examples_iter, tgt_examples_iter,
                        num_src_feats=0, num_tgt_feats=0,
                        src_seq_length=0, tgt_seq_length=0,
                        dynamic_dict=True, use_filter_pred=True):
        """
        Build Example objects, Field objects, and filter_pred function
        from text corpus.

        Args:
            fields: a dictionary of Field objects. Keys are like 'src',
                    'tgt', 'src_map', and 'alignment'.
            src_examples_iter: preprocessed source example_dict iterator.
            tgt_examples_iter: preprocessed target example_dict iterator.
            num_src_feats: number of source side features.
            num_tgt_feats: number of target side features.
            src_seq_length: maximum source sequence length.
            tgt_seq_length: maximum target sequence length.
            dynamic_dict: create dynamic dictionaries?
            use_filter_pred: use a custom filter predicate to filter examples?

        Returns:
            constructed tuple of Examples objects, Field objects, filter_pred.
        """
        self.data_type = 'text'
        # self.src_vocabs: mutated in dynamic_dict, used in
        # collapse_copy_scores and in Translator.py
        self.src_vocabs = []
        self.n_src_feats = num_src_feats
        self.n_tgt_feats = num_tgt_feats
        # Each element of an example is a dictionary whose keys represents
        # at minimum the src tokens and their indices and potentially also
        # the src and tgt features and alignment information.
        if tgt_examples_iter is not None:
            # Pair src and tgt dicts line-by-line into a single dict.
            examples_iter = (_join_dicts(src, tgt) for src, tgt in
                             zip(src_examples_iter, tgt_examples_iter))
        else:
            examples_iter = src_examples_iter
        if dynamic_dict:
            # Wrap the stream to attach per-sentence copy vocabularies.
            examples_iter = self._dynamic_dict(examples_iter)
        # Peek at the first to see which fields are used.
        ex, examples_iter = _peek(examples_iter)
        keys = ex.keys()
        out_fields = [(k, fields[k]) if k in fields else (k, None)
                      for k in keys]
        # NOTE: both generators below are lazy and single-pass; the data is
        # only materialized when the caller iterates.
        example_values = ([ex[k] for k in keys] for ex in examples_iter)
        out_examples = (_construct_example_fromlist(ex_values, out_fields)
                        for ex_values in example_values)
        def filter_pred(example):
            # Keep only non-empty examples within the length limits.
            return 0 < len(example.src) <= src_seq_length \
               and 0 < len(example.tgt) <= tgt_seq_length
        filter_pred = filter_pred if use_filter_pred else lambda x: True
        return out_examples, out_fields, filter_pred
    def _dynamic_dict(self, examples_iter):
        # Builds the per-sentence source vocabulary used by copy attention
        # and attaches 'src_map'/'alignment' entries to each example.
        for example in examples_iter:
            src = example["src"]
            src_vocab = torchtext.vocab.Vocab(Counter(src))
            self.src_vocabs.append(src_vocab)
            # Mapping source tokens to indices in the dynamic dict.
            src_map = torch.LongTensor([src_vocab.stoi[w] for w in src])
            example["src_map"] = src_map
            if "tgt" in example:
                tgt = example["tgt"]
                # Alignment marks, per target token, its index in the
                # dynamic dict (0 = not copiable); padded for BOS/EOS.
                mask = torch.LongTensor(
                    [0] + [src_vocab.stoi[w] for w in tgt] + [0])
                example["alignment"] = mask
            yield example
class ShardedTextCorpusIterator(object):
    """
    This is the iterator for text corpus, used for sharding large text
    corpus into small shards, to avoid hogging memory.

    Inside this iterator, it automatically divides the corpus file into
    shards of size `shard_size`. Then, for each shard, it processes
    into (example_dict, n_features) tuples when iterates.
    """
    def __init__(self, corpus_path, line_truncate, side, shard_size,
                 assoc_iter=None):
        """
        Args:
            corpus_path: the corpus file path.
            line_truncate: the maximum length of a line to read.
                            0 for unlimited.
            side: "src" or "tgt".
            shard_size: the shard size, 0 means not sharding the file.
            assoc_iter: if not None, it is the associate iterator that
                        this iterator should align its step with.
        """
        try:
            # The codecs module seems to have bugs with seek()/tell(),
            # so we use io.open().
            self.corpus = io.open(corpus_path, "r", encoding="utf-8")
        except IOError:
            sys.stderr.write("Failed to open corpus file: %s" % corpus_path)
            sys.exit(1)
        self.line_truncate = line_truncate
        self.side = side
        self.shard_size = shard_size
        self.assoc_iter = assoc_iter
        self.last_pos = 0          # file offset where the next shard starts
        self.line_index = -1       # index of the last line read
        self.eof = False
    def __iter__(self):
        """
        Iterator of (example_dict, nfeats).
        On each call, it iterates over as many (example_dict, nfeats) tuples
        until this shard's size equals to or approximates `self.shard_size`.
        """
        if self.assoc_iter is not None:
            # We have associate iterator, just yields tuples
            # until we run parallel with it.
            while self.line_index < self.assoc_iter.line_index:
                line = self.corpus.readline()
                if line == '':
                    raise AssertionError(
                        "Two corpuses must have same number of lines!")
                self.line_index += 1
                yield self._example_dict_iter(line)
            if self.assoc_iter.eof:
                self.eof = True
                self.corpus.close()
        else:
            # Yield tuples until this shard's size reaches the threshold.
            self.corpus.seek(self.last_pos)
            while True:
                if self.shard_size != 0 and self.line_index % 64 == 0:
                    # This part of check is time consuming on Py2 (but
                    # it is quite fast on Py3, weird!). So we don't bother
                    # to check for every line. Instead we check every 64
                    # lines. Thus we are not dividing exactly per
                    # `shard_size`, but it is not too much difference.
                    cur_pos = self.corpus.tell()
                    if cur_pos >= self.last_pos + self.shard_size:
                        self.last_pos = cur_pos
                        # BUGFIX (PEP 479): raising StopIteration inside a
                        # generator is a RuntimeError on Python 3.7+;
                        # `return` ends the generator cleanly.
                        return
                line = self.corpus.readline()
                if line == '':
                    self.eof = True
                    self.corpus.close()
                    # BUGFIX (PEP 479): `return` instead of StopIteration.
                    return
                self.line_index += 1
                yield self._example_dict_iter(line)
    def hit_end(self):
        """True once the underlying file has been fully consumed."""
        return self.eof
    @property
    def num_feats(self):
        # We peek the first line and seek back to
        # the beginning of the file.
        saved_pos = self.corpus.tell()
        line = self.corpus.readline().split()
        if self.line_truncate:
            line = line[:self.line_truncate]
        _, _, self.n_feats = extract_features(line)
        self.corpus.seek(saved_pos)
        return self.n_feats
    def _example_dict_iter(self, line):
        """Convert one raw corpus line into an example dict."""
        line = line.split()
        if self.line_truncate:
            line = line[:self.line_truncate]
        words, feats, n_feats = extract_features(line)
        example_dict = {self.side: words, "indices": self.line_index}
        if feats:
            # All examples must have same number of features.
            aeq(self.n_feats, n_feats)
            prefix = self.side + "_feat_"
            example_dict.update((prefix + str(j), f)
                                for j, f in enumerate(feats))
        return example_dict
| 8,181 | 36.87963 | 78 | py |
Seq2Sick | Seq2Sick-master/onmt/translate/Beam.py | from __future__ import division
import torch
"""
Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
"""
class Beam(object):
    """
    Manages the internals of beam search for one sentence: beams,
    back pointers, and scores.

    Args:
        size: beam width.
        pad, bos, eos: special token indices in the target vocabulary.
        n_best: number of finished hypotheses required before `done()`.
        cuda: place internal tensors on GPU when True.
        global_scorer: optional rescorer (e.g. GNMTGlobalScorer).
    """
    def __init__(self, size, pad, bos, eos,
                 n_best=1, cuda=False,
                 global_scorer=None):
        self.size = size
        self.tt = torch.cuda if cuda else torch
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        self.all_scores = []
        # The backpointers at each time-step.
        self.prev_ks = []
        # The outputs at each time-step. The beam starts with BOS in
        # slot 0 and PAD everywhere else.
        self.next_ys = [self.tt.LongTensor(size)
                        .fill_(pad)]
        self.next_ys[0][0] = bos
        # Has EOS topped the beam yet.
        self._eos = eos
        self.eos_top = False
        # The attentions (matrix) for each time.
        self.attn = []
        # Time and k pair for finished.
        self.finished = []
        self.n_best = n_best
        # Information for global scoring.
        self.global_scorer = global_scorer
        self.global_state = {}
    def get_current_state(self):
        "Get the outputs for the current timestep."
        return self.next_ys[-1]
    def get_current_origin(self):
        "Get the backpointers for the current timestep."
        return self.prev_ks[-1]
    def advance(self, word_probs, attn_out):
        """
        Given prob over words for every last beam `wordLk` and attention
        `attn_out`: Compute and update the beam search.

        Parameters:

        * `word_probs`- probs of advancing from the last step (K x words)
        * `attn_out`- attention at the last step
        """
        num_words = word_probs.size(1)
        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_scores = word_probs + \
                self.scores.unsqueeze(1).expand_as(word_probs)
            # Don't let EOS have children.
            for i in range(self.next_ys[-1].size(0)):
                if self.next_ys[-1][i] == self._eos:
                    beam_scores[i] = -1e20
        else:
            # First step: all rows are identical, so only row 0 matters.
            beam_scores = word_probs[0]
        flat_beam_scores = beam_scores.view(-1)
        best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,
                                                            True, True)
        self.all_scores.append(self.scores)
        self.scores = best_scores
        # best_scores_id is flattened beam x word array, so calculate which
        # word and beam each score came from.
        # BUGFIX: use floor division; true division (`/`) produces a float
        # tensor in modern PyTorch, which cannot be used with index_select.
        prev_k = best_scores_id // num_words
        self.prev_ks.append(prev_k)
        self.next_ys.append((best_scores_id - prev_k * num_words))
        self.attn.append(attn_out.index_select(0, prev_k))
        if self.global_scorer is not None:
            self.global_scorer.update_global_state(self)
        for i in range(self.next_ys[-1].size(0)):
            if self.next_ys[-1][i] == self._eos:
                s = self.scores[i]
                if self.global_scorer is not None:
                    global_scores = self.global_scorer.score(self, self.scores)
                    s = global_scores[i]
                self.finished.append((s, len(self.next_ys) - 1, i))
        # End condition is when top-of-beam is EOS and no global score.
        if self.next_ys[-1][0] == self._eos:
            # self.all_scores.append(self.scores)
            self.eos_top = True
    def done(self):
        return self.eos_top and len(self.finished) >= self.n_best
    def sort_finished(self, minimum=None):
        if minimum is not None:
            i = 0
            # Add from beam until we have minimum outputs.
            while len(self.finished) < minimum:
                s = self.scores[i]
                if self.global_scorer is not None:
                    global_scores = self.global_scorer.score(self, self.scores)
                    s = global_scores[i]
                self.finished.append((s, len(self.next_ys) - 1, i))
        self.finished.sort(key=lambda a: -a[0])
        scores = [sc for sc, _, _ in self.finished]
        ks = [(t, k) for _, t, k in self.finished]
        return scores, ks
    def get_hyp(self, timestep, k):
        """
        Walk back to construct the full hypothesis.
        """
        hyp, attn = [], []
        for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
            hyp.append(self.next_ys[j+1][k])
            attn.append(self.attn[j][k])
            k = self.prev_ks[j][k]
        return hyp[::-1], torch.stack(attn[::-1])
class GNMTGlobalScorer(object):
    """
    Google NMT ranking score from Wu et al.
    """
    def __init__(self, alpha, beta):
        self.alpha = alpha  # length-normalization exponent
        self.beta = beta    # coverage-penalty weight
    def score(self, beam, logprobs):
        "Additional term add to log probability"
        coverage = beam.global_state["coverage"]
        ones = coverage.clone().fill_(1.0)
        coverage_penalty = self.beta * torch.min(coverage, ones).log().sum(1)
        length_penalty = (((5 + len(beam.next_ys)) ** self.alpha) /
                          ((5 + 1) ** self.alpha))
        return (logprobs / length_penalty) + coverage_penalty
    def update_global_state(self, beam):
        "Keeps the coverage vector as sum of attens"
        if len(beam.prev_ks) == 1:
            beam.global_state["coverage"] = beam.attn[-1]
        else:
            reordered = beam.global_state["coverage"] \
                .index_select(0, beam.prev_ks[-1])
            beam.global_state["coverage"] = reordered.add(beam.attn[-1])
| 5,514 | 32.834356 | 79 | py |
Seq2Sick | Seq2Sick-master/onmt/translate/Translator.py | import torch
from torch.autograd import Variable
import onmt.translate.Beam
import onmt.io
class Translator(object):
    """
    Runs beam-search translation with a loaded seq2seq model.

    Args:
        model: trained model exposing `encoder`, `decoder`, `generator`.
        fields: dict of torchtext fields; 'tgt' supplies the vocabulary.
        beam_size: beam width.
        n_best: number of hypotheses kept per sentence.
        max_length: maximum decoding length.
        global_scorer: optional GNMT-style rescorer passed to each Beam.
        copy_attn: whether the generator uses copy attention.
        cuda: run on GPU tensors.
        beam_trace: keep a debugging accumulator of beam internals.
    """
    def __init__(self, model, fields,
                 beam_size, n_best,
                 max_length,
                 global_scorer, copy_attn, cuda,
                 beam_trace):
        self.model = model
        self.fields = fields
        self.n_best = n_best
        self.max_length = max_length
        self.global_scorer = global_scorer
        self.copy_attn = copy_attn
        self.beam_size = beam_size
        self.cuda = cuda
        # for debugging
        self.beam_accum = None
        if beam_trace:
            self.beam_accum = {
                "predicted_ids": [],
                "beam_parent_ids": [],
                "scores": [],
                "log_probs": []}
    def translate_batch(self, batch, data, newsrc=None, FLAG=True):
        """
        Beam-search translate one batch.

        When FLAG is False, `newsrc` is used as the source features
        instead of extracting them from `batch` (used by the attack code
        to feed perturbed inputs).
        Returns a dict with predictions/scores/attention/gold_score/batch.
        """
        # (0) Prep each of the components of the search.
        # And helper method for reducing verbosity.
        beam_size = self.beam_size
        batch_size = batch.batch_size
        data_type = data.data_type
        vocab = self.fields["tgt"].vocab
        # One independent beam per sentence in the batch.
        beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,
                                    cuda=self.cuda,
                                    global_scorer=self.global_scorer,
                                    pad=vocab.stoi[onmt.io.PAD_WORD],
                                    eos=vocab.stoi[onmt.io.EOS_WORD],
                                    bos=vocab.stoi[onmt.io.BOS_WORD])
                for __ in range(batch_size)]
        # Help functions for working with beams and batches
        # NOTE(review): `volatile` is a legacy (pre-0.4) PyTorch inference
        # flag — confirm the pinned torch version before upgrading.
        def var(a): return Variable(a, volatile=True)
        def rvar(a): return var(a.repeat(1, beam_size, 1))
        def bottle(m):
            return m.view(batch_size * beam_size, -1)
        def unbottle(m):
            return m.view(beam_size, batch_size, -1)
        # (1) Run the encoder on the src.
        if FLAG:
            src = onmt.io.make_features(batch, 'src', data_type)
        else:
            src = newsrc
        src_lengths = None
        if data_type == 'text':
            _, src_lengths = batch.src
        enc_states, context = self.model.encoder(src, src_lengths)
        dec_states = self.model.decoder.init_decoder_state(
            src, context, enc_states)
        if src_lengths is None:
            # Non-text data carries no lengths; assume full-length sources.
            src_lengths = torch.Tensor(batch_size).type_as(context.data)\
                .long()\
                .fill_(context.size(0))
        # (2) Repeat src objects `beam_size` times.
        src_map = rvar(batch.src_map.data) if data_type == 'text' else None
        context = rvar(context.data)
        context_lengths = src_lengths.repeat(beam_size)
        dec_states.repeat_beam_size_times(beam_size)
        # (3) run the decoder to generate sentences, using beam search.
        for i in range(self.max_length):
            if all((b.done() for b in beam)):
                break
            #print([b.get_current_state() for b in beam])
            # Construct batch x beam_size nxt words.
            # Get all the pending current beam words and arrange for forward.
            inp = var(torch.stack([b.get_current_state() for b in beam])
                      .t().contiguous().view(1, -1))
            # Turn any copied words to UNKs
            # 0 is unk
            if self.copy_attn:
                inp = inp.masked_fill(
                    inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
            # Temporary kludge solution to handle changed dim expectation
            # in the decoder
            inp = inp.unsqueeze(2)
            # Run one step.
            dec_out, dec_states, attn = self.model.decoder(
                inp, context, dec_states, context_lengths=context_lengths)
            dec_out = dec_out.squeeze(0)
            # dec_out: beam x rnn_size
            # (b) Compute a vector of batch*beam word scores.
            if not self.copy_attn:
                out = self.model.generator.forward(dec_out).data
                out = unbottle(out)
                # beam x tgt_vocab
            else:
                out = self.model.generator.forward(dec_out,
                                                   attn["copy"].squeeze(0),
                                                   src_map)
                # beam x (tgt_vocab + extra_vocab)
                out = data.collapse_copy_scores(
                    unbottle(out.data),
                    batch, self.fields["tgt"].vocab)
                # beam x tgt_vocab
                out = out.log()
            # (c) Advance each beam.
            for j, b in enumerate(beam):
                b.advance(
                    out[:, j],
                    unbottle(attn["std"]).data[:, j, :context_lengths[j]])
                dec_states.beam_update(j, b.get_current_origin(), beam_size)
        # (4) Extract sentences from beam.
        ret = self._from_beam(beam)
        ret["gold_score"] = [0] * batch_size
        if "tgt" in batch.__dict__:
            ret["gold_score"] = self._run_target(batch, data)
        ret["batch"] = batch
        return ret
    def _from_beam(self, beam):
        """Collect the n_best finished hypotheses from every beam."""
        ret = {"predictions": [],
               "scores": [],
               "attention": []}
        for b in beam:
            n_best = self.n_best
            scores, ks = b.sort_finished(minimum=n_best)
            hyps, attn = [], []
            for i, (times, k) in enumerate(ks[:n_best]):
                hyp, att = b.get_hyp(times, k)
                hyps.append(hyp)
                attn.append(att)
            ret["predictions"].append(hyps)
            ret["scores"].append(scores)
            ret["attention"].append(attn)
        return ret
    def _run_target(self, batch, data):
        """Score the gold target under the model (teacher forcing)."""
        data_type = data.data_type
        if data_type == 'text':
            _, src_lengths = batch.src
        else:
            src_lengths = None
        src = onmt.io.make_features(batch, 'src', data_type)
        tgt_in = onmt.io.make_features(batch, 'tgt')[:-1]
        # (1) run the encoder on the src
        enc_states, context = self.model.encoder(src, src_lengths)
        dec_states = self.model.decoder.init_decoder_state(src,
                                                           context, enc_states)
        # (2) if a target is specified, compute the 'goldScore'
        # (i.e. log likelihood) of the target under the model
        tt = torch.cuda if self.cuda else torch
        gold_scores = tt.FloatTensor(batch.batch_size).fill_(0)
        dec_out, dec_states, attn = self.model.decoder(
            tgt_in, context, dec_states, context_lengths=src_lengths)
        tgt_pad = self.fields["tgt"].vocab.stoi[onmt.io.PAD_WORD]
        for dec, tgt in zip(dec_out, batch.tgt[1:].data):
            # Log prob of each word.
            out = self.model.generator.forward(dec)
            tgt = tgt.unsqueeze(1)
            scores = out.data.gather(1, tgt)
            # Padding positions contribute nothing to the gold score.
            scores.masked_fill_(tgt.eq(tgt_pad), 0)
            gold_scores += scores
        return gold_scores
    def getEmbedding(self, batch, FLAG=True):
        """
        Return (embedding, src) for a batch. When FLAG is False, `batch`
        is presumably already a feature tensor fed straight to the
        encoder's embedding layer — TODO confirm with callers.
        """
        # (1) Run the encoder on the src.
        if FLAG:
            _, src_lengths = batch.src
            src = onmt.io.make_features(batch, 'src')
            src.volatile = False
            #src.require_grads = False
            #print(src)
            emb = self.model.encoder.get_embedding(src)
        else:
            emb = self.model.encoder.get_embedding(batch)
            src = None
        return emb,src
    def getOutput(self, emb, src, batch):
        """
        Greedy (beam_size=1) decode starting from a raw source embedding,
        returning the generator pre-softmax outputs per step, the last
        attention, and the final projected outputs.
        """
        # (1) Run the encoder on the src.
        #src.require_grads = False
        beam_size = 1
        batch_size = batch.batch_size
        #data_tpye = data.data_type
        vocab = self.fields["tgt"].vocab
        _, src_lengths = batch.src
        src_lengths = None
        encStates, context = self.model.encoder(emb, FLAG=False)
        decStates = self.model.decoder.init_decoder_state(
            src, context, encStates)
        if src_lengths is None:
            src_lengths = torch.Tensor(batch_size).type_as(context.data)\
                .long()\
                .fill_(context.size(0))
        def var(a): return Variable(a, volatile=False)
        context_lengths = src_lengths.repeat(beam_size)
        decStates.repeat_beam_size_times(beam_size)
        scorer = None
        beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,
                                    cuda=self.cuda,
                                    global_scorer=self.global_scorer,
                                    pad=vocab.stoi[onmt.io.PAD_WORD],
                                    eos=vocab.stoi[onmt.io.EOS_WORD],
                                    bos=vocab.stoi[onmt.io.BOS_WORD])
                for __ in range(batch_size)]
        # (2) run the decoder to generate sentences, using beam search.
        def bottle(m):
            return m.view(batch_size * beam_size, -1)
        def unbottle(m):
            return m.view(beam_size, batch_size, -1)
        # NOTE(review): 50004 is presumably the target vocabulary size —
        # verify against the trained model rather than hard-coding.
        output_a = Variable(torch.zeros(1, 50004).cuda())
        for i in range(self.max_length):
            if all((b.done() for b in beam)):
                break
            # Construct batch x beam_size nxt words.
            # Get all the pending current beam words and arrange for forward.
            inp = var(torch.stack([b.get_current_state() for b in beam])
                      .t().contiguous().view(1, -1))
            # Turn any copied words to UNKs
            # 0 is unk
            if self.copy_attn:
                inp = inp.masked_fill(
                    inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
            # Temporary kludge solution to handle changed dim expectation
            #if i<3:
            #    print(inp)
            # in the decoder
            inp = inp.unsqueeze(2)
            # Run one step.
            decOut, decStates, attn = \
                self.model.decoder(inp, context, decStates, context_lengths=context_lengths)
            decOut = decOut.squeeze(0)
            # decOut: beam x rnn_size
            # Accumulate the generator's linear-layer output for every step.
            output = self.model.generator[0].forward(decOut)
            output_a = torch.cat((output_a,output), 0)
            #if i<1:
                #print(unbottle(attn["std"]))
            # (b) Compute a vector of batch*beam word scores.
            if not self.copy_attn:
                out = self.model.generator.forward(decOut).data
                out = unbottle(out)
                # beam x tgt_vocab
            else:
                # NOTE(review): `srcMap` and `dataset` are not defined in
                # this method — this branch raises NameError whenever
                # copy_attn is True. Compare with translate_batch, which
                # uses `src_map` and `data`.
                out = self.model.generator.forward(decOut,
                                                   attn["copy"].squeeze(0),
                                                   srcMap)
                # beam x (tgt_vocab + extra_vocab)
                out = dataset.collapse_copy_scores(
                    unbottle(out.data),
                    batch, self.fields["tgt"].vocab)
                # beam x tgt_vocab
                out = out.log()
            # (c) Advance each beam.
            for j, b in enumerate(beam):
                b.advance(out[:, j], unbottle(attn["std"]).data[:, j, :context_lengths[j]])
                decStates.beam_update(j, b.get_current_origin(), beam_size)
        # Drop the zero row used to seed the concatenation.
        output_i = self.model.generator[1].forward(output_a[1:,:])
        return output_a[1:,:], attn, output_i
| 11,549 | 38.02027 | 92 | py |
Seq2Sick | Seq2Sick-master/onmt/translate/Translation.py | from __future__ import division, unicode_literals
import torch
import onmt.io
class TranslationBuilder(object):
    """
    Turns raw decoder output (token indices, attention) back into
    readable token lists, resolving the extended copy vocabulary.

    Args:
        data: dataset the batches came from (supplies src_vocabs/examples).
        fields: dict of torchtext fields ('src', 'tgt', ...).
        n_best: number of hypotheses to rebuild per sentence.
        replace_unk: replace <unk> outputs with the most-attended source token.
        has_tgt: whether batches carry gold targets.
    """
    def __init__(self, data, fields, n_best, replace_unk, has_tgt):
        self.data = data
        self.fields = fields
        self.n_best = n_best
        self.replace_unk = replace_unk
        self.has_tgt = has_tgt
    def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):
        """Map predicted indices to target tokens; indices past the shared
        vocab refer to the per-sentence copy vocabulary. Stops at EOS."""
        vocab = self.fields["tgt"].vocab
        tokens = []
        for tok in pred:
            if tok < len(vocab):
                tokens.append(vocab.itos[tok])
            else:
                # Index beyond the shared vocab -> copied source token.
                tokens.append(src_vocab.itos[tok - len(vocab)])
            if tokens[-1] == onmt.io.EOS_WORD:
                tokens = tokens[:-1]
                break
        if self.replace_unk and (attn is not None) and (src is not None):
            for i in range(len(tokens)):
                if tokens[i] == vocab.itos[onmt.io.UNK]:
                    # Replace <unk> with the source token that received
                    # the most attention at this step.
                    _, maxIndex = attn[i].max(0)
                    tokens[i] = src_raw[maxIndex[0]]
        return tokens
    def _build_source_tokens(self, src, src_vocab, pred):
        """Same as _build_target_tokens but over the source vocabulary;
        no <unk> replacement (kept disabled below)."""
        vocab = self.fields["src"].vocab
        tokens = []
        for tok in pred:
            if tok < len(vocab):
                tokens.append(vocab.itos[tok])
            else:
                tokens.append(src_vocab.itos[tok - len(vocab)])
            if tokens[-1] == onmt.io.EOS_WORD:
                tokens = tokens[:-1]
                break
        # if self.replace_unk and (attn is not None) and (src is not None):
        #     for i in range(len(tokens)):
        #         if tokens[i] == vocab.itos[onmt.io.UNK]:
        #             _, maxIndex = attn[i].max(0)
        #             tokens[i] = src_raw[maxIndex[0]]
        return tokens
    def from_batch(self, translation_batch):
        """Build one Translation object per sentence from the output of
        Translator.translate_batch."""
        batch = translation_batch["batch"]
        assert(len(translation_batch["gold_score"]) ==
               len(translation_batch["predictions"]))
        batch_size = batch.batch_size
        # Re-sort everything back into the original corpus order using
        # the per-example indices carried through the batch.
        preds, predScore, attn, gold_score, indices = list(zip(
            *sorted(zip(translation_batch["predictions"],
                        translation_batch["scores"],
                        translation_batch["attention"],
                        translation_batch["gold_score"],
                        batch.indices.data),
                    key=lambda x: x[-1])))
        #print(preds)
        # Sorting
        inds, perm = torch.sort(batch.indices.data)
        data_type = self.data.data_type
        if data_type == 'text':
            src = batch.src[0].data.index_select(1, perm)
        else:
            src = None
        if self.has_tgt:
            tgt = batch.tgt.data.index_select(1, perm)
        else:
            tgt = None
        translations = []
        for b in range(batch_size):
            if data_type == 'text':
                src_vocab = self.data.src_vocabs[inds[b]]
                src_raw = self.data.examples[inds[b]].src
            else:
                src_vocab = None
                src_raw = None
            pred_sents = [self._build_target_tokens(
                src[:, b] if src is not None else None,
                src_vocab, src_raw,
                preds[b][n], attn[b][n])
                for n in range(self.n_best)]
            gold_sent = None
            if tgt is not None:
                # tgt[1:] drops the BOS token.
                gold_sent = self._build_target_tokens(
                    src[:, b] if src is not None else None,
                    src_vocab, src_raw,
                    tgt[1:, b] if tgt is not None else None, None)
            translation = Translation(src[:, b] if src is not None else None,
                                      src_raw, pred_sents,
                                      attn[b], predScore[b], gold_sent,
                                      gold_score[b])
            translations.append(translation)
        return translations
    def get_word(self, output_i, attn, batch):
        """Argmax-decode raw generator outputs into token lists (one
        pred_sents list per sentence; no Translation objects)."""
        batch_size = batch.batch_size
        pred = []
        # Per-step argmax over the vocabulary dimension.
        ind = torch.max(output_i,1)[1]
        for i in range(ind.size()[0]):
            pred.append(ind[i].data[0])
        preds = (pred,)
        attn = (attn,)
        inds, perm = torch.sort(batch.indices.data)
        data_type = self.data.data_type
        if data_type == 'text':
            src = batch.src[0].data.index_select(1, perm)
        else:
            src = None
        if self.has_tgt:
            tgt = batch.tgt.data.index_select(1, perm)
        else:
            tgt = None
        #n_best = 1
        translations = []
        #print(preds)
        for b in range(batch_size):
            if data_type == 'text':
                src_vocab = self.data.src_vocabs[inds[b]]
                src_raw = self.data.examples[inds[b]].src
            else:
                src_vocab = None
                src_raw = None
            pred_sents = [self._build_target_tokens(
                src[:, b] if src is not None else None,
                src_vocab, src_raw,
                preds[b], attn[b])]
            gold_sent = None
            if tgt is not None:
                gold_sent = self._build_target_tokens(
                    src[:, b] if src is not None else None,
                    src_vocab, src_raw,
                    tgt[1:, b] if tgt is not None else None, None)
            translations.append(pred_sents)
        return translations
    def get_source(self, newsrc, batch):
        """Render a (possibly perturbed) source index tensor back into a
        space-joined source sentence string."""
        pred = []
        for i in range(newsrc.size()[0]):
            pred.append(newsrc[i].data[0][0])
        preds = (pred,)
        #print(preds)
        _, perm = torch.sort(batch.indices.data)
        src = batch.src[0].data.index_select(1,perm)
        src_vocab = self.data.src_vocabs[0]
        pred_sents = self._build_source_tokens(src[:,0] if src is not None else None, src_vocab, preds[0])
        return ' '.join(pred_sents)
class Translation(object):
    """
    Container for one sentence's translation results.

    Attributes:
        src: source token indices (tensor) or None.
        src_raw: raw source tokens.
        pred_sents: n-best list of predicted token lists.
        attns: attention matrices for each prediction.
        pred_scores: scores parallel to `pred_sents`.
        gold_sent: gold target tokens, or None.
        gold_score: model score of the gold sentence.
    """
    def __init__(self, src, src_raw, pred_sents,
                 attn, pred_scores, tgt_sent, gold_score):
        self.src = src
        self.src_raw = src_raw
        self.pred_sents = pred_sents
        self.attns = attn
        self.pred_scores = pred_scores
        self.gold_sent = tgt_sent
        self.gold_score = gold_score
    def log(self, sent_number):
        """
        Build (and partially print) a human-readable report for this
        sentence; returns the report string.
        """
        output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
        best_pred = self.pred_sents[0]
        best_score = self.pred_scores[0]
        pred_sent = ' '.join(best_pred)
        output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
        print("PRED SCORE: {:.4f}".format(best_score))
        if self.gold_sent is not None:
            tgt_sent = ' '.join(self.gold_sent)
            output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
            output += ("GOLD SCORE: {:.4f}".format(self.gold_score))
        if len(self.pred_sents) > 1:
            print('\nBEST HYP:')
            # BUGFIX: the attribute is `pred_scores`; `self.pred_score`
            # raised AttributeError whenever n_best > 1.
            for score, sent in zip(self.pred_scores, self.pred_sents):
                output += "[{:.4f}] {}\n".format(score, sent)
        return output
| 7,063 | 35.225641 | 106 | py |
DGP | DGP-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
# Make the repository root importable so autodoc can find the package.
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'Deep and Linked Gaussian Process Emulations'
copyright = '2023, Deyu Ming'
author = 'Deyu Ming'
release = '2.2.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = ['sphinx.ext.napoleon',
              'sphinx.ext.autodoc',
              'sphinx.ext.mathjax']
# Docstrings follow the Google style, not the NumPy style.
napoleon_google_docstring = True
napoleon_numpy_docstring = False
templates_path = ['_templates']
exclude_patterns = []
# Stub this import out so autodoc can run without it being importable.
autodoc_mock_imports = ['numpy.random']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
#html_static_path = ['_static']
| 1,227 | 32.189189 | 87 | py |
SeConvNet | SeConvNet-main/model.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv2D, Activation,BatchNormalization, Add
import numpy as np
#define SeConv block:
class SeConv_block(keras.layers.Layer):
    """
    Selective convolution block.

    Pixels equal to 0 are treated as noisy; each noisy pixel is replaced
    by a trainable weighted average of the non-noisy pixels inside a
    kernel_size x kernel_size window, but only where at least
    (kernel_size - 2) non-noisy neighbours are available. Non-noisy
    pixels pass through unchanged.
    """
    def __init__(self, kernel_size, input_channels, **kwargs):
        super(SeConv_block, self).__init__()
        self.kernel_size = kernel_size
        self.input_channels = input_channels
    def build(self, input_shape):
        # All-ones init so the block starts as a plain box-filter average.
        kernel_init = tf.ones_initializer()
        self.kernel = tf.Variable(name="kernel", initial_value=kernel_init(shape=(self.kernel_size, self.kernel_size, self.input_channels, 1), dtype='float32'), trainable=True)
    def call(self, inputs):
        # non-noisy pixels map (1 where the pixel is non-zero):
        M_hat = tf.math.not_equal(inputs, 0)
        M_hat = tf.dtypes.cast(M_hat, tf.float32)
        conv_input = tf.nn.conv2d(inputs, self.kernel, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC', dilations=None, name=None)
        conv_M_hat = tf.nn.conv2d(M_hat, self.kernel, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC', dilations=None, name=None)
        # find 0 in conv_M_hat and change to 1 to avoid division by zero:
        is_zero_conv_M_hat = tf.equal(conv_M_hat, 0)
        is_zero_conv_M_hat = tf.dtypes.cast(is_zero_conv_M_hat, tf.float32)
        change_zero_to_one_conv_M_hat = tf.math.add(conv_M_hat, is_zero_conv_M_hat)
        # S: weighted average of the non-noisy pixels in each window.
        S = tf.math.divide(conv_input, change_zero_to_one_conv_M_hat)
        # noisy pixels map:
        M = 1 - M_hat
        # R: gate requiring at least kernel_size-2 non-noisy neighbours.
        kernel_ones = np.ones((self.kernel_size, self.kernel_size, self.input_channels, 1))
        kernel_ones = tf.constant(kernel_ones, dtype=tf.float32)
        R = tf.nn.conv2d(M_hat, kernel_ones, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC', dilations=None, name=None)
        R = tf.math.greater_equal(R, tf.constant(self.kernel_size-2, dtype=tf.float32))
        R = tf.dtypes.cast(R, tf.float32)
        # Fill gated noisy pixels with S; keep everything else as-is.
        y = tf.math.multiply(tf.math.multiply(S, M), R) + inputs
        return y
    def get_config(self):
        config = super(SeConv_block, self).get_config()
        # BUGFIX: include input_channels as well; __init__ requires it, so
        # rebuilding the layer from config failed without it.
        config.update({"kernel_size": self.kernel_size,
                       "input_channels": self.input_channels})
        return config
#___________________________________________________________________
# define model:
def SeConvNet(num_SeConv_block=7,depth=27,filters=64,image_channels=1):
    """
    Build the SeConvNet denoising model.

    Args:
        num_SeConv_block: number of leading SeConv blocks (kernel sizes 3, 5, ...).
        depth: total number of layers, including the SeConv blocks and
            the final Conv layer.
        filters: number of filters in the intermediate Conv layers.
        image_channels: 1 for grayscale, 3 for color.

    Returns:
        A keras Model mapping a noisy image (noisy pixels marked by 0)
        to its denoised estimate.
    """
    layer_count = 0
    inputs = Input(shape=(None,None,image_channels), name='input'+str(layer_count))
    # 1st to 7th layers (SeConv_block) with growing kernels 3, 5, ..., 2k+1:
    x = inputs
    for i in range(num_SeConv_block):
        layer_count += 1
        x = SeConv_block(2*layer_count+1, image_channels, name='SeConv_block'+str(layer_count))(x)
    # 8th to 26th layers, Conv+BN+ReLU:
    for i in range(depth-num_SeConv_block-1):
        layer_count += 1
        x = Conv2D(filters=filters, kernel_size=(3,3), strides=(1,1), kernel_initializer='Orthogonal', padding='same', use_bias = False, name='Conv'+str(layer_count))(x)
        x = BatchNormalization(axis=3, momentum=0.1, epsilon=0.0001, name='BN'+str(layer_count))(x)
        x = Activation('relu', name='ReLU'+str(layer_count))(x)
    # last layer, Conv:
    # BUGFIX: was `layer_count =+ 1`, which assigned 1 and misnamed the
    # final layer 'Conv1'. Note the fix renames it to 'Conv<depth>', which
    # matters when loading weight files saved by layer name.
    layer_count += 1
    x = Conv2D(filters=image_channels, kernel_size=(3,3), strides=(1,1), kernel_initializer='Orthogonal', padding='same', use_bias = False, name='Conv'+str(layer_count))(x)
    # Apply the predicted correction only at noisy (zero-valued) pixels,
    # then add it back onto the input (residual connection).
    x = keras.layers.Multiply(name='Multiply')([x, tf.dtypes.cast(tf.math.equal(inputs, 0), tf.float32)])
    outputs = Add(name='Add')([x, inputs])
    model = keras.models.Model(inputs=inputs, outputs=outputs)
    return model
SeConvNet | SeConvNet-main/train.py | import argparse
import numpy as np
from tensorflow import keras
from tensorflow.math import reduce_sum, square
import os
from model import SeConvNet
from SPN import SPN
from data_generator import data_gen
parser = argparse.ArgumentParser()
parser.add_argument('--noise_density', default=0.95, type=float, help='noise density, should be in the interval [0, 1]')
parser.add_argument('--image_channels', default=1, type=int, help='number of channels; 1 for gray images and 3 for color images')
parser.add_argument('--epoch', dest='epoch', type=int, default=50, help='number of epochs')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', dest='lr', type=float, default=1e-3, help='initial learning rate for adam')
parser.add_argument('--train_dir', default='data/Train', type=str, help='path of train data')
parser.add_argument('--steps', dest='steps', type=int, default=2000, help='number of steps per epoch')
args = parser.parse_args()
color_mode = 'Gray' if args.image_channels == 1 else 'Color'
save_dir = os.path.join('weights', color_mode, 'SeConvNet_'+str(int(100*args.noise_density)))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model = SeConvNet(image_channels=args.image_channels)
model.summary
def train_datagen(epoch_iter=2000, epoch_num=5, batch_size=args.batch_size, data_dir=os.path.join(args.train_dir, color_mode)):
    """Infinite generator of (noisy, clean) training batches.

    Loads clean patches via ``data_gen`` and yields shuffled mini-batches
    where the input is the clean batch corrupted with salt-and-pepper noise
    and the target is the clean batch.

    NOTE(review): ``epoch_iter`` is unused, and ``n_count`` is reset to 0 on
    every outer while-iteration, so the patch set is regenerated every
    ``epoch_num`` epochs — presumably intentional (fresh patches); confirm.
    """
    while(True):
        n_count = 0
        if n_count == 0:
            # (Re)load all training patches as one array of shape (N, H, W, C).
            xs = data_gen(data_dir)
            indices = list(range(xs.shape[0]))
            n_count = 1
        for _ in range(epoch_num):
            np.random.shuffle(indices)
            for i in range(0, len(indices), batch_size):
                batch_x = xs[indices[i:i+batch_size]]
                # Scale 8-bit patches to [0, 1].
                batch_x = batch_x.astype('float32')/255.0
                # Corrupt with salt-and-pepper noise at the requested density.
                batch_y = SPN(batch_x, args.noise_density)
                # Map pixels equal to 1 (salt) to 0 — presumably so every
                # corrupted pixel becomes 0, matching the model's
                # noisy-pixel mask (inputs == 0); confirm against SPN().
                batch_y[batch_y==1] = 0.
                yield batch_y, batch_x
def sum_squared_error(y_true, y_pred):
    """Half the sum of squared residuals (SSE / 2), used as the training loss."""
    residual = y_pred - y_true
    return reduce_sum(square(residual)) / 2
model.compile(optimizer=keras.optimizers.Adam(learning_rate=args.lr), loss=sum_squared_error)
def scheduler(epoch):
    """Step learning-rate schedule: base LR for the first 70% of epochs, then LR/10."""
    total_epochs = args.epoch
    base_lr = args.lr
    lr = base_lr if epoch <= int(0.7 * total_epochs) else base_lr / 10
    print('current learning rate is %1.8f' % lr)
    return lr
LearningRate_Scheduler = keras.callbacks.LearningRateScheduler(scheduler)
model_checkpoint = keras.callbacks.ModelCheckpoint(os.path.join(save_dir,'model_{epoch:03d}.hdf5'), verbose=1, save_best_only=False, save_weights_only=True)
csv_logger = keras.callbacks.CSVLogger(os.path.join(save_dir,'training.log'), separator=",", append=True)
history = model.fit(train_datagen(batch_size=args.batch_size),steps_per_epoch=args.steps, epochs=args.epoch, verbose=1, callbacks=[model_checkpoint, csv_logger, LearningRate_Scheduler]) | 2,919 | 39 | 185 | py |
icd-coding-benchmark | icd-coding-benchmark-main/app.py | #!/usr/bin/env python
"""
The interactive demo of ICD coding benchmark (prototype)
"""
import argparse
import copy
import csv
import numpy as np
import pandas as pd
import seaborn as sns
import streamlit as st
import torch
from captum.attr import LayerIntegratedGradients
from anemic.modules.preprocessors import ClinicalNotePreprocessor
from anemic.utils.configuration import Config
from anemic.utils.mapper import ConfigMapper
from anemic.utils.misc import html_word_importance
hash_funcs = {
Config: lambda x: hash(str(x)),
torch.nn.parameter.Parameter: lambda x: hash(x.shape),
}
@st.cache(hash_funcs=hash_funcs, allow_output_mutation=True)
def load_config():
    """Parse ``--config_path`` from the command line and load the demo Config."""
    arg_parser = argparse.ArgumentParser(
        description="Demo app for automatic ICD coding"
    )
    arg_parser.add_argument(
        "--config_path",
        type=str,
        action="store",
        help="Path to the config file",
    )
    cli_args = arg_parser.parse_args()
    return Config(path=cli_args.config_path)
@st.cache(hash_funcs=hash_funcs, allow_output_mutation=True)
def load_modules(config):
    """Instantiate the preprocessor, dataset, models and Captum attributors.

    Returns:
        (preprocessor, dataset, model_dict, lig_dict) where the dicts map
        model names to the loaded model and its LayerIntegratedGradients
        wrapper, respectively.

    Raises:
        ValueError: if a model has no best checkpoint, or its config does
            not name a resolvable embedding layer.
    """
    # 1. Load preprocessor
    # First, we change the config to initialize default params and subclasses
    pp_config = copy.deepcopy(config.clinical_note_preprocessing)
    if not pp_config.remove_numeric.perform:
        pp_config.remove_numeric.set_value(
            "replace_numerics_with_letter", False
        )
    if not pp_config.remove_stopwords.perform:
        pp_config.remove_stopwords = Config(
            dic={
                "perform": True,
                "params": Config(
                    dic={
                        "stopwords_file_path": None,
                        "remove_common_medical_terms": True,
                    }
                ),
            }
        )
    if not pp_config.stem_or_lemmatize.perform:
        pp_config.stem_or_lemmatize = Config(
            dic={
                "perform": True,
                "params": Config(
                    dic={"stemmer_name": "nltk.WordNetLemmatizer"}
                ),
            }
        )
    preprocessor = ClinicalNotePreprocessor(pp_config)
    # Restore the original preprocessor config back
    preprocessor._config.remove_stopwords.set_value(
        "perform", config.clinical_note_preprocessing.remove_stopwords.perform
    )
    preprocessor._config.stem_or_lemmatize.set_value(
        "perform", config.clinical_note_preprocessing.stem_or_lemmatize.perform
    )
    # 2. Load dataset
    dataset = ConfigMapper.get_object("datasets", config.dataset.name)(
        config.dataset.params
    )
    # 3. Load model(s) — a single-model config (no "models" key) is wrapped
    # so both layouts go through the same loop.
    model_dict = {}
    lig_dict = {}
    if hasattr(config, "models"):
        model_configs = config.models
    else:
        model_configs = [config]
    for model_config in model_configs:
        model = ConfigMapper.get_object("models", model_config.model.name)(
            model_config.model.params
        )
        # Load ckpt
        ckpt_saver = ConfigMapper.get_object(
            "checkpoint_savers", model_config.checkpoint_saver.name
        )(model_config.checkpoint_saver.params)
        best_ckpt = ckpt_saver.get_best_checkpoint()
        if best_ckpt is None:
            raise ValueError("Best ckpt not found.")
        ckpt_saver.load_ckpt(model, best_ckpt, optimizer=None)
        # Set model GPU & eval mode
        if config.demo.use_gpu:
            model.cuda()
        model.eval()
        # 4. Captum attribute module
        # BUG FIX: was a bare `except:` (swallowing unrelated errors such as
        # KeyboardInterrupt) and the error message lacked a space between the
        # two concatenated literals ("does notspecify").
        try:
            embed_layer_name = getattr(
                model_config.model, "embed_layer_name", "embed"
            )
            embed_layer = getattr(model, embed_layer_name)
        except AttributeError:
            raise ValueError(
                f"Config for {model_config.model.name} does not "
                "specify name of the embedding layer."
            )
        lig = LayerIntegratedGradients(model, embed_layer)
        model_dict[model_config.model.name] = model
        lig_dict[model_config.model.name] = lig
    return preprocessor, dataset, model_dict, lig_dict
@st.cache(hash_funcs=hash_funcs, allow_output_mutation=True)
def load_icd_desc(config):
    """Load ICD code descriptions as a {code: description} dict.

    Reads the tab-separated file at ``config.demo.icd_desc_file``; each row
    is ``code<TAB>description``.
    """
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original relied on the garbage collector).
    with open(config.demo.icd_desc_file) as f:
        rows = csv.reader(f, delimiter="\t")
        return {row[0]: row[1] for row in rows}
# Page setup
st.set_page_config(
page_title="ICD Coding Interactive Demo",
page_icon=":syringe:",
layout="wide",
)
st.markdown(
"""
<style>
div.stButton > button:first-child {
background-color: rgb(255, 75, 75);
color: white;
width: 100%;
border: 0px;
padding-right: 20px;
}
.streamlit-expanderHeader { font-size: medium; }
</style>""",
unsafe_allow_html=True,
)
# Title & status line
st.title("🩺 ICD Coding Interactive Demo")
status = st.empty()
# Displaying status
def set_status(text):
    """Render *text* in the status-line placeholder with a lightbulb prefix."""
    message = f"💡 {text}"
    status.text(message)
set_status("Loading model...")
# App info
info_str = """
- This is an interactive app to run automatic diagnostic coding models and
visualize the diagnosis code prediction with the importance score of the
input.
- To run the model, please put a discharge summary in the "Discharge summary
note" box and hit the Submit button. Try different options of preprocessing
and visualization!
- To run other models, please specify the config (in `configs/demo/`) of an
available model from the command-line argument. Checkpoints of the
corresponding models need to be downloaded.
- For more help, please check out our
[ICD Coding Benchmark](https://github.com/dalgu90/icd-coding-benchmark) repo.
Thanks!
"""
with st.expander("ℹ️ About the app", expanded=False):
st.write(info_str)
st.markdown("")
# Load config, modules, and icd descriptions
config = load_config()
preprocessor, dataset, model_dict, lig_dict = load_modules(config)
icd_desc = load_icd_desc(config)
set_status(f"Model loaded ({', '.join(model_dict.keys())})")
# Main form
with st.form("my_form"):
# Layout
_, col1, _, col2, _ = st.columns([0.07, 1, 0.07, 4, 0.07])
with col1:
# Model selection
# TODO: Support multiple models
model_name = st.radio(
"Choose model",
list(model_dict.keys()),
help="""Currently, running multiple models is not supported.""",
)
# K selection
top_k = st.slider(
"Top-k prediction",
min_value=1,
max_value=min(50, len(icd_desc)),
value=config.demo.top_k,
help="""The number of predictions with highest scores. Between 1 and
maximum number of output codes (or 50 if label space is too
too large).""",
)
# Input visualization selection
vis_score_options = [
"NO",
"Integrated Gradients",
]
if any(
hasattr(model, "get_input_attention")
for model in model_dict.values()
):
vis_score_options.append("Attention score")
vis_score = st.radio(
"Visualize attribution score",
vis_score_options,
help="""Interpretability visualization methods. Attention score is
available only for attention-based models.""",
)
vis_code_options = ["Choose ICD code"]
vis_code_options += dataset.decode_labels(range(dataset.num_labels))
vis_code = st.selectbox(
"ICD code to compute attribution score",
vis_code_options,
index=0,
help="""Code to visualize the attribution. It will be used when the
interpretability method is other than "NO".""",
)
# Preprocessing option selection (truncation is not controlled)
st.markdown(
"""<p style="font-size: small;"> Preprocessing </p>""",
unsafe_allow_html=True,
)
pp_config = config.clinical_note_preprocessing
pp_lower_case = st.checkbox(
"Lowercase",
value=pp_config.to_lower.perform,
)
pp_remove_punc = st.checkbox(
"Remove punctuation",
value=pp_config.remove_punctuation.perform,
)
pp_remove_numeric = st.checkbox(
"Remove numeric words",
value=pp_config.remove_numeric.perform,
)
pp_remove_stopwords = st.checkbox(
"Remove stopwords",
value=pp_config.remove_stopwords.perform,
)
pp_stem = st.checkbox(
"Stem / lemmatize words",
value=pp_config.stem_or_lemmatize.perform,
)
submitted = st.form_submit_button("🚀 SUBMIT!")
with col2:
# Input text
css_str = "line-height:1; margin-top:1rem; margin-bottom:-2rem;"
st.markdown(
f"""<div style="{css_str}">Discharge summary note</div>""",
unsafe_allow_html=True,
)
input_text = st.text_area(label="", height=200)
# input_text = st.text_area(label="Discharge summary note", height=200)
input_text = input_text.strip()
if input_text:
set_status("Processing...")
# Preprocess text
with st.expander("Preprocessed text", expanded=False):
preprocessor._config.to_lower.set_value("perform", pp_lower_case)
preprocessor._config.remove_punctuation.set_value(
"perform", pp_remove_punc
)
preprocessor._config.remove_numeric.set_value(
"perform", pp_remove_numeric
)
preprocessor._config.remove_stopwords.set_value(
"perform", pp_remove_stopwords
)
preprocessor._config.stem_or_lemmatize.set_value("perform", pp_stem)
preprocessed_text = preprocessor(input_text)
st.text_area(
label="Preprocessed note",
value=preprocessed_text,
height=200,
disabled=True,
)
with st.expander("Input tokens", expanded=False):
# Tokenize text with vocab
token_idxs = dataset.encode_tokens(preprocessed_text.split())
tokens = dataset.decode_tokens(token_idxs)
token_text = " ".join(tokens)
st.text_area(
label="Tokens", value=token_text, height=200, disabled=True
)
# Model prediction
st.write("ICD code prediction")
if token_idxs and len(token_idxs) >= config.demo.min_input_len:
model = model_dict[model_name]
# Forward pass
batch_input = torch.tensor([token_idxs])
if config.demo.use_gpu:
batch_input = batch_input.cuda()
with torch.no_grad():
batch_output = model(batch_input)
probs = torch.sigmoid(batch_output[0].cpu()).numpy()
top_k_preds = np.argsort(probs)[-1 : -(top_k + 1) : -1]
top_k_probs = [probs[p] for p in top_k_preds]
top_k_codes = dataset.decode_labels(top_k_preds)
top_k_descs = [icd_desc[c] for c in top_k_codes]
# Output as table
output_df = pd.DataFrame(
{
"ICD_Code": top_k_codes,
"Probability": top_k_probs,
"Description": top_k_descs,
}
)
output_df.index += 1
cmap = sns.light_palette("#AC304B", as_cmap=True)
output_df = output_df.style.background_gradient(
cmap=cmap, subset=["Probability"], vmin=0.0, vmax=1.0
).format({"Probability": "{:.4f}"})
st.table(output_df)
# Attribution score:
target_label = vis_code_options.index(vis_code) - 1 # starts from 1
with st.expander(f"Attribution score", expanded=True):
if vis_score == "NO":
st.markdown("**[No attribution method selected]**")
elif target_label == -1:
st.markdown("**[No ICD code selected]**")
elif vis_score == "Attention score" and not hasattr(
model, "get_input_attention"
):
st.markdown("**[Model does not support attention score]**")
else:
if vis_score == "Integrated Gradients":
lig = lig_dict[model_name]
attrs, approx_error = lig.attribute(
batch_input,
target=target_label,
return_convergence_delta=True,
)
attrs = attrs.sum(dim=2).squeeze(0)
attrs = (
(attrs / torch.norm(attrs)).cpu().detach().numpy()
)
elif vis_score == "Attention score":
attrs = model.get_input_attention()
attrs = attrs[:, target_label].squeeze(0)
attrs /= np.linalg.norm(attrs)
else:
raise ValueError(f"Wrong model selected.")
assert len(attrs) >= len(tokens)
html_string = html_word_importance(tokens, attrs)
st.markdown(
f"**{vis_score}** for **{vis_code}** "
f"({icd_desc[vis_code]})"
)
st.markdown(html_string, unsafe_allow_html=True)
st.markdown("")
elif token_idxs and len(token_idxs) < config.demo.min_input_len:
st.markdown(f"**[Input too short(<{config.demo.min_input_len})]**")
else:
st.markdown("**[No input]**")
if input_text:
set_status("Done!")
| 13,915 | 33.877193 | 80 | py |
icd-coding-benchmark | icd-coding-benchmark-main/run.py | # Imports
import argparse
import os
import pandas
import torch
from torchsummaryX import summary
from anemic.utils.configuration import Config
from anemic.utils.import_related_ops import pandas_related_ops
from anemic.utils.mapper import ConfigMapper
from anemic.utils.misc import seed
pandas_related_ops()
# Command line arguments
# Entry point: parse CLI flags, load the experiment Config, then either train
# (default) or evaluate (--test) the configured model via the configured
# trainer.
parser = argparse.ArgumentParser(description="Train or test the model")
parser.add_argument(
    "--config_path", type=str, action="store", help="Path to the config file"
)
parser.add_argument(
    "--test",
    action="store_true",
    help="Whether to use validation data or test data",
    default=False,
)
parser.add_argument(
    "--model_summary",
    action="store_true",
    help="Whether to print model summary. Note that this is supported only for "
    "models which take in a 2D input. This will be extended later",
    default=False,
)
args = parser.parse_args()
# Config
config = Config(path=args.config_path)
if not args.test:  # Training
    # Seed all RNGs for reproducibility.
    seed(config.trainer.params.seed)
    # Load train/val dataset splits.
    train_data = ConfigMapper.get_object("datasets", config.dataset.name)(
        config.dataset.params.train
    )
    val_data = ConfigMapper.get_object("datasets", config.dataset.name)(
        config.dataset.params.val
    )
    # Model
    model = ConfigMapper.get_object("models", config.model.name)(
        config.model.params
    )
    if args.model_summary:
        # Dummy (1, 20) token-id batch just to trace the network shapes.
        summary(
            model, torch.randint(low=0, high=50000, size=(1, 20)).to(torch.long)
        )
    # Trainer
    trainer = ConfigMapper.get_object("trainers", config.trainer.name)(
        config.trainer.params
    )
    # Train!
    trainer.train(model, train_data, val_data)
else:  # Test
    # Load the held-out test split.
    test_data = ConfigMapper.get_object("datasets", config.dataset.name)(
        config.dataset.params.test
    )
    # Model
    model = ConfigMapper.get_object("models", config.model.name)(
        config.model.params
    )
    # Trainer
    trainer = ConfigMapper.get_object("trainers", config.trainer.name)(
        config.trainer.params
    )
    # Test!
    trainer.test(model, test_data)
| 2,156 | 23.793103 | 80 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/modules/losses.py | """All criterion functions."""
import json
import os
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from anemic.utils.file_loaders import load_json
from anemic.utils.mapper import ConfigMapper
ConfigMapper.map("losses", "mse")(MSELoss)
ConfigMapper.map("losses", "CrossEntropyLoss")(CrossEntropyLoss)
@ConfigMapper.map("losses", "BinaryCrossEntropyLoss")
class BinaryCrossEntropyLoss(BCEWithLogitsLoss):
    """BCE-with-logits loss that accepts integer (multi-hot) targets.

    Extra constructor kwargs are taken from ``config.as_dict()`` when a
    config object is provided.
    """

    def __init__(self, config):
        self.config = config
        kwargs = config.as_dict() if config else {}
        super().__init__(**kwargs)

    def forward(self, input, target):
        # BCEWithLogitsLoss requires float targets; cast only when needed.
        float_target = target if target.dtype == torch.float else target.float()
        return super().forward(input=input, target=float_target)
@ConfigMapper.map("losses", "BinaryCrossEntropyWithLabelSmoothingLoss")
class BinaryCrossEntropyWithLabelSmoothingLoss(BCEWithLogitsLoss):
    """BCE-with-logits loss with label smoothing.

    Targets are smoothed as ``t * (1 - alpha) + alpha / num_classes`` before
    the standard BCE-with-logits loss is applied. ``alpha`` is read from the
    config; remaining config entries are forwarded to the base class.
    """

    def __init__(self, config):
        self.config = config
        kwargs = config.as_dict()
        self.alpha = kwargs.pop("alpha")
        super().__init__(**(kwargs if config else {}))

    def forward(self, input, target):
        if target.dtype != torch.float:
            target = target.float()
        smoothed = target * (1 - self.alpha) + self.alpha / target.size(1)
        return super().forward(input=input, target=smoothed)
@ConfigMapper.map("losses", "LDAMLoss")
class LDAMLoss(BCEWithLogitsLoss):
    """Label-Distribution-Aware Margin (LDAM) loss on BCE-with-logits.

    Each label j gets a margin proportional to 1 / n_j**0.25, where n_j is
    the label's training frequency read from the label-frequency JSON.
    Positive labels have their logits reduced by ``C * margin`` before the
    standard BCE-with-logits loss is computed, penalising rare labels less.
    """
    def __init__(self, config):
        # `config` must provide label_freq_json_dir, label_freq_json_name and
        # C, plus any kwargs accepted by BCEWithLogitsLoss.
        config_dict = config.as_dict()
        label_freq_path = os.path.join(
            config_dict.pop("label_freq_json_dir"),
            config_dict.pop("label_freq_json_name"),
        )
        label_freq = list(load_json(label_freq_path).values())
        # margin_j = n_j**(-1/4); zero-frequency labels are masked to 1 first
        # so the reciprocal below cannot divide by zero.
        self.class_margin = (
            torch.tensor(label_freq, dtype=torch.float32) ** 0.25
        )
        self.class_margin = self.class_margin.masked_fill(
            self.class_margin == 0, 1
        )
        self.class_margin = 1.0 / self.class_margin
        self.C = config_dict.pop("C")
        super().__init__(**(config_dict if config_dict else {}))
    def forward(self, input, target):
        # get_device() returns -1 for CPU tensors; normalise so .to(device)
        # works for both CPU and GPU inputs.
        device = input.get_device()
        if device == -1:
            device = "cpu"
        target = target.to(device)
        self.class_margin = self.class_margin.to(device)
        if target.dtype != torch.float:
            target = target.float()
        # Subtract the per-class margin from the logits of positive labels
        # only (target is multi-hot), scaled by the constant C.
        ldam_input = (
            input
            - target * Variable(self.class_margin, requires_grad=False) * self.C
        )
        return super().forward(input=ldam_input, target=target)
| 2,635 | 30.759036 | 80 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/modules/schedulers.py | from torch.optim.lr_scheduler import (
CosineAnnealingLR,
CosineAnnealingWarmRestarts,
CyclicLR,
LambdaLR,
ReduceLROnPlateau,
StepLR,
)
from transformers import get_linear_schedule_with_warmup
from anemic.utils.mapper import ConfigMapper
ConfigMapper.map("schedulers", "step")(StepLR)
ConfigMapper.map("schedulers", "cosineanneal")(CosineAnnealingLR)
ConfigMapper.map("schedulers", "reduceplateau")(ReduceLROnPlateau)
ConfigMapper.map("schedulers", "cyclic")(CyclicLR)
ConfigMapper.map("schedulers", "cosineannealrestart")(
CosineAnnealingWarmRestarts
)
ConfigMapper.map("schedulers", "linearwithwarmup")(
get_linear_schedule_with_warmup
)
| 674 | 28.347826 | 66 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/modules/optimizers.py | """Method containing activation functions"""
from torch.optim import SGD, Adam, AdamW
from anemic.utils.mapper import ConfigMapper
ConfigMapper.map("optimizers", "adam")(Adam)
ConfigMapper.map("optimizers", "adam_w")(AdamW)
ConfigMapper.map("optimizers", "sgd")(SGD)
| 269 | 29 | 47 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/modules/activations.py | import torch.nn as nn
from anemic.utils.mapper import ConfigMapper
ConfigMapper.map("activations", "relu")(nn.ReLU)
ConfigMapper.map("activations", "logsoftmax")(nn.LogSoftmax)
ConfigMapper.map("activations", "softmax")(nn.Softmax)
| 234 | 28.375 | 60 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/models/multirescnn.py | """
ICD Coding from Clinical Text Using Multi-Filter Residual Convolutional Neural
Network, 2020
https://github.com/foxlf823/Multi-Filter-Residual-Convolutional-Neural-Network
"""
from math import floor
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_ as xavier_uniform
from anemic.utils.mapper import ConfigMapper
from anemic.utils.model_utils import load_lookups
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
class WordRep(nn.Module):
    """Word representation: pretrained word2vec embedding plus dropout.

    Also exposes ``conv_dict``, mapping a conv-stack depth (1-4) to the
    channel sizes used by the residual conv models built on top.
    """

    def __init__(self, config):
        super(WordRep, self).__init__()
        # Load the pretrained embedding matrix and copy it into an Embedding
        # layer (index 0 is reserved for padding).
        embedding_cls = ConfigMapper.get_object("embeddings", "word2vec")
        weights = torch.Tensor(embedding_cls.load_emb_matrix(config.word2vec_dir))
        vocab_size, emb_dim = weights.size()[0], weights.size()[1]
        self.embed = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.embed.weight.data = weights.clone()
        self.feature_size = self.embed.embedding_dim
        # Embedding dropout
        self.embed_drop = nn.Dropout(p=config.dropout)
        # Channel sizes for 1- to 4-layer conv stacks.
        self.conv_dict = {
            1: [self.feature_size, config.num_filter_maps],
            2: [self.feature_size, 100, config.num_filter_maps],
            3: [self.feature_size, 150, 100, config.num_filter_maps],
            4: [self.feature_size, 200, 150, 100, config.num_filter_maps],
        }

    def forward(self, x):
        embedded = self.embed(x)
        return self.embed_drop(embedded)
class OutputLayer(nn.Module):
    """Label-wise attention output layer.

    ``U`` produces one attention distribution over sequence positions per
    label; the attended representations are scored by ``final`` to give one
    logit per label. The attention weights are kept on ``self.alpha`` for
    later inspection.
    """

    def __init__(self, Y, input_size):
        super(OutputLayer, self).__init__()
        self.U = nn.Linear(input_size, Y)
        self.final = nn.Linear(input_size, Y)
        xavier_uniform(self.U.weight)
        xavier_uniform(self.final.weight)

    def forward(self, x):
        # x: (batch, seq_len, input_size) -> alpha: (batch, Y, seq_len)
        scores = self.U.weight.matmul(x.transpose(1, 2))
        self.alpha = F.softmax(scores, dim=2)
        context = self.alpha.matmul(x)  # (batch, Y, input_size)
        logits = self.final.weight.mul(context).sum(dim=2).add(self.final.bias)
        return logits
@ConfigMapper.map("models", "MultiCNN")
class MultiCNN(nn.Module):
    """Multi-filter CNN text classifier with label-wise attention output.

    ``config.filter_size`` is a string: either a single kernel size ("4") or
    a comma-separated list ("3,5,9"), giving one parallel conv branch per
    size.
    """

    def __init__(self, config):
        super(MultiCNN, self).__init__()
        Y = config.num_classes
        self.dicts = load_lookups(
            dataset_dir=config.dataset_dir,
            mimic_dir=config.mimic_dir,
            static_dir=config.static_dir,
            word2vec_dir=config.word2vec_dir,
            version=config.version,
        )
        self.word_rep = WordRep(config)
        if config.filter_size.find(",") == -1:
            self.filter_num = 1
            filter_size = int(config.filter_size)
            self.conv = nn.Conv1d(
                self.word_rep.feature_size,
                config.num_filter_maps,
                kernel_size=filter_size,
                padding=floor(filter_size / 2),
            )
            xavier_uniform(self.conv.weight)
        else:
            # BUG FIX: the original took len() of the raw string (counting
            # characters, commas included) and iterated its characters; split
            # on "," so "3,5,9" yields kernel sizes [3, 5, 9].
            filter_sizes = [int(s) for s in config.filter_size.split(",")]
            self.filter_num = len(filter_sizes)
            self.conv = nn.ModuleList()
            for filter_size in filter_sizes:
                tmp = nn.Conv1d(
                    self.word_rep.feature_size,
                    config.num_filter_maps,
                    kernel_size=filter_size,
                    padding=floor(filter_size / 2),
                )
                xavier_uniform(tmp.weight)
                self.conv.add_module("conv-{}".format(filter_size), tmp)
        self.output_layer = OutputLayer(
            Y, self.filter_num * config.num_filter_maps
        )

    def forward(self, x, target=None, text_inputs=None):
        # BUG FIX: WordRep.forward and OutputLayer.forward take a single
        # tensor, but the original passed (x, target, text_inputs) and tried
        # to unpack two return values, which raised at runtime. `target` and
        # `text_inputs` are kept (unused) for signature compatibility; the
        # return value is the logits tensor, matching MultiResCNN.forward.
        x = self.word_rep(x)
        x = x.transpose(1, 2)
        if self.filter_num == 1:
            x = torch.tanh(self.conv(x).transpose(1, 2))
        else:
            conv_result = []
            for tmp in self.conv:
                conv_result.append(torch.tanh(tmp(x).transpose(1, 2)))
            x = torch.cat(conv_result, dim=2)
        y = self.output_layer(x)
        return y
class ResidualBlock(nn.Module):
    """Residual block of two Conv1d+BatchNorm stages with tanh activations.

    When ``use_res`` is set, a 1x1 Conv1d + BatchNorm shortcut projects the
    input so it can be added to the main path. The summed output passes
    through tanh and dropout.
    """

    def __init__(
        self, inchannel, outchannel, kernel_size, stride, use_res, dropout
    ):
        super(ResidualBlock, self).__init__()
        same_pad = floor(kernel_size / 2)
        self.left = nn.Sequential(
            nn.Conv1d(
                inchannel,
                outchannel,
                kernel_size=kernel_size,
                stride=stride,
                padding=same_pad,
                bias=False,
            ),
            nn.BatchNorm1d(outchannel),
            nn.Tanh(),
            nn.Conv1d(
                outchannel,
                outchannel,
                kernel_size=kernel_size,
                stride=1,
                padding=same_pad,
                bias=False,
            ),
            nn.BatchNorm1d(outchannel),
        )
        self.use_res = use_res
        if use_res:
            self.shortcut = nn.Sequential(
                nn.Conv1d(
                    inchannel,
                    outchannel,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm1d(outchannel),
            )
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        out = self.left(x)
        if self.use_res:
            out = out + self.shortcut(x)
        out = self.dropout(torch.tanh(out))
        return out
@ConfigMapper.map("models", "ResCNN")
class ResCNN(nn.Module):
    """Residual CNN text classifier with label-wise attention output."""

    def __init__(self, config):
        super(ResCNN, self).__init__()
        Y = config.num_classes
        self.dicts = load_lookups(
            dataset_dir=config.dataset_dir,
            mimic_dir=config.mimic_dir,
            static_dir=config.static_dir,
            word2vec_dir=config.word2vec_dir,
            version=config.version,
        )
        self.word_rep = WordRep(config)
        self.conv = nn.ModuleList()
        # Channel sizes for a stack of `conv_layer` residual blocks.
        conv_dimension = self.word_rep.conv_dict[config.conv_layer]
        for idx in range(config.conv_layer):
            tmp = ResidualBlock(
                conv_dimension[idx],
                conv_dimension[idx + 1],
                int(config.filter_size),
                1,
                True,
                config.dropout,
            )
            self.conv.add_module("conv-{}".format(idx), tmp)
        self.output_layer = OutputLayer(Y, config.num_filter_maps)

    def forward(self, x):
        x = self.word_rep(x)
        x = x.transpose(1, 2)
        for conv in self.conv:
            x = conv(x)
        x = x.transpose(1, 2)
        # BUG FIX: OutputLayer returns a single logits tensor; the original
        # `y, loss = self.output_layer(x)` tried to unpack it into two values
        # (iterating the tensor's first dimension). Return the logits alone,
        # matching MultiResCNN.forward.
        y = self.output_layer(x)
        return y
@ConfigMapper.map("models", "multirescnn")
class MultiResCNN(nn.Module):
    """Multi-Filter Residual CNN for ICD coding (see reference at file top).

    One conv "channel" per kernel size in ``config.filter_size``: each
    channel is a base Conv1d followed by ``config.conv_layer`` residual
    blocks. Channel outputs are concatenated and fed to the label-wise
    attention output layer.
    """
    def __init__(self, config):
        super(MultiResCNN, self).__init__()
        Y = config.num_classes
        # Vocabulary / code lookup tables (also consulted by callers).
        self.dicts = load_lookups(
            dataset_dir=config.dataset_dir,
            mimic_dir=config.mimic_dir,
            static_dir=config.static_dir,
            word2vec_dir=config.word2vec_dir,
            version=config.version,
        )
        self.word_rep = WordRep(config)
        self.conv = nn.ModuleList()
        # NOTE(review): assumes config.filter_size is a list of ints here
        # (MultiCNN above treats it as a comma-separated string) — confirm.
        self.filter_num = len(config.filter_size)
        for filter_size in config.filter_size:
            one_channel = nn.ModuleList()
            # Base conv keeps the embedding dimensionality ("same" padding).
            tmp = nn.Conv1d(
                self.word_rep.feature_size,
                self.word_rep.feature_size,
                kernel_size=filter_size,
                padding=floor(filter_size / 2),
            )
            xavier_uniform(tmp.weight)
            one_channel.add_module("baseconv", tmp)
            conv_dimension = self.word_rep.conv_dict[config.conv_layer]
            for idx in range(config.conv_layer):
                tmp = ResidualBlock(
                    conv_dimension[idx],
                    conv_dimension[idx + 1],
                    filter_size,
                    1,
                    True,
                    config.dropout,
                )
                one_channel.add_module("resconv-{}".format(idx), tmp)
            self.conv.add_module("channel-{}".format(filter_size), one_channel)
        self.output_layer = OutputLayer(
            Y, self.filter_num * config.num_filter_maps
        )
    def forward(self, x):
        # (batch, seq) token ids -> (batch, seq, emb) -> (batch, emb, seq)
        x = self.word_rep(x)
        x = x.transpose(1, 2)
        conv_result = []
        for conv in self.conv:
            tmp = x
            for idx, md in enumerate(conv):
                # tanh only after the base conv (idx 0); residual blocks
                # apply their own activations internally.
                if idx == 0:
                    tmp = torch.tanh(md(tmp))
                else:
                    tmp = md(tmp)
            tmp = tmp.transpose(1, 2)
            conv_result.append(tmp)
        # Concatenate all channels along the feature dimension.
        x = torch.cat(conv_result, dim=2)
        y = self.output_layer(x)
        return y
    def get_input_attention(self):
        # Use the attention score computed in the forward pass
        return self.output_layer.alpha.cpu().detach().numpy()
| 8,926 | 30.10453 | 79 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/models/dcan.py | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from torch.nn.utils import weight_norm as weight_norm_
from anemic.utils.mapper import ConfigMapper
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
@ConfigMapper.map("models", "dcan")
class DCAN(nn.Module):
    """
    This class is used to create the DCAN model.
    References:
        Paper: https://aclanthology.org/2020.clinicalnlp-1.8/
        GitHub Repository: https://github.com/shaoxiongji/DCAN
    For the parameters related to convolutional layers, please see this:
    https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html.
    Args:
        num_classes (int): Number of classes (ICD codes).
        conv_channel_sizes (list): List of lists of integers. Each list
                                   represents the channel sizes of convolutional
                                   layers in a `TemporalBlock`. So, for example,
                                   if the list is [[100, 600, 600],
                                   [600, 600, 600]].
                                   the `TemporalConvNet` layer will have 2
                                   `TemporalBlock`s, each temporal block have
                                   2 convolutional layers:
                                   Conv(100, 600), Conv(600, 600) for the first
                                   one, and Conv(600, 600), Conv(600, 600). If
                                   the `add_emb_size_to_channel_sizes`, we don't
                                   have to pass the input channel size. So, in
                                   the above case, we can just pass
                                   [[600, 600], [600, 600, 600]].
        add_emb_size_to_channel_sizes (bool): If True, you need not specify
                                              the input channel size. Please
                                              see the description of
                                              `conv_channel_sizes`.
        kernel_sizes (list): List of list of integers (same format as
                             `conv_channel_sizes`). Each integer represents the
                             kernel size/filter size of the respective
                             convolutional layer in `TemporalBlock` layer.
        strides (list): List of list of integers (same format as
                        `conv_channel_sizes`). Each integer represents the
                        stride of the respective convolutional layer in
                        `TemporalBlock` layer.
        paddings (list): List of list of integers (same format as
                         `conv_channel_sizes`). Each integer represents the
                         padding of the respective convolutional layer in
                         `TemporalBlock` layer. in DCAN, this value is set to
                         "(kernel_size - 1) * dilation_size".
        dilations (list): List of list of integers (same format as
                          `conv_channel_sizes`). Each integer represents the
                          dilation size of the respective convolutional layer
                          `TemporalBlock` layer.` In DCAN, this value is
                          "2^(temporal_block_level)".
        dropouts (list): List of list of floats (same format as
                         `conv_channel_sizes`). Each float represents the
                         dropout probability of the respective convolutional
                         `TemporalBlock` layer.
        weight_norm (bool): If True, apply weight normalization to the
                            convolutional layers.
        activation (str): Activation function to use. Should be one of "relu",
                          "elu", "leaky_relu".
    """
    def __init__(self, config):
        super(DCAN, self).__init__()
        logger.info(f"Initialising {self.__class__.__name__}")
        logger.debug(
            f"Initialising {self.__class__.__name__} with " f"config: {config}"
        )
        self.config = config
        self.word_embedding_layer = WordEmbeddingLayer(
            **config.word_representation_layer.params.init_params.as_dict()
        )
        # Optionally keep the pretrained embeddings fixed during training.
        if config.word_representation_layer.params.freeze_layer:
            self.freeze_layer(self.word_embedding_layer.embed)
        num_levels = len(config.kernel_sizes)
        num_inner_conv_levels = len(config.kernel_sizes[0])
        # Deep-copy before mutating so the caller's config is not modified.
        conv_channel_sizes = copy.deepcopy(config.conv_channel_sizes)
        if config.add_emb_size_to_channel_sizes:
            # Prepend the embedding size as the first block's input channels.
            conv_channel_sizes[0] = [
                self.word_embedding_layer.embedding_size
            ] + conv_channel_sizes[0]
        # Same dropout probability for every conv layer of every block.
        dropouts = [
            [config.dropout for _ in range(num_inner_conv_levels)]
            for _ in range(num_levels)
        ]
        self.temporal_conv_net = TemporalConvNet(
            conv_channel_sizes_=conv_channel_sizes,
            kernel_sizes_=config.kernel_sizes,
            strides_=config.strides,
            paddings_=config.paddings,
            dilations_=config.dilations,
            dropouts_=dropouts,
            weight_norm=config.weight_norm,
            activation=config.activation,
        )
        self.linear_layer = nn.Linear(
            conv_channel_sizes[-1][-1], config.projection_size
        )
        self.activation = ConfigMapper.get_object(
            "activations", config.activation
        )()
        self.output_layer = OutputLayer(
            config.projection_size, config.num_classes
        )
        xavier_uniform_(self.linear_layer.weight)
    def forward(self, data):
        # data: token ids -> embeddings; Conv1d expects (batch, emb, seq),
        # hence the transpose before/after the temporal conv net.
        x = self.word_embedding_layer(data)
        hid_seq = self.temporal_conv_net(x.transpose(1, 2)).transpose(1, 2)
        hid_seq = self.activation(self.linear_layer(hid_seq))
        logits = self.output_layer(hid_seq)
        return logits
    def freeze_layer(self, layer):
        # Disable gradients so the optimizer never updates this layer.
        for param in layer.parameters():
            param.requires_grad = False
    def get_input_attention(self):
        # Use the attention score computed in the forward pass
        return self.output_layer.label_wise_attn.alpha.cpu().detach().numpy()
class OutputLayer(nn.Module):
    """Label-wise attention followed by a per-label linear scoring head."""

    def __init__(self, input_size, num_classes):
        super(OutputLayer, self).__init__()
        self.label_wise_attn = LabelWiseAttn(input_size, num_classes)
        self.final = nn.Linear(input_size, num_classes)
        xavier_uniform_(self.final.weight)

    def forward(self, x):
        attended = self.label_wise_attn(x)
        weighted = self.final.weight.mul(attended)
        return weighted.sum(dim=2).add(self.final.bias)
class WordEmbeddingLayer(nn.Module):
    """Embedding lookup initialised from a pre-trained word2vec matrix.

    Args:
        embed_dir (str): Directory holding the pre-trained embedding matrix
            (loaded through the registered "word2vec" embedding class).
        dropout (float): Dropout probability applied to the embeddings.
    """

    def __init__(self, embed_dir, dropout):
        super(WordEmbeddingLayer, self).__init__()
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"embed_dir = {embed_dir}, dropout = {dropout}"
        )
        # Note: This should be changed, since we won't always use Word2Vec.
        embedding_cls = ConfigMapper.get_object("embeddings", "word2vec")
        weight_matrix = torch.Tensor(embedding_cls.load_emb_matrix(embed_dir))
        num_rows, num_cols = weight_matrix.size()[0], weight_matrix.size()[1]
        self.embed = nn.Embedding(num_rows, num_cols, padding_idx=0)
        self.embed.weight.data = weight_matrix.clone()
        self.embedding_size = self.embed.embedding_dim
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.dropout(self.embed(x))
class Chomp1d(nn.Module):
    """Trim the last ``chomp_size`` time steps from a (N, C, T) tensor.

    Used after a padded causal Conv1d to drop the extra right-side outputs
    so the sequence length is restored.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        trimmed = x[:, :, : -self.chomp_size]
        return trimmed.contiguous()
class ConvTemporalSubBlock(nn.Module):
    """Dilated causal convolution sub-block: Conv1d -> Chomp1d -> activation
    -> dropout, with optional weight normalisation.

    Adapted from
    https://github.com/shaoxiongji/DCAN/blob/master/models.py#L84-L88.
    For the Conv1d parameters see
    https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html.

    Args:
        in_channels (int): Input channels of the convolution.
        out_channels (int): Output channels of the convolution.
        kernel_size (int): Convolution kernel size.
        stride (int): Convolution stride.
        padding (int): Convolution padding (also the amount chomped).
        dilation (int): Convolution dilation.
        dropout (float): Dropout probability.
        weight_norm (bool): Apply weight normalisation to the convolution.
        activation (str): Activation name registered with ConfigMapper; see
            https://github.com/dalgu90/icd-coding-benchmark/blob/main/anemic/modules/activations.py.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        dropout=0.2,
        weight_norm=True,
        activation="relu",
    ):
        super(ConvTemporalSubBlock, self).__init__()
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"in_channels = {in_channels}, out_channels = "
            f"{out_channels}, kernel_size = {kernel_size}, "
            f"stride = {stride}, padding = {padding}, "
            f"dilation = {dilation}, dropout = {dropout}, "
            f"weight_norm = {weight_norm}, activation = {activation}"
        )
        conv = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        # Optionally wrap the convolution with weight normalisation.
        self.conv_layer = weight_norm_(conv) if weight_norm else conv
        self.chomp1d = Chomp1d(padding)
        self.activation = ConfigMapper.get_object("activations", activation)()
        self.dropout = nn.Dropout(dropout)
        self.__init_weights__()

    def __init_weights__(self):
        # Xavier-initialise the convolution weights.
        xavier_uniform_(self.conv_layer.weight)

    def forward(self, x):
        out = self.conv_layer(x)
        out = self.chomp1d(out)
        out = self.activation(out)
        return self.dropout(out)
class TemporalBlock(nn.Module):
    """Stack of ``ConvTemporalSubBlock``s with a residual connection.

    References:
        Paper: https://arxiv.org/abs/2009.14578
        Repository: https://github.com/shaoxiongji/DCAN/blob/master/models.py#L81

    Args:
        conv_channel_sizes (list): Channel sizes of the convolution chain;
            e.g. [100, 200, 300] yields Conv1d(100, 200) and Conv1d(200, 300).
        kernel_sizes (list): Kernel size per sub-block.
        strides (list): Stride per sub-block.
        paddings (list): Padding per sub-block.
        dilations (list): Dilation per sub-block.
        dropouts (list): Dropout probability per sub-block.
        weight_norm (bool): Apply weight normalisation (DCAN does).
        activation (str): Activation name (DCAN uses "relu").
    """

    def __init__(
        self,
        conv_channel_sizes,
        kernel_sizes,
        strides,
        paddings,
        dilations,
        dropouts,
        weight_norm=True,
        activation="relu",
    ):
        super(TemporalBlock, self).__init__()
        sub_blocks = []
        for in_ch, out_ch, kernel_size, stride, padding, dilation, dropout in zip(
            conv_channel_sizes[:-1],
            conv_channel_sizes[1:],
            kernel_sizes,
            strides,
            paddings,
            dilations,
            dropouts,
        ):
            sub_blocks.append(
                ConvTemporalSubBlock(
                    in_channels=in_ch,
                    out_channels=out_ch,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    dropout=dropout,
                    weight_norm=weight_norm,
                    activation=activation,
                )
            )
        self.conv_temporal_sub_blocks = nn.ModuleList(sub_blocks)
        # 1x1 convolution to match channel counts on the residual path,
        # only needed when input and output channel sizes differ.
        if conv_channel_sizes[0] != conv_channel_sizes[-1]:
            self.downsample = nn.Conv1d(
                conv_channel_sizes[0], conv_channel_sizes[-1], 1
            )
        else:
            self.downsample = None
        self.output_activation = ConfigMapper.get_object(
            "activations", activation
        )()
        self.init_weights()

    def init_weights(self):
        # Xavier-initialise the residual projection, if present.
        if self.downsample is not None:
            xavier_uniform_(self.downsample.weight)

    def forward(self, x):
        out = x
        for sub_block in self.conv_temporal_sub_blocks:
            out = sub_block(out)
        residual = x if self.downsample is None else self.downsample(x)
        return self.output_activation(out + residual)
class TemporalConvNet(nn.Module):
    """Stack of ``TemporalBlock``s, as used in the DCAN model.

    References:
        Paper: https://arxiv.org/abs/2009.14578
        Repository: https://github.com/shaoxiongji/DCAN/blob/master/models.py#L114

    Each argument is a list of per-block lists; element i configures the i-th
    ``TemporalBlock`` (see ``TemporalBlock`` for the meaning of each entry).

    Args:
        conv_channel_sizes_ (list): Per-block convolution channel sizes.
        kernel_sizes_ (list): Per-block kernel sizes.
        strides_ (list): Per-block strides.
        paddings_ (list): Per-block paddings; DCAN uses
            (kernel_size - 1) * dilation_size.
        dilations_ (list): Per-block dilation sizes; DCAN uses
            2^(temporal_block_level).
        dropouts_ (list): Per-block dropout probabilities.
        weight_norm (bool): Apply weight normalisation to the convolutions.
        activation (str): Activation name (DCAN uses "relu").
    """

    def __init__(
        self,
        conv_channel_sizes_,
        kernel_sizes_,
        strides_,
        paddings_,
        dilations_,
        dropouts_,
        weight_norm=True,
        activation="relu",
    ):
        super(TemporalConvNet, self).__init__()
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"conv_channel_sizes_ = {conv_channel_sizes_}, "
            f"kernel_sizes_ = {kernel_sizes_}, "
            f"strides_ = {strides_}, paddings_ = {paddings_}, "
            f"dilations_ = {dilations_}, dropouts_ = {dropouts_}, "
            f"weight_norm = {weight_norm}, activation = {activation}"
        )
        blocks = []
        for (
            conv_channel_sizes,
            kernel_sizes,
            strides,
            paddings,
            dilations,
            dropouts,
        ) in zip(
            conv_channel_sizes_,
            kernel_sizes_,
            strides_,
            paddings_,
            dilations_,
            dropouts_,
        ):
            blocks.append(
                TemporalBlock(
                    conv_channel_sizes=conv_channel_sizes,
                    kernel_sizes=kernel_sizes,
                    strides=strides,
                    paddings=paddings,
                    dilations=dilations,
                    dropouts=dropouts,
                    weight_norm=weight_norm,
                    activation=activation,
                )
            )
        self.temporal_blocks = nn.ModuleList(blocks)

    def forward(self, x):
        for temporal_block in self.temporal_blocks:
            x = temporal_block(x)
        return x
class LabelWiseAttn(nn.Module):
    """Label-wise attention layer (as implemented in CAML, DCAN, etc.).

    References:
        Papers: https://arxiv.org/abs/1802.05695 (Section 2.2)
        Repository: https://github.com/jamesmullenbach/caml-mimic/blob/master/learn/models.py#L184

    Args:
        input_size (int): Feature size at each sequence position (channels of
            a conv output or hidden size of a dense output).
        num_classes (int): Number of labels/classes.
    """

    def __init__(self, input_size, num_classes):
        super(LabelWiseAttn, self).__init__()
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"input size = {input_size}, num_classes = {num_classes}"
        )
        self.U = nn.Linear(input_size, num_classes)
        xavier_uniform_(self.U.weight)

    def forward(self, x):
        # Attention scores per label: (batch, num_classes, seq_len).
        scores = self.U.weight.matmul(x.transpose(1, 2))
        # Stored as an attribute so it can be inspected after the forward.
        self.alpha = F.softmax(scores, dim=2)
        # Label-specific document representations: (batch, num_classes, dim).
        return self.alpha.matmul(x)
| 20,705 | 39.759843 | 115 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/models/transicd.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from anemic.utils.mapper import ConfigMapper
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
@ConfigMapper.map("models", "transicd")
class TransICD(nn.Module):
    """TransICD: Transformer encoder over pre-trained word embeddings with
    label-wise attention and a shared per-label linear output layer.

    Config fields read here: embed_dir, dropout, max_len, num_heads,
    freeze_embedding_layer, pad_idx, num_classes,
    transformer_ff_up_scale_factor_for_hidden_dim, num_layers,
    attn_expansion.
    """

    def __init__(self, config):
        super(TransICD, self).__init__()
        logger.info(f"Initialising {self.__class__.__name__}")
        logger.debug(
            f"Initialising {self.__class__.__name__} with " f"config: {config}"
        )
        self.word_embedding_layer = WordEmbeddingLayer(
            embed_dir=config.embed_dir
        )
        self.embed_size = self.word_embedding_layer.embedding_size
        self.dropout = nn.Dropout(config.dropout)
        self.positional_embedding_layer = PositionalEmbeddingLayer(
            d_model=self.embed_size,
            dropout=config.dropout,
            max_len=config.max_len,
        )
        # Multi-head attention requires embed_size to split evenly per head.
        if self.embed_size % config.num_heads != 0:
            raise ValueError(
                f"Embedding size {self.embed_size} needs to be divisible by "
                f"the number of heads {config.num_heads}"
            )
        if config.freeze_embedding_layer:
            self.freeze_layer(self.word_embedding_layer)
        self.pad_idx = config.pad_idx
        self.num_classes = config.num_classes
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=self.embed_size,
            nhead=config.num_heads,
            dim_feedforward=config.transformer_ff_up_scale_factor_for_hidden_dim
            * self.embed_size,
            dropout=config.dropout,
        )
        self.encoder = nn.TransformerEncoder(
            encoder_layer=encoder_layer, num_layers=config.num_layers
        )
        self.label_attention_layer = LabelAttentionLayer(
            embed_size=self.embed_size,
            num_classes=config.num_classes,
            attn_expansion=config.attn_expansion,
        )
        # The official code (and paper) has a separate linear layer for every
        # code. This is different from the convention; generally, a shared
        # linear layer is used.
        # self.ff_layers = nn.ModuleList(
        #     [
        #         nn.Linear(self.embed_size, 1)
        #         for code in range(config.num_classes)
        #     ]
        # )
        # Trick: Use one linear layer as per-code linear layers
        self.ff_layer = nn.Linear(self.embed_size, config.num_classes)

    def freeze_layer(self, layer):
        """Disable gradient updates for every parameter of *layer*."""
        for param in layer.parameters():
            param.requires_grad = False

    def forward(self, inputs):
        """Compute per-code logits for a batch of token-index sequences."""
        # `inputs` shape: (batch_size, seq_len)
        batch_size = inputs.shape[0]
        # `attn_mask` shape: (batch_size, seq_len, 1)
        attn_mask = (inputs != self.pad_idx).unsqueeze(2)
        # `src_key_padding_mask` shape: (batch_size, seq_len)
        src_key_padding_mask = inputs == self.pad_idx
        # Look up the embeddings of the tokens present in the input.
        # `embeddings` shape: (batch_size, seq_len, embed_size)
        embeddings = self.word_embedding_layer(inputs)
        # The authors do some sort of scaling here - they multiply the
        # embeddings with the square root of `embed_size`.
        embeddings = embeddings * math.sqrt(self.embed_size)
        # Add the positional embedding to the word embedding.
        embeddings = self.positional_embedding_layer(embeddings)
        embeddings = self.dropout(embeddings)
        # `embeddings` shape: (seq_len, batch_size, embed_size)
        embeddings = embeddings.permute(1, 0, 2)
        # Pass the embedded input through the Transformer model.
        # `encoded_inputs` shape: (batch_size, seq_len, embed_size)
        encoded_inputs = self.encoder(
            embeddings, src_key_padding_mask=src_key_padding_mask
        )
        encoded_inputs = encoded_inputs.permute(1, 0, 2)
        # `weighted_outputs` shape: (batch_size, num_classes, embed_size)
        weighted_outputs, self.attn_weights = self.label_attention_layer(
            encoded_inputs, attn_mask
        )
        # outputs = torch.zeros(batch_size, self.num_classes).to(inputs.device)
        # for code, ff_layer in enumerate(self.ff_layers):
        #     outputs[:, code : code + 1] = ff_layer(weighted_outputs[:, code])
        # Trick: Use one linear layer as per-code linear layers
        outputs = (weighted_outputs * self.ff_layer.weight).sum(axis=2)
        outputs += self.ff_layer.bias
        return outputs

    def get_input_attention(self):
        """Return the label attention weights from the last forward pass."""
        # Use the attention score computed in the forward pass
        return self.attn_weights.cpu().detach().numpy()
class WordEmbeddingLayer(nn.Module):
    """Embedding layer initialised from a pre-trained word-embedding matrix.

    Args:
        embed_dir (str): Directory holding the pre-trained embedding matrix,
            among other things; see
            https://github.com/dalgu90/icd-coding-benchmark/blob/main/anemic/modules/embeddings.py#L17
            for details.
    """

    def __init__(self, embed_dir):
        super(WordEmbeddingLayer, self).__init__()
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"embed_dir = {embed_dir}"
        )
        # Note: This should be changed, since we won't always use Word2Vec.
        embedding_cls = ConfigMapper.get_object("embeddings", "word2vec")
        weights = torch.Tensor(embedding_cls.load_emb_matrix(embed_dir))
        vocab_size, emb_dim = weights.size()
        self.embed = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.embed.weight.data = weights.clone()
        self.embedding_size = self.embed.embedding_dim

    def forward(self, x):
        return self.embed(x)
class PositionalEmbeddingLayer(nn.Module):
    """Sinusoidal positional embeddings for transformers.

    Inspired by https://nlp.seas.harvard.edu/2018/04/03/attention.html; for
    an intuitive explanation see
    https://kazemnejad.com/blog/transformer_architecture_positional_encoding/.

    Args:
        d_model (int): Input embedding size. Defaults to 128.
        dropout (float): Dropout probability.
        max_len (int): Maximum length of the input sequence.
    """

    def __init__(self, d_model=128, dropout=0.1, max_len=2500):
        super(PositionalEmbeddingLayer, self).__init__()
        # Bug fix: the debug message previously hard-coded
        # "max_len = 74,120" instead of logging the actual argument.
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"d_model = {d_model}, dropout = {dropout}, max_len = {max_len}"
        )
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once in log space.
        positional_emb = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)
        )
        positional_emb[:, 0::2] = torch.sin(position * div_term)
        positional_emb[:, 1::2] = torch.cos(position * div_term)
        # Revisit this step. In the official TransICD code, they take the
        # transpose: positional_emb.unsqueeze(0).transpose(0, 1). Not sure if
        # that makes sense. The shape changes from (bsz, max_len, d_model) to
        # (max_len, bsz, d_model).
        positional_emb = positional_emb.unsqueeze(0)
        # Registered as a buffer: saved with the module but not trained.
        self.register_buffer("positional_emb", positional_emb)

    def forward(self, x):
        # Add the (frozen) positional embeddings for the first x.size(1)
        # positions, then apply dropout.
        x = x + self.positional_emb[:, : x.size(1)]
        return self.dropout(x)
class LabelAttentionLayer(nn.Module):
    """Label attention: computes one attended representation per ICD code.

    Note: The official TransICD code has two attention layers
    (https://github.com/biplob1ly/TransICD/blob/main/code/models.py#L8 and
    https://github.com/biplob1ly/TransICD/blob/main/code/models.py#L36);
    only the first is actually used by the model.

    Args:
        embed_size (int): Input embedding size. Defaults to 128.
        num_classes (int): Number of ICD codes, i.e. the output size of the
            second linear layer. Defaults to 50.
        attn_expansion (int): Scaling factor for the hidden attention
            dimension. Defaults to 2.
    """

    def __init__(self, embed_size=128, num_classes=50, attn_expansion=2):
        super(LabelAttentionLayer, self).__init__()
        logger.debug(
            f"Initialising {self.__class__.__name__} with "
            f"embed_size = {embed_size}, num_classes = {num_classes}, "
            f"attn_expansion = {attn_expansion}"
        )
        self.linear_layer_1 = nn.Linear(
            in_features=embed_size, out_features=embed_size * attn_expansion
        )
        self.tanh_activation = nn.Tanh()
        self.linear_layer_2 = nn.Linear(
            in_features=embed_size * attn_expansion, out_features=num_classes
        )
        self.softmax_activation = nn.Softmax(dim=1)

    def forward(self, hidden, attn_mask=None):
        # hidden: (batch_size, seq_len, embed_size)
        # scores: (batch_size, seq_len, (attn_expansion x embed_size))
        scores = self.tanh_activation(self.linear_layer_1(hidden))
        # scores: (batch_size, seq_len, num_classes)
        scores = self.linear_layer_2(scores)
        # Mask padded positions with a large negative value so they receive
        # ~zero weight after the softmax. (-1e9 is the standard choice here:
        # the fill must be very negative, not merely small.)
        if attn_mask is not None:
            scores = scores.masked_fill_(mask=attn_mask == 0, value=-1e9)
        # attn_weights: (batch_size, num_classes, seq_len)
        attn_weights = self.softmax_activation(scores).transpose(1, 2)
        # weighted_output: (batch_size, num_classes, embed_size)
        weighted_output = attn_weights @ hidden
        return weighted_output, attn_weights
| 10,178 | 37.411321 | 115 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/models/caml.py | """
CAML model (Mullenbach et al. 2018)
https://github.com/jamesmullenbach/caml-mimic
"""
from math import floor
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.init import xavier_uniform
from anemic.utils.mapper import ConfigMapper
from anemic.utils.model_utils import load_lookups, pad_desc_vecs
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
# From learn/models.py
# From learn/models.py
class BaseModel(nn.Module):
    """Shared base for the CAML-family models: loads lookup dicts, builds the
    pre-trained embedding layer, and provides the description-embedding
    regulariser used when ``config.lmbda > 0``.

    Subclasses are expected to define ``self.final`` (the classifier weights)
    and, when the regulariser is used, ``self.desc_embedding``,
    ``self.label_conv`` and ``self.label_fc1``.
    """

    def __init__(self, config):
        super(BaseModel, self).__init__()
        self.config = config
        # Y: number of output labels (ICD codes).
        self.Y = config.num_classes
        self.embed_drop = nn.Dropout(p=config.dropout)
        # Lookup tables (word<->index, code<->index, code descriptions, ...).
        self.dicts = load_lookups(
            dataset_dir=config.dataset_dir,
            mimic_dir=config.mimic_dir,
            static_dir=config.static_dir,
            word2vec_dir=config.word2vec_dir,
            version=config.version,
        )
        # make embedding layer
        embedding_cls = ConfigMapper.get_object("embeddings", "word2vec")
        W = torch.Tensor(embedding_cls.load_emb_matrix(config.word2vec_dir))
        self.embed = nn.Embedding(W.size()[0], W.size()[1], padding_idx=0)
        self.embed.weight.data = W.clone()

    def embed_descriptions(self, desc_data):
        """Embed each instance's label descriptions through the description
        conv module; returns a list with one tensor (or empty list) per
        instance."""
        # label description embedding via convolutional layer
        # number of labels is inconsistent across instances, so have to iterate
        # over the batch
        # Whether the model is using GPU
        gpu = next(self.parameters()).is_cuda
        b_batch = []
        for inst in desc_data:
            if len(inst) > 0:
                if gpu:
                    lt = Variable(torch.cuda.LongTensor(inst))
                else:
                    lt = Variable(torch.LongTensor(inst))
                d = self.desc_embedding(lt)
                d = d.transpose(1, 2)
                d = self.label_conv(d)
                # NOTE(review): F.tanh is deprecated in modern PyTorch;
                # torch.tanh is the drop-in replacement.
                d = F.max_pool1d(F.tanh(d), kernel_size=d.size()[2])
                d = d.squeeze(2)
                b_inst = self.label_fc1(d)
                b_batch.append(b_inst)
            else:
                b_batch.append([])
        return b_batch

    def _compare_label_embeddings(self, target, b_batch, desc_data):
        """Per-instance mean-squared distance between description embeddings
        (b_batch) and the corresponding rows of ``self.final.weight``,
        scaled by ``config.lmbda``."""
        # description regularization loss
        # b is the embedding from description conv
        # iterate over batch because each instance has different # labels
        diffs = []
        for i, bi in enumerate(b_batch):
            ti = target[i]
            inds = torch.nonzero(ti.data).squeeze().cpu().numpy()
            zi = self.final.weight[inds, :]
            diff = (zi - bi).mul(zi - bi).mean()
            # multiply by number of labels to make sure overall mean is balanced
            # with regard to number of labels
            diffs.append(self.config.lmbda * diff * bi.size()[0])
        return diffs
@ConfigMapper.map("models", "CAML")
class ConvAttnPool(BaseModel):
    """CAML (Mullenbach et al. 2018): convolution + per-label attention
    pooling + per-label linear classifier, with an optional description
    regulariser (DR-CAML) when ``config.lmbda > 0``.

    Section numbers in the comments refer to the CAML paper
    (https://arxiv.org/abs/1802.05695).
    """

    def __init__(self, config):
        cls_name = self.__class__.__name__
        logger.info(f"Initializing {cls_name}")
        logger.debug(f"Initializing {cls_name} with config: {config}")
        super(ConvAttnPool, self).__init__(config=config)
        self.pad_idx = self.dicts["w2ind"][config.pad_token]
        self.unk_idx = self.dicts["w2ind"][config.unk_token]
        # initialize conv layer as in 2.1
        self.conv = nn.Conv1d(
            config.embed_size,
            config.num_filter_maps,
            kernel_size=config.kernel_size,
            padding=int(floor(config.kernel_size / 2)),
        )
        xavier_uniform(self.conv.weight)
        # context vectors for computing attention as in 2.2
        self.U = nn.Linear(config.num_filter_maps, self.Y)
        xavier_uniform(self.U.weight)
        # final layer: create a matrix to use for the L binary classifiers as in
        # 2.3
        self.final = nn.Linear(config.num_filter_maps, self.Y)
        xavier_uniform(self.final.weight)
        # initialize with trained code embeddings if applicable
        if config.init_code_emb:
            if config.embed_size != config.num_filter_maps:
                logger.warn(
                    "Cannot init attention vectors since the dimension differ"
                    "from the dimension of the embedding"
                )
            else:
                self._code_emb_init()
                # also set conv weights to do sum of inputs
                weights = (
                    torch.eye(config.embed_size)
                    .unsqueeze(2)
                    .expand(-1, -1, config.kernel_size)
                    / config.kernel_size
                )
                self.conv.weight.data = weights.clone()
                self.conv.bias.data.zero_()
        # conv for label descriptions as in 2.5
        # description module has its own embedding and convolution layers
        if config.lmbda > 0:
            W = self.embed.weight.data
            self.desc_embedding = nn.Embedding(
                W.size()[0], W.size()[1], padding_idx=0
            )
            self.desc_embedding.weight.data = W.clone()
            self.label_conv = nn.Conv1d(
                config.embed_size,
                config.num_filter_maps,
                kernel_size=config.kernel_size,
                padding=int(floor(config.kernel_size / 2)),
            )
            xavier_uniform(self.label_conv.weight)
            self.label_fc1 = nn.Linear(
                config.num_filter_maps, config.num_filter_maps
            )
            xavier_uniform(self.label_fc1.weight)
            # Pre-process the code description into word idxs
            self.dv_dict = {}
            ind2c = self.dicts["ind2c"]
            w2ind = self.dicts["w2ind"]
            desc_dict = self.dicts["desc"]
            for i, c in ind2c.items():
                desc_vec = [
                    w2ind[w] if w in w2ind else self.unk_idx
                    for w in desc_dict[c]
                ]
                self.dv_dict[i] = desc_vec

    def _code_emb_init(self):
        """Initialise ``self.U`` and ``self.final`` from averaged word
        embeddings of each code's description."""
        # In the original CAML repo, this method seems not being called.
        # In this implementation, we compute the AVERAGE word2vec embeddings for
        # each code and initialize the self.U and self.final with it.
        ind2c = self.dicts["ind2c"]
        w2ind = self.dicts["w2ind"]
        desc_dict = self.dicts["desc"]
        weights = torch.zeros_like(self.final.weight)
        for i, c in ind2c.items():
            desc_vec = [
                w2ind[w] if w in w2ind else self.unk_idx
                for w in desc_dict[c].split()
            ]
            weights[i] = self.embed(torch.tensor(desc_vec)).mean(axis=0)
        self.U.weight.data = torch.Tensor(weights).clone()
        self.final.weight.data = torch.Tensor(weights).clone()

    def forward(self, x):
        """Compute per-code logits for a batch of token-index sequences."""
        # get embeddings and apply dropout
        x = self.embed(x)
        x = self.embed_drop(x)
        x = x.transpose(1, 2)
        # apply convolution and nonlinearity (tanh)
        x = F.tanh(self.conv(x).transpose(1, 2))
        # apply attention
        self.alpha = F.softmax(self.U.weight.matmul(x.transpose(1, 2)), dim=2)
        # document representations are weighted sums using the attention. Can
        # compute all at once as a matmul
        m = self.alpha.matmul(x)
        # final layer classification
        y = self.final.weight.mul(m).sum(dim=2).add(self.final.bias)
        return y

    def regularizer(self, labels=None):
        """DR-CAML description regulariser; returns 0.0 when lmbda is 0."""
        if not self.config.lmbda:
            return 0.0
        # Retrieve the description tokens of the labels
        desc_vecs = []
        for label in labels:
            desc_vecs.append(
                [self.dv_dict[i] for i, l in enumerate(label) if l]
            )
        desc_data = [np.array(pad_desc_vecs(dvs)) for dvs in desc_vecs]
        # run descriptions through description module
        b_batch = self.embed_descriptions(desc_data)
        # get l2 similarity loss
        diffs = self._compare_label_embeddings(labels, b_batch, desc_data)
        diff = torch.stack(diffs).mean()
        return diff

    def get_input_attention(self):
        """Return attention weights from the last forward pass (NumPy)."""
        # Use the attention score computed in the forward pass
        return self.alpha[:, :, :-1].cpu().detach().numpy()
@ConfigMapper.map("models", "CNN")
class VanillaConv(BaseModel):
    """Plain CNN baseline: Conv1d over embeddings, tanh + max-pool over the
    whole sequence, then a linear classifier."""

    def __init__(self, config):
        cls_name = self.__class__.__name__
        logger.info(f"Initializing {cls_name}")
        logger.debug(f"Initializing {cls_name} with config: {config}")
        super(VanillaConv, self).__init__(config)
        # Convolution over the embedded sequence (paper section 2.1).
        self.conv = nn.Conv1d(
            config.embed_size,
            config.num_filter_maps,
            kernel_size=config.kernel_size,
        )
        xavier_uniform(self.conv.weight)
        # Linear output layer.
        self.fc = nn.Linear(config.num_filter_maps, self.Y)
        xavier_uniform(self.fc.weight)

    def forward(self, x):
        # Embed tokens, apply dropout, move channels to dim 1 for Conv1d.
        embedded = self.embed_drop(self.embed(x)).transpose(1, 2)
        # Convolve, squash with tanh, then max-pool over the time dimension.
        conv_out = self.conv(embedded)
        pooled = F.max_pool1d(F.tanh(conv_out), kernel_size=conv_out.size()[2])
        pooled = pooled.squeeze(dim=2)
        # Linear classification layer.
        return self.fc(pooled)
| 9,333 | 33.828358 | 80 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/models/fusion.py | """
Fusion model (Luo et al. 2021)
https://github.com/machinelearning4health/Fusion-Towards-Automated-ICD-Coding
"""
from math import floor
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_ as xavier_uniform
from anemic.utils.mapper import ConfigMapper
from anemic.utils.model_utils import load_lookups
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
class WordRep(nn.Module):
    """Word-representation layer for Fusion: pre-trained embeddings plus
    dropout, and channel-size templates for 1-4 stacked conv layers."""

    def __init__(self, config):
        super(WordRep, self).__init__()
        # Build the embedding layer from a pre-trained word2vec matrix.
        embedding_cls = ConfigMapper.get_object("embeddings", "word2vec")
        weights = torch.Tensor(
            embedding_cls.load_emb_matrix(config.word2vec_dir)
        )
        self.embed = nn.Embedding(
            weights.size()[0], weights.size()[1], padding_idx=0
        )
        self.embed.weight.data = weights.clone()
        self.feature_size = self.embed.embedding_dim
        self.embed_drop = nn.Dropout(p=config.dropout)
        # Channel progressions keyed by the number of convolutional layers.
        self.conv_dict = {
            1: [self.feature_size, config.num_filter_maps],
            2: [self.feature_size, 100, config.num_filter_maps],
            3: [self.feature_size, 150, 100, config.num_filter_maps],
            4: [self.feature_size, 200, 150, 100, config.num_filter_maps],
        }

    def forward(self, x):
        return self.embed_drop(self.embed(x))
class AttentionBolckV2(nn.Module):
    """Attention-weighted pooling block.

    With ``is_max_pool=True`` the sequence is split into windows of
    ``pool_size`` and each window is collapsed by a softmax-weighted sum of
    its positions; otherwise the attention scores rescale the features
    before an average pool.
    """

    def __init__(self, inchannel, pool_size, is_max_pool=True):
        super(AttentionBolckV2, self).__init__()
        self.is_max_pool = is_max_pool
        self.pool_size = pool_size
        # 1x1 convolution producing one attention score per position.
        self.att_conv = nn.Sequential(
            nn.Conv1d(inchannel, 1, kernel_size=1, stride=1, bias=False),
        )
        if is_max_pool:
            self.squeeze_pool = nn.MaxPool1d(
                pool_size, pool_size, return_indices=True
            )
        else:
            self.squeeze_pool = nn.AvgPool1d(pool_size, pool_size)

    def forward(self, x):
        if self.is_max_pool:
            # Right-pad so the length is a multiple of pool_size.
            remainder = x.shape[2] % self.pool_size
            if remainder != 0:
                x = torch.nn.functional.pad(
                    x, [0, self.pool_size - remainder]
                )
            scores = self.att_conv(x)
            scores = scores.view(
                scores.shape[0], scores.shape[1], -1, self.pool_size
            )
            scores = torch.softmax(scores, dim=3)
            x = x.view(x.shape[0], x.shape[1], -1, self.pool_size)
            # Softmax-weighted sum within each window.
            x = (x * scores).sum(dim=3)
        else:
            x = x * self.att_conv(x)
            x = self.squeeze_pool(x)
        return x
class ResidualBlockHidden(nn.Module):
    """Residual 1-D convolution block: two conv+norm stages with an optional
    1x1-conv shortcut, followed by activation and dropout.

    Args:
        inchannel (int): Input channels.
        outchannel (int): Output channels.
        kernel_size (int): Kernel size of both convolutions.
        stride (int): Stride of the first convolution (and the shortcut).
        use_res (bool): Add the (projected) input back before the output
            activation.
        dropout (float): Dropout probability applied after the activation.
        use_layer_norm (bool): Use GroupNorm(1, C) instead of BatchNorm1d.
        is_relu (bool): Use LeakyReLU activations when True, Tanh otherwise.
    """

    def __init__(
        self,
        inchannel,
        outchannel,
        kernel_size,
        stride,
        use_res,
        dropout,
        use_layer_norm=False,
        is_relu=True,
    ):
        super(ResidualBlockHidden, self).__init__()

        def make_norm():
            # GroupNorm with one group normalises across all channels.
            if use_layer_norm:
                return nn.GroupNorm(1, outchannel)
            return nn.BatchNorm1d(outchannel)

        def make_activation():
            return nn.LeakyReLU() if is_relu else nn.Tanh()

        same_pad = int(floor(kernel_size / 2))
        self.left = nn.Sequential(
            nn.Conv1d(
                inchannel,
                outchannel,
                kernel_size=kernel_size,
                stride=stride,
                padding=same_pad,
                bias=False,
            ),
            make_norm(),
            make_activation(),
            nn.Conv1d(
                outchannel,
                outchannel,
                kernel_size=kernel_size,
                stride=1,
                padding=same_pad,
                bias=False,
            ),
            make_norm(),
        )
        self.use_res = use_res
        if self.use_res:
            # 1x1 projection so the shortcut matches the main path's shape.
            self.shortcut = nn.Sequential(
                nn.Conv1d(
                    inchannel,
                    outchannel,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                make_norm(),
            )
        self.dropout = nn.Dropout(p=dropout)
        self.out_activation = make_activation()

    def forward(self, x):
        out = self.left(x)
        if self.use_res:
            out = out + self.shortcut(x)
        out = self.out_activation(out)
        return self.dropout(out)
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention mechanism."""

    def __init__(self, attention_dropout=0.0):
        super(ScaledDotProductAttention, self).__init__()
        self.dropout = nn.Dropout(attention_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, scale=None, attn_mask=None):
        """Compute attention.

        Args:
            q: Queries tensor with shape [B, L_q, D_q].
            k: Keys tensor with shape [B, L_k, D_k].
            v: Values tensor with shape [B, L_v, D_v] (generally L_v == L_k).
            scale: Optional multiplicative scale factor for the scores.
            attn_mask: Optional masking tensor [B, L_q, L_k]; masked
                positions are filled with -inf before the softmax.

        Returns:
            (context, attention) tensors.
        """
        scores = torch.bmm(q, k.transpose(1, 2))
        if scale:
            scores = scores * scale
        if attn_mask is not None:
            scores = scores.masked_fill_(attn_mask, -np.inf)
        weights = self.dropout(self.softmax(scores))
        context = torch.bmm(weights, v)
        return context, weights
class MultiHeadAttention(nn.Module):
    """Multi-head attention with residual connection and layer norm."""

    def __init__(self, model_dim=512, num_heads=8, dropout=0.0):
        super(MultiHeadAttention, self).__init__()
        self.dim_per_head = model_dim // num_heads
        self.num_heads = num_heads
        proj_dim = self.dim_per_head * num_heads
        self.linear_k = nn.Linear(model_dim, proj_dim)
        self.linear_v = nn.Linear(model_dim, proj_dim)
        self.linear_q = nn.Linear(model_dim, proj_dim)
        self.dot_product_attention = ScaledDotProductAttention(dropout)
        self.linear_final = nn.Linear(model_dim, model_dim)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(model_dim)

    def forward(self, key, value, query, attn_mask=None):
        residual = query
        heads = self.num_heads
        head_dim = self.dim_per_head
        batch_size = key.size(0)

        # Linear projections, then split heads along the batch axis.
        key = self.linear_k(key).view(batch_size * heads, -1, head_dim)
        value = self.linear_v(value).view(batch_size * heads, -1, head_dim)
        query = self.linear_q(query).view(batch_size * heads, -1, head_dim)

        if attn_mask is not None:
            attn_mask = attn_mask.repeat(heads, 1, 1)

        # NOTE(review): after the view, key.size(-1) == head_dim, so this
        # scale is (head_dim // num_heads) ** -0.5 rather than the usual
        # head_dim ** -0.5 -- kept as-is to preserve behaviour; confirm
        # against the upstream Fusion implementation.
        scale = (key.size(-1) // heads) ** -0.5
        context, attention = self.dot_product_attention(
            query, key, value, scale, attn_mask
        )

        # Re-merge the heads and project back to model_dim.
        context = context.view(batch_size, -1, head_dim * heads)
        output = self.dropout(self.linear_final(context))
        # Residual connection followed by layer norm.
        output = self.layer_norm(residual + output)
        return output, attention
class EncoderLayer(nn.Module):
    """Single Transformer encoder layer: self-attention + position-wise FFN.

    NOTE(review): the default ffn_dim=2018 looks like a typo for 2048; it is
    kept unchanged for compatibility with existing callers.
    """

    def __init__(self, model_dim=512, num_heads=8, ffn_dim=2018, dropout=0.0):
        super(EncoderLayer, self).__init__()
        self.attention = MultiHeadAttention(model_dim, num_heads, dropout)
        self.feed_forward = PositionalWiseFeedForward(
            model_dim, ffn_dim, dropout
        )

    def forward(self, inputs, attn_mask=None):
        # Self-attention: key, value and query are all the inputs.
        attended, attention = self.attention(
            inputs, inputs, inputs, attn_mask
        )
        # Position-wise feed-forward network (includes residual + norm).
        return self.feed_forward(attended), attention
class PositionalWiseFeedForward(nn.Module):
    """Position-wise feed-forward network implemented with 1x1 convolutions,
    with residual connection and layer norm."""

    def __init__(self, model_dim=512, ffn_dim=2048, dropout=0.0):
        super(PositionalWiseFeedForward, self).__init__()
        self.w1 = nn.Conv1d(model_dim, ffn_dim, 1)
        self.w2 = nn.Conv1d(ffn_dim, model_dim, 1)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(model_dim)

    def forward(self, x):
        # Conv1d expects (batch, channels, length), so transpose in and out.
        hidden = self.w1(x.transpose(1, 2))
        projected = self.w2(F.relu(hidden))
        projected = self.dropout(projected.transpose(1, 2))
        # Residual connection followed by layer norm.
        return self.layer_norm(x + projected)
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding with an extra all-zero row for PAD.

    Index 0 of the internal embedding corresponds to padding positions and
    maps to a zero vector; real positions are indexed from 1.
    """

    def __init__(self, d_model, max_seq_len):
        """Build the frozen positional-encoding table.

        Args:
            d_model: Dimension of the model embeddings.
            max_seq_len: Maximum length of a text sequence.
        """
        super(PositionalEncoding, self).__init__()
        # PE(pos, j) = pos / 10000^(2*(j//2)/d_model), with sin applied to
        # even columns and cos to odd columns (standard Transformer form).
        angles = np.array(
            [
                [
                    pos / np.power(10000, 2.0 * (j // 2) / d_model)
                    for j in range(d_model)
                ]
                for pos in range(max_seq_len)
            ]
        )
        angles[:, 0::2] = np.sin(angles[:, 0::2])
        angles[:, 1::2] = np.cos(angles[:, 1::2])
        table = torch.from_numpy(angles.astype(np.float32))
        # Prepend a zero row: the positional encoding for PAD positions.
        table = torch.cat((torch.zeros([1, d_model]), table))
        # +1 accounts for the PAD row added above.
        self.position_encoding = nn.Embedding(max_seq_len + 1, d_model)
        self.position_encoding.weight = nn.Parameter(
            table, requires_grad=False
        )

    def forward(self, input_len):
        """Return aligned positional encodings for a batch of lengths.

        Args:
            input_len: Tensor holding one sequence length per batch item.

        Returns:
            A tuple (encodings [B, max_len, d_model], position indices
            [B, max_len]); positions past a sequence's length are 0 (PAD)
            and therefore encode to zero vectors.
        """
        max_len = torch.max(input_len)
        # Positions are 1-based; 0 is reserved for PAD.
        pos = np.zeros([len(input_len), max_len])
        for row, length in enumerate(input_len):
            for position in range(1, length + 1):
                pos[row, position - 1] = position
        input_pos = torch.LongTensor(pos)
        if input_len.is_cuda:
            input_pos = input_pos.cuda()
        return self.position_encoding(input_pos), input_pos
class EncoderHidden(nn.Module):
    """
    Feature aggregation layer of Fusion model.
    Arguments `vocab_size` and `gpu` are removed when porting.
    """

    def __init__(
        self,
        max_seq_len,
        num_layers=1,
        model_dim=256,
        num_heads=4,
        ffn_dim=1024,
        dropout=0.0,
    ):
        super(EncoderHidden, self).__init__()
        # Stack of transformer encoder layers. Their internal dropout is
        # fixed at 0.0 (as in the original code); `dropout` is applied only
        # to the final output below.
        self.encoder_layers = nn.ModuleList(
            [
                EncoderLayer(model_dim, num_heads, ffn_dim, 0.0)
                for _ in range(num_layers)
            ]
        )
        self.pos_embedding = PositionalEncoding(model_dim, max_seq_len)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, output):
        """Add positional encodings and run the transformer stack.

        Args:
            output: batch tensor; assumed (batch, seq_len, model_dim) —
                consistent with how Fusion feeds it. TODO confirm.

        Returns:
            Tensor of the same shape, tanh-activated and dropout-regularized.
        """
        # Every row of the batch has the same (padded) length.
        input_len = torch.LongTensor([x.shape[0] for x in output])
        if output.is_cuda:
            input_len = input_len.cuda()
        output_pos, _ = self.pos_embedding(input_len.unsqueeze(1))
        # Fix: use an out-of-place add. The original `output += output_pos`
        # mutated the caller's tensor in place, silently modifying the input
        # and risking autograd errors when `output` is a leaf or a view.
        output = output + output_pos
        # (The original also accumulated per-layer outputs/attentions into
        # lists that were never read; those dead locals are removed.)
        for encoder in self.encoder_layers:
            output, attention = encoder(output)
        output = torch.tanh(output)
        output = self.dropout(output)
        return output
@ConfigMapper.map("models", "Fusion")
class Fusion(nn.Module):
    """
    This class is for the Fusion model suggested in the following paper
    (MultiResCNNHidden in the repo).
    We removed the elmo from the original code since it's unnecessary.
    Reference:
        Paper: Junyu Luo, et al., Fusion: Towards Automated ICD Coding via
               Feature Compression, in Findings of ACL 2021,
               https://aclanthology.org/2021.findings-acl.184/
        Repository: https://github.com/machinelearning4health/Fusion
    Args (in config):
        num_classes (int): Number of classes (ICD codes).
        dropout (int): The dropout ratio after word embedding
        filter_size (string): The concatenation of filter size of each channel
        use_attention_pool (bool): Flag to use attetion-based soft pooling layer
        pool_size (int): The pooling size of attention-base soft pooling layer
        conv_layer (int): The number of residual convolution blocks
        use_layer_norm (bool): Flag to use group norm in the residual blocks
        use_relu (bool): Flag to use leaky relu (or tanh) in the residual blocks
        use_transformer (bool): Flag to use feature aggregation layer
        max_length (int): The maximum length of the input text. Used by the
            positional encoding inside feature aggregation
        transfer_layer (int): The number of transformer layer in the feature
            aggregation layer
        transfer_attention_head (int): The number of heads in the transformer
            layers
        transfer_fsize (int): The dimension of the feedforward layer in the
            transformer layers
        num_filter_maps (int): The feature dimension of the last feature
            aggregation layer and the label attention layer
    """

    def __init__(self, config):
        super(Fusion, self).__init__()
        logger.info("Initialising %s", self.__class__.__name__)
        logger.debug(
            "Initialising %s with config: %s", self.__class__.__name__, config
        )
        self.config = config
        # From CAML implementation
        self.Y = config.num_classes
        self.dicts = load_lookups(
            dataset_dir=config.dataset_dir,
            mimic_dir=config.mimic_dir,
            static_dir=config.static_dir,
            word2vec_dir=config.word2vec_dir,
            version=config.version,
        )
        self.word_rep = WordRep(config)
        self.conv = nn.ModuleList()
        filter_sizes = config.filter_size
        self.relu = nn.ReLU(inplace=True)
        self.use_transformer = config.use_transformer
        self.filter_num = len(filter_sizes)
        # One "channel" per filter size: base conv (+ optional attention
        # pooling) followed by a stack of residual conv blocks.
        for filter_size in filter_sizes:
            one_channel = nn.ModuleList()
            # Base convolution keeps the feature size; padding roughly
            # preserves the sequence length.
            tmp = nn.Conv1d(
                self.word_rep.feature_size,
                self.word_rep.feature_size,
                kernel_size=filter_size,
                padding=int(floor(filter_size / 2)),
            )
            xavier_uniform(tmp.weight)
            one_channel.add_module("baseconv", tmp)
            if config.use_attention_pool:
                tmp = AttentionBolckV2(
                    self.word_rep.feature_size, config.pool_size, True
                )
                # NOTE(review): "basevonb-pool" looks like a typo of
                # "baseconv-pool" — kept as-is for checkpoint compatibility.
                one_channel.add_module("basevonb-pool", tmp)
            conv_dimension = self.word_rep.conv_dict[config.conv_layer]
            for idx in range(config.conv_layer):
                tmp = ResidualBlockHidden(
                    conv_dimension[idx],
                    conv_dimension[idx + 1],
                    filter_size,
                    1,
                    True,
                    config.dropout if config.use_transformer else 0.0,
                    use_layer_norm=config.use_layer_norm,
                    is_relu=config.use_relu,
                )
                one_channel.add_module("resconv-{}".format(idx), tmp)
            self.conv.add_module("channel-{}".format(filter_size), one_channel)
        if config.use_transformer:
            # Feature aggregation layer over the concatenated channels.
            self.transfer = EncoderHidden(
                config.max_length,
                config.transfer_layer,
                self.filter_num * config.num_filter_maps,
                config.transfer_attention_head,
                config.transfer_fsize,
                config.dropout,
            )
        # Label attention part of OutputLayer
        self.U = nn.Linear(self.filter_num * config.num_filter_maps, self.Y)
        xavier_uniform(self.U.weight)
        self.final = nn.Linear(self.filter_num * config.num_filter_maps, self.Y)
        xavier_uniform(self.final.weight)

    def forward(self, x):
        """Run multi-channel CNN (+ optional transformer) and label attention.

        Side effect: stores the label-attention weights in ``self.alpha`` so
        that ``get_input_attention`` can read them after this call.
        """
        x = self.word_rep(x)
        x = x.transpose(1, 2)
        conv_result = []
        for conv in self.conv:
            tmp = x
            for idx, md in enumerate(conv):
                # The base convolution (idx 0) is tanh-activated; the
                # remaining modules apply their own activations.
                if idx == 0:
                    tmp = torch.tanh(md(tmp))
                else:
                    tmp = md(tmp)
            tmp = tmp.transpose(1, 2)
            conv_result.append(tmp)
        # Concatenate the channel outputs along the feature dimension.
        x = torch.cat(conv_result, dim=2)
        if self.use_transformer:
            x = self.transfer(x)
        # Label attention part of OutputLayer
        self.alpha = F.softmax(self.U.weight.matmul(x.transpose(1, 2)), dim=2)
        m = self.alpha.matmul(x)
        y = self.final.weight.mul(m).sum(dim=2).add(self.final.bias)
        return y

    def freeze_net(self):
        """Freeze the word-embedding weights (no gradient updates)."""
        for p in self.word_rep.embed.parameters():
            p.requires_grad = False

    def get_input_attention(self):
        """Return per-token attention scores as a numpy array.

        Must be called after ``forward`` (reads ``self.alpha``).
        """
        # Use the attention score computed in the forward pass
        # Here we repeat the attention since the input is pooled in the pass
        attention = self.alpha.repeat_interleave(self.config.pool_size, dim=2)
        return attention.cpu().detach().numpy()
| 19,129 | 35.507634 | 81 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/datasets/base_dataset.py | import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from anemic.utils.file_loaders import load_csv_as_df, load_json
from anemic.utils.mapper import ConfigMapper
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
@ConfigMapper.map("datasets", "base_dataset")
class BaseDataset(Dataset):
    """ICD-coding dataset mapping JSON rows to (token idxs, one-hot labels)."""

    def __init__(self, config):
        self._config = config
        # Vocabulary: {word: idx}; indices must be dense in [0, vocab_size).
        embedding_cls = ConfigMapper.get_object("embeddings", "word2vec")
        self.vocab = embedding_cls.load_vocab(self._config.word2vec_dir)
        self.vocab_size = len(self.vocab)
        assert self.vocab_size == max(self.vocab.values()) + 1
        self.pad_idx = self.vocab[self._config.pad_token]
        self.unk_idx = self.vocab[self._config.unk_token]
        self.inv_vocab = {idx: word for word, idx in self.vocab.items()}
        # Labels: {ICD code: idx}; indices must be dense in [0, num_labels).
        label_path = os.path.join(
            self._config.dataset_dir, self._config.label_file
        )
        self.all_labels = load_json(label_path)
        self.num_labels = len(self.all_labels)
        assert self.num_labels == max(self.all_labels.values()) + 1
        self.inv_labels = {idx: code for code, idx in self.all_labels.items()}
        logger.debug(
            "Loaded {} ICD code labels from {}".format(
                self.num_labels, label_path
            )
        )
        # To-do: This class currently deals with only JSON files. We can
        # extend this to deal with other file types (.csv, .xlsx, etc.).
        data_path = os.path.join(
            self._config.dataset_dir, self._config.data_file
        )
        self.df = pd.DataFrame.from_dict(load_json(data_path))
        logger.info(
            "Loaded dataset from {} ({} examples)".format(
                data_path, len(self.df)
            )
        )

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        note_tokens = row[self._config.column_names.clinical_note]
        codes = row[self._config.column_names.labels].split(";")
        # Words -> indices (unknown words map to UNK), truncated.
        token_idxs = self.encode_tokens(note_tokens)
        # ICD codes -> multi-hot label vector.
        one_hot_labels = np.zeros(self.num_labels, dtype=np.int32)
        one_hot_labels[self.encode_labels(codes)] = 1
        return (token_idxs, one_hot_labels)

    def encode_tokens(self, tokens):
        """Convert a list of words into token idxs, truncated to max_length."""
        token_idxs = [self.vocab.get(word, self.unk_idx) for word in tokens]
        return token_idxs[: self._config.max_length]

    def decode_tokens(self, token_idxs):
        """Convert a list of token idxs back into words."""
        return [self.inv_vocab[idx] for idx in token_idxs]

    def encode_labels(self, codes):
        """Convert a list of ICD codes into label indices."""
        return [self.all_labels[code] for code in codes]

    def decode_labels(self, labels):
        """Convert label indices back into ICD codes."""
        return [self.inv_labels[label] for label in labels]

    def collate_fn(self, examples):
        """Pad notes to the batch maximum and stack notes/labels as tensors."""
        notes, labels = zip(*examples)
        longest = max(map(len, notes))
        padded = [
            note + [self.pad_idx] * (longest - len(note)) for note in notes
        ]
        return torch.tensor(padded), torch.tensor(labels)
| 3,753 | 32.81982 | 80 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/utils/checkpoint_savers.py | """
Checkpoint Saver
"""
import json
import os
import torch
from anemic.modules.metrics import load_metric
from anemic.utils.file_loaders import load_json, save_json
from anemic.utils.mapper import ConfigMapper
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
@ConfigMapper.map("checkpoint_savers", "base_saver")
class BaseCheckpointSaver(object):
    """Manages model checkpoints on disk.

    Tracks two kinds of checkpoints through a small JSON "info" file:
    periodic ("iter") checkpoints saved every ``config.interval`` epochs
    (keeping at most ``config.max_to_keep``), and a single best checkpoint
    selected by ``config.metric`` / ``config.desired`` ("max" or "min").
    """

    def __init__(self, config):
        cls_name = self.__class__.__name__
        logger.debug(f"Initializing {cls_name} with config: {config}")
        self.config = config
        # Bookkeeping file recording which checkpoints exist.
        self.info_fname = "ckpt-info.json"
        if hasattr(self.config, "info_fname"):
            self.info_fname = self.config.info_fname
        self.metric = load_metric(self.config.metric)

    def save_ckpt_info(self, info):
        """Persist the checkpoint bookkeeping dict as JSON."""
        info_fpath = os.path.join(self.config.checkpoint_dir, self.info_fname)
        save_json(info, info_fpath)

    def load_ckpt_info(self):
        """Load the bookkeeping dict; return an empty skeleton if absent."""
        info_fpath = os.path.join(self.config.checkpoint_dir, self.info_fname)
        if os.path.exists(info_fpath):
            # Load json (convert int string to int)
            info = load_json(info_fpath)
            info["iter_ckpts"] = {
                int(i): fname for i, fname in info["iter_ckpts"].items()
            }
            return info
        else:
            return {"best_ckpt": None, "iter_ckpts": {}}

    def clean_up_ckpt_info(self, info):
        """Drop entries whose checkpoint files no longer exist on disk."""
        # Check best ckpt
        if info["best_ckpt"]:
            ckpt_fpath = os.path.join(
                self.config.checkpoint_dir, info["best_ckpt"]["fname"]
            )
            if not os.path.exists(ckpt_fpath):
                info["best_ckpt"] = None
        # Check iter based ckpts.
        # Fix: iterate over a snapshot — deleting from a dict while
        # iterating its live ``.items()`` view raises RuntimeError.
        for train_iter, ckpt_fname in list(info["iter_ckpts"].items()):
            ckpt_fpath = os.path.join(self.config.checkpoint_dir, ckpt_fname)
            if not os.path.exists(ckpt_fpath):
                del info["iter_ckpts"][train_iter]
        return info

    def get_latest_checkpoint(self):
        """Return (iter, fname) of the most recent iter checkpoint, or None."""
        info = self.load_ckpt_info()
        info = self.clean_up_ckpt_info(info)
        if info["iter_ckpts"]:
            max_iter = max(info["iter_ckpts"].keys())
            return max_iter, info["iter_ckpts"][max_iter]
        return None

    def get_best_checkpoint(self):
        """Return the best checkpoint filename, or None if there is none."""
        info = self.load_ckpt_info()
        info = self.clean_up_ckpt_info(info)
        return info["best_ckpt"]["fname"] if info["best_ckpt"] else None

    def check_interval(self, train_iter):
        """True when ``train_iter`` falls on a periodic-checkpoint boundary."""
        return train_iter % self.config.interval == 0

    def check_best(
        self,
        y_true=None,
        y_pred=None,
        p_pred=None,
        metric_val=None,
        return_metric=True,
    ):
        """Check whether the current metric beats the stored best checkpoint.

        Either pass a precomputed ``metric_val`` or the prediction arrays
        needed to compute it. Returns ``(metric_val, is_best)`` when
        ``return_metric`` else just ``is_best``.
        """
        # Compute the current metric value
        if metric_val is None:
            metric_val = self.metric(
                y_true=y_true, y_pred=y_pred, p_pred=p_pred
            )
        # Compare with the best metric value
        info = self.load_ckpt_info()
        info = self.clean_up_ckpt_info(info)
        if info["best_ckpt"]:
            desired = self.config.desired
            if self.config.metric.name not in info["best_ckpt"]:
                metric = list(info["best_ckpt"].keys())
                metric.remove("fname")
                # Fix: the original message interpolated the literal text
                # "self.config.metric.name" instead of its value (and
                # lacked a separating space).
                raise ValueError(
                    f"best_ckpt has metric {metric}, not "
                    f"{self.config.metric.name}"
                )
            best_val = info["best_ckpt"][self.config.metric.name]
            is_best = (desired == "max" and best_val < metric_val) or (
                desired == "min" and best_val > metric_val
            )
        else:
            is_best = True
        if return_metric:
            return metric_val, is_best
        else:
            return is_best

    def save_ckpt(
        self,
        model,
        train_iter,
        optimizer=None,
        is_best=False,
        metric_val=None,
        ckpt_fname=None,
    ):
        """Save model (+ optimizer) state and update the info file.

        Best checkpoints replace the previous best; iter checkpoints are
        pruned down to ``config.max_to_keep`` most recent. Returns the
        checkpoint filename used.
        """
        # Load ckpt info
        info = self.load_ckpt_info()
        info = self.clean_up_ckpt_info(info)
        # New checkpoint data and name
        checkpoint = {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict() if optimizer else None,
        }
        if not ckpt_fname:
            if is_best:
                ckpt_fname = self.config.best_fname_format.format(train_iter)
            else:
                ckpt_fname = self.config.ckpt_fname_format.format(train_iter)
        ckpt_fpath = os.path.join(self.config.checkpoint_dir, ckpt_fname)
        # Save new ckpt
        if not os.path.exists(self.config.checkpoint_dir):
            os.makedirs(self.config.checkpoint_dir)
        if is_best:
            # For best, delete the old one and save the new one
            assert metric_val is not None
            if info["best_ckpt"]:
                old_ckpt_fpath = os.path.join(
                    self.config.checkpoint_dir, info["best_ckpt"]["fname"]
                )
                logger.debug(f"Removing ckpt {old_ckpt_fpath}")
                os.remove(old_ckpt_fpath)
            logger.debug(f"Saving ckpt to {ckpt_fpath}")
            torch.save(checkpoint, ckpt_fpath)
            info["best_ckpt"] = {
                "fname": ckpt_fname,
                self.config.metric.name: metric_val,
            }
        else:
            # For iter based ckpt, save first and delete excessive ones
            info["iter_ckpts"] = {
                i: fname
                for i, fname in info["iter_ckpts"].items()
                if fname != ckpt_fname
            }
            logger.debug(f"Saving ckpt to {ckpt_fpath}")
            torch.save(checkpoint, ckpt_fpath)
            info["iter_ckpts"][train_iter] = ckpt_fname
            train_iters_del = sorted(info["iter_ckpts"].keys(), reverse=True)
            train_iters_del = train_iters_del[self.config.max_to_keep :]
            for i in train_iters_del:
                old_ckpt_fpath = os.path.join(
                    self.config.checkpoint_dir, info["iter_ckpts"][i]
                )
                logger.debug(f"Removing ckpt {old_ckpt_fpath}")
                os.remove(old_ckpt_fpath)
                del info["iter_ckpts"][i]
        # Save ckpt info
        self.save_ckpt_info(info)
        return ckpt_fname

    def load_ckpt(self, model, ckpt_fname, optimizer=None):
        """Load model (and optionally optimizer) state from a checkpoint."""
        ckpt_fpath = os.path.join(self.config.checkpoint_dir, ckpt_fname)
        logger.debug(f"Loading ckpt from {ckpt_fpath}")
        checkpoint = torch.load(ckpt_fpath, map_location="cpu")
        model.load_state_dict(checkpoint["model"])
        if optimizer:
            optimizer.load_state_dict(checkpoint["optimizer"])

    def save_args(self, args):
        """Dump the argparse namespace next to the checkpoints as args.json."""
        if not os.path.exists(self.config.checkpoint_dir):
            os.makedirs(self.config.checkpoint_dir)
        args_fpath = os.path.join(self.config.checkpoint_dir, "args.json")
        logger.debug(f"Saving arguments to {args_fpath}")
        with open(args_fpath, "w") as fd:
            json.dump(vars(args), fd)
| 7,043 | 34.044776 | 78 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/utils/misc.py | """Miscellaneous utility functions."""
import copy
import itertools
import random
import numpy as np
import torch
def seed(value=42):
    """Seed every RNG used by the project for reproducible runs.

    Seeds ``random``, NumPy, and PyTorch, and switches cuDNN into its
    deterministic (non-benchmarking) mode.

    Args:
        value (int): Seed
    """
    random.seed(value)
    np.random.seed(value)
    torch.manual_seed(value)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def map_dict_to_obj(dic):
    """Recursively map leaf values of *dic* to registered "params" objects.

    Nested dicts are mapped recursively; for each non-dict leaf a lookup in
    the object registry is attempted, falling back to the raw value when the
    lookup fails. ``None`` input yields an empty dict.

    NOTE(review): ``configmapper`` is not defined in this module (no such
    import exists here), so the lookup below always raises ``NameError`` and
    every leaf falls back to its raw value — confirm whether ``ConfigMapper``
    was intended.
    """
    result_dic = {}
    if dic is not None:
        for key, value in dic.items():
            if isinstance(value, dict):
                result_dic[key] = map_dict_to_obj(value)
            else:
                try:
                    result_dic[key] = configmapper.get_object("params", value)
                # Fix: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt; Exception still covers the
                # NameError/KeyError fallbacks the original relied on.
                except Exception:
                    result_dic[key] = value
    return result_dic
def get_item_in_config(config, path):
    """Walk *path* (a sequence of keys) into *config* and return the node.

    Dict configs are traversed by subscription, any other config object via
    its ``__getattr__``. Traversal stops early (returning ``None``) as soon
    as a ``None`` node is reached.
    """
    if isinstance(config, dict):
        descend = lambda obj, key: obj[key]
    else:
        descend = lambda obj, key: obj.__getattr__(key)
    node = config
    for step in path:
        node = descend(node, step)
        if node is None:
            break
    return node
# init = train_config.grid_search
# curr = get_item_in_config(init,['hyperparams','loader_params'])
# curr.set_value('batch_size',1)
# print(train_config.grid_search)
def generate_grid_search_configs(main_config, grid_config, root="hyperparams"):
    """Expand a grid-search config into a list of concrete configs.

    Runs an iterative DFS over ``grid_config`` starting at ``root``,
    collecting each leaf (single value or list of candidate values) together
    with its path. One deep-copied ``main_config`` is produced for every
    element of the cartesian product of the candidate lists; each copy is
    additionally tagged with a sequential ``log_label`` and the grid's
    ``hparams`` subtree.

    NOTE(review): if the grid config contains no ``log_label`` / ``hparams``
    entries, ``log_label_path`` / ``hparams_path`` remain ``None`` and the
    final loop raises ``TypeError`` — confirm these keys are mandatory.
    """
    # DFS
    locations_values_pair = {}
    init = grid_config.as_dict()
    stack = [root]
    visited = [stack[-1]]
    log_label_path = None
    hparams_path = None
    while len(stack) != 0:
        root = get_item_in_config(init, stack)
        flag = 0
        if (
            not isinstance(root, dict) and "hparams" not in stack
        ):  # Meaning it is a leaf node
            if isinstance(root, list):
                locations_values_pair[
                    tuple(copy.deepcopy(stack))
                ] = root  # Append the current stack, and the list values
            else:
                locations_values_pair[tuple(copy.deepcopy(stack))] = [
                    root,
                ]  # A single value becomes a one-element candidate list
            _ = stack.pop()  # Pop this root because we don't need it.
        else:
            if isinstance(root, list) and "hparams" in stack:
                # Remember where the hparams subtree lives; don't descend.
                hparams_path = copy.deepcopy(stack)
                visited.append(".".join(stack))
                stack.pop()
                continue
            # Remember where the log label lives so each generated config
            # can be tagged below. (Fix: the original repeated this exact
            # block twice back to back; the duplicate — an idempotent
            # re-assignment — was removed.)
            if "log_label" in root.keys():
                log_label_path = copy.deepcopy(
                    stack
                    + [
                        "log_label",
                    ]
                )
            parent = root  # Otherwise it has children
            for key in parent.keys():  # For the children
                if (
                    ".".join(
                        stack
                        + [
                            key,
                        ]
                    )
                    not in visited
                ):  # Check if I have visited these children
                    flag = 1  # If not, repeat the process for this key
                    stack.append(key)  # Append this key to the stack
                    visited.append(".".join(stack))
                    break
            if flag == 0:
                stack.pop()
    paths = list(locations_values_pair.keys())
    values = itertools.product(*list(locations_values_pair.values()))
    result_configs = []
    for value in values:
        # Apply one concrete assignment per collected path.
        for item_index in range(len(value)):
            curr_path = paths[item_index]
            curr_item = value[item_index]
            curr_config_item = get_item_in_config(main_config, curr_path[1:-1])
            curr_config_item.set_value(curr_path[-1], curr_item)
        # Sequential label (1-based) for logging/identification.
        log_item = get_item_in_config(main_config, log_label_path[1:-1])
        log_item.set_value(log_label_path[-1], str(len(result_configs) + 1))
        hparam_item = get_item_in_config(main_config, hparams_path[1:-1])
        hparam_item.set_value(
            hparams_path[-1],
            get_item_in_config(grid_config.hyperparams, hparams_path[1:]),
        )
        result_configs.append(copy.deepcopy(main_config))
    return result_configs
# HTML formatting for visualizing word importance
# Source: https://github.com/gchhablani/toxic-spans-detection/
def _get_color(attr):
# clip values to prevent CSS errors (Values should be from [-1,1])
attr = max(-1, min(1, attr))
if attr > 0:
hue = 10
sat = 100
lig = 100 - int(80 * attr)
else:
hue = 220
sat = 100
# lig = 100 - int(-125 * attr)
lig = 100 - int(-80 * attr)
return "hsl({}, {}%, {}%)".format(hue, sat, lig)
def format_special_tokens(token):
    """Rewrite angle-bracketed tokens so they cannot be read as HTML tags.

    Example: ``'<Hello>'`` becomes ``'#Hello'``; any token not wrapped in
    ``<...>`` is returned unchanged.

    Args:
        token (str): The token to be formatted.

    Returns:
        (str): The formatted token.
    """
    looks_like_tag = token.startswith("<") and token.endswith(">")
    return "#" + token.strip("<>") if looks_like_tag else token
def html_word_importance(words, importances):
    """Render words as HTML ``<mark>`` tags colour-coded by importance.

    Each word receives a background colour from ``_get_color`` and a tooltip
    carrying the raw importance; strongly weighted words (|score| > 0.5)
    switch to a white font for contrast. Words containing non-ASCII
    characters are printed to stdout (debug aid kept from the original).
    """
    assert len(words) <= len(importances)
    pieces = ["<div>"]
    # zip stops at the shorter sequence, so extra importances are ignored.
    for word, importance in zip(words, importances):
        word = format_special_tokens(word)
        if any(ord(character) >= 128 for character in word):
            print(word)
        bg_color = _get_color(importance)
        font_color = "white" if abs(importance) > 0.5 else "black"
        unwrapped_tag = f"""<mark style="background-color:{bg_color};\
opacity:1.0;line-height:1.75" title="{importance:.4f}">\
<font color="{font_color}"> {word} </font></mark>"""
        pieces.append(unwrapped_tag)
    pieces.append("</div>")
    return "".join(pieces)
| 6,349 | 28.812207 | 80 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/utils/graph_writers.py | import json
import os
import torch
from torch.utils.tensorboard import SummaryWriter
from anemic.utils.mapper import ConfigMapper
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
class GraphWriterBase:
    """Abstract base class for scalar-metric graph writers."""

    def __init__(self, config):
        self.config = config

    def write_scalar(self, name, value, step=None):
        """Record scalar *value* under *name* at *step*. Must be overridden.

        Fix: the base originally declared only ``writer_scalar``, while
        callers (e.g. BaseTrainer) and TensorboardGraphWriter use
        ``write_scalar`` — the correctly-named abstract hook is added here.
        """
        raise NotImplementedError()

    # Backward-compatible alias for the original (inconsistent) name.
    writer_scalar = write_scalar
@ConfigMapper.map("graph_writers", "tensorboard")
class TensorboardGraphWriter(GraphWriterBase):
    """Graph writer that logs scalars via torch's TensorBoard SummaryWriter."""

    def __init__(self, config):
        # `config` must provide `log_dir` (read below for the SummaryWriter).
        cls_name = self.__class__.__name__
        logger.debug(f"Initializing {cls_name} with config: {config}")
        super().__init__(config)
        # Tensorboard writer
        self.writer = SummaryWriter(log_dir=self.config.log_dir)

    def write_scalar(self, name, value, step=None):
        """Log one scalar and flush immediately so it shows up right away."""
        self.writer.add_scalar(tag=name, scalar_value=value, global_step=step)
        self.writer.flush()
@ConfigMapper.map("graph_writers", "wandb")
class WandBGraphWriter(GraphWriterBase):
    """Placeholder Weights & Biases graph writer (not implemented yet)."""

    def __init__(self, config):
        cls_name = self.__class__.__name__
        logger.debug(f"Initializing {cls_name} with config: {config}")
        super().__init__(config)

    def write_scalar(self, name, value, step=None):
        """Not implemented. Fix: callers (BaseTrainer) invoke
        ``write_scalar``; the original only defined ``writer_scalar``, so
        selecting this writer raised AttributeError instead of this explicit
        NotImplementedError."""
        raise NotImplementedError()

    # Keep the original (inconsistently named) method available.
    writer_scalar = write_scalar
| 1,279 | 26.234043 | 78 | py |
icd-coding-benchmark | icd-coding-benchmark-main/anemic/trainers/base_trainer.py | import math
import os
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm
from anemic.modules.metrics import load_metric
from anemic.utils.configuration import Config
from anemic.utils.file_loaders import save_json
from anemic.utils.mapper import ConfigMapper
from anemic.utils.text_loggers import get_logger
logger = get_logger(__name__)
@ConfigMapper.map("trainers", "base_trainer")
class BaseTrainer:
def __init__(self, config):
cls_name = self.__class__.__name__
logger.info(f"Initializing {cls_name}")
logger.debug(f"Initializing {cls_name} with config: {config}")
self.config = config
# Loss function
self.loss_fn = ConfigMapper.get_object(
"losses",
self.config.loss.name,
)(self.config.loss.params)
logger.debug(
f"Created loss function {self.loss_fn.__class__.__name__} with "
f"config: {self.config.loss.params}"
)
# Evaluation metrics
self.eval_metrics = {}
for config_dict in self.config.eval_metrics:
metric_name = config_dict.name
self.eval_metrics[metric_name] = load_metric(config_dict)
def train(self, model, train_dataset, val_dataset=None):
"""Train the model"""
self.model = model
self.train_dataset = train_dataset
self.val_dataset = val_dataset
# Data loader
train_loader_config = self.config.data_loader.as_dict()
logger.debug(f"Creating train DataLoader: {train_loader_config}")
if "collate_fn" in dir(train_dataset):
train_loader_config["collate_fn"] = train_dataset.collate_fn
train_loader = DataLoader(train_dataset, **train_loader_config)
if val_dataset:
# We force the val dataset not shuffled and fully used
val_loader_config = self.config.data_loader.as_dict()
val_loader_config["drop_last"] = False
val_loader_config["shuffle"] = False
logger.debug(f"Creating val DataLoader: {val_loader_config}")
if "collate_fn" in dir(val_dataset):
val_loader_config["collate_fn"] = val_dataset.collate_fn
val_loader = DataLoader(val_dataset, **val_loader_config)
batch_size = self.config.data_loader.batch_size
if train_loader_config["drop_last"]:
num_train_batch = math.floor(len(train_dataset) / batch_size)
else:
num_train_batch = math.ceil(len(train_dataset) / batch_size)
# Optimizer & LR scheduler
optimizer = ConfigMapper.get_object(
"optimizers", self.config.optimizer.name
)(model.parameters(), **self.config.optimizer.params.as_dict())
logger.debug(
f"Created optimizer {optimizer.__class__.__name__} with config: "
f"{self.config.optimizer.params}"
)
scheduler = None
if self.config.lr_scheduler is not None:
if "warmup" in self.config.lr_scheduler.name:
warm_up_steps = (
self.config.lr_scheduler.params.warm_up_proportion
* (len(train_dataset) // batch_size)
)
num_training_steps = len(train_dataset) // batch_size
scheduler = ConfigMapper.get_object(
"schedulers", self.config.lr_scheduler.name
)(optimizer, warm_up_steps, num_training_steps)
logger.debug(
f"Created scheduler {scheduler.__class__.__name__} with "
f"config: {self.config.lr_scheduler.params}"
)
else:
scheduler = ConfigMapper.get_object(
"schedulers", self.config.lr_scheduler.name
)(optimizer, **self.config.lr_scheduler.params.as_dict())
logger.debug(
f"Created scheduler {scheduler.__class__.__name__} with "
f"config: {self.config.lr_scheduler.params}"
)
# Add evaluation metrics for graph
for config in (
self.config.graph.train.metric + self.config.graph.val.metric
):
metric_name = config.name
if metric_name not in self.eval_metrics and metric_name != "loss":
self.eval_metrics[metric_name] = load_metric(config)
train_metric_names = [
metric.name for metric in self.config.graph.train.metric
]
val_metric_names = [
metric.name for metric in self.config.graph.val.metric
]
# Stopping criterion: (metric, max/min, patience)
max_epochs = int(self.config.max_epochs)
stopping_criterion = None
if self.config.stopping_criterion is not None:
sc_config = self.config.stopping_criterion
# Load metric
sc_metric_config = sc_config.metric
if sc_metric_config.name in self.eval_metrics:
sc_metric = self.eval_metrics[sc_metric_config.name]
else:
sc_metric = load_metric(sc_metric_config)
# Metric + max/min + patience
stopping_criterion = (
sc_metric,
sc_config.desired,
sc_config.patience,
)
best_stopping_val = float("inf")
best_stopping_epoch = 0
if sc_config.desired == "max":
best_stopping_val *= -1.0
if self.config.use_gpu:
model.cuda()
logger.info("Use GPU")
# Checkpoint saver
ckpt_saver = ConfigMapper.get_object(
"checkpoint_savers", self.config.checkpoint_saver.name
)(self.config.checkpoint_saver.params)
# Load latest checkpoint
latest_ckpt = ckpt_saver.get_latest_checkpoint()
if latest_ckpt is not None:
ckpt_epoch, ckpt_fname = latest_ckpt
ckpt_saver.load_ckpt(
model=model, optimizer=optimizer, ckpt_fname=ckpt_fname
)
logger.info(f"Checkpoint loaded from {ckpt_fname}")
init_epoch = ckpt_epoch + 1
else:
init_epoch = 0
global_step = (len(train_dataset) // batch_size) * init_epoch
# Graph Writer (tensorboard)
writer = ConfigMapper.get_object(
"graph_writers", self.config.graph.writer.name
)(self.config.graph.writer.params)
# Train!
for epoch in range(init_epoch, max_epochs):
# Print training epoch
logger.info(f"Epoch: {epoch}/{max_epochs}, Step {global_step:6}")
model.train()
# Train for one epoch
pbar = tqdm(total=num_train_batch)
pbar.set_description(f"Epoch {epoch}")
for batch_train in train_loader:
optimizer.zero_grad()
batch_inputs, batch_labels = batch_train
if self.config.use_gpu:
batch_inputs = batch_inputs.cuda()
batch_labels = batch_labels.cuda()
batch_outputs = model(batch_inputs)
batch_loss = self.loss_fn(
input=batch_outputs, target=batch_labels
)
if "regularizer" in dir(model):
batch_loss += model.regularizer(labels=batch_labels)
batch_loss.backward()
optimizer.step()
if scheduler:
scheduler.step()
# Write graph on proper steps (train)
if (
self.config.graph.train.interval_unit == "step"
and global_step % self.config.graph.train.interval == 0
):
train_metric_vals = self._compute_metrics(
outputs=batch_outputs.detach().cpu(),
labels=batch_labels.cpu(),
metric_names=train_metric_names,
)
for metric_name, metric_val in train_metric_vals.items():
writer.write_scalar(
f"train/{metric_name}", metric_val, step=global_step
)
pbar.set_postfix_str(f"Train Loss: {batch_loss.item():.6f}")
pbar.update(1)
global_step += 1
pbar.close()
# Evaluate on eval dataset
if val_dataset:
val_outputs, val_labels = self._forward_epoch(
model, dataloader=val_loader
)
val_loss = self.loss_fn(input=val_outputs, target=val_labels)
val_labels_np = val_labels.numpy()
val_prob = torch.sigmoid(val_outputs).numpy()
val_pred = val_prob.round()
logger.info("Evaluate on val dataset")
for metric_config in self.config.eval_metrics:
metric_name = metric_config.name
metric_val = self.eval_metrics[metric_name](
y_true=val_labels_np, y_pred=val_pred, p_pred=val_prob
)
logger.info(f"{metric_name:>12}: {metric_val:6f}")
# Plot graph on proper epochs (val)
if (
val_dataset
and self.config.graph.val.interval_unit == "epoch"
and epoch % self.config.graph.val.interval == 0
):
val_metric_vals = self._compute_metrics(
outputs=val_outputs,
labels=val_labels,
metric_names=val_metric_names,
)
for metric_name, metric_val in val_metric_vals.items():
writer.write_scalar(
f"val/{metric_name}", metric_val, step=global_step
)
# Update learning rate
if scheduler is not None:
if isinstance(scheduler, ReduceLROnPlateau):
# ReduceLROnPlateau uses validation loss
if val_dataset:
scheduler.step(val_loss)
else:
scheduler.step()
# Update best stopping condition
if val_dataset and stopping_criterion:
metric, desired, patience = stopping_criterion
stopping_val = metric(
y_true=val_labels, p_pred=val_outputs, y_pred=val_pred
)
if desired == "max" and stopping_val > best_stopping_val:
best_stopping_val = stopping_val
best_stopping_epoch = epoch
if desired == "min" and stopping_val < best_stopping_val:
best_stopping_val = stopping_val
best_stopping_epoch = epoch
# Checkpoint 1. Per interval epoch
if ckpt_saver.check_interval(epoch):
ckpt_fname = ckpt_saver.save_ckpt(
model=model, optimizer=optimizer, train_iter=epoch
)
logger.info(f"Checkpoint saved to {ckpt_fname}")
# Checkpoint 2. Best val metric
if val_dataset:
metric_val, is_best = ckpt_saver.check_best(
y_true=val_labels, p_pred=val_prob, y_pred=val_pred
)
if is_best:
ckpt_fname = ckpt_saver.save_ckpt(
model=model,
optimizer=optimizer,
train_iter=epoch,
is_best=True,
metric_val=metric_val,
)
logger.info(
f"Checkpoint saved to {ckpt_fname} "
f"({ckpt_saver.config.metric.name}: "
f"{metric_val:.6f})"
)
# Stop training if condition met
if val_dataset and (epoch - best_stopping_epoch >= patience):
break
# Wrapping up
# Save the last checkpoint, if not saved above
if not ckpt_saver.check_interval(epoch):
ckpt_fname = ckpt_saver.save_ckpt(
model=model, optimizer=optimizer, train_iter=epoch
)
logger.info(f"Checkpoint saved to {ckpt_fname}")
logger.info("Training completed")
return
def test(self, model, test_dataset):
"""Load the best or latest ckpt and evalutate on the given dataset."""
# Load the best or the latest model
ckpt_saver = ConfigMapper.get_object(
"checkpoint_savers", self.config.checkpoint_saver.name
)(self.config.checkpoint_saver.params)
best_ckpt = ckpt_saver.get_best_checkpoint()
if best_ckpt is not None:
ckpt_fname = best_ckpt
else:
latest_ckpt = ckpt_saver.get_latest_checkpoint()
if latest_ckpt is not None:
ckpt_fname = latest_ckpt[1]
else:
logger.error("Cannot find a model checkpoint")
return
ckpt_saver.load_ckpt(model, ckpt_fname, optimizer=None)
logger.info(f"Loaded checkpoint from {ckpt_fname}")
if self.config.use_gpu:
model.cuda()
logger.info("Use GPU")
# Evaluate on test dataset
logger.info("Evaluating on test dataset")
metric_vals = self.evaluate(model, test_dataset)
# Print and save results
for metric_name, metric_val in metric_vals.items():
logger.info(f"{metric_name:>12}: {metric_val:6f}")
result_fpath = os.path.join(self.config.output_dir, "test_result.json")
logger.info(f"Saving result on {result_fpath}")
save_json(metric_vals, result_fpath)
def evaluate(self, model, dataset=None, dataloader=None):
"""Evaluate the model on the given dataset."""
# Get preds and labels for the whole epoch
epoch_outputs, epoch_labels = self._forward_epoch(
model, dataset=dataset, dataloader=dataloader
)
# Evaluate the predictions using self.eval_metrics
return self._compute_metrics(epoch_outputs, epoch_labels)
def _compute_metrics(self, outputs, labels, metric_names=None):
"""
Compute the metrics of given names. Inputs should be Torch tensors.
"""
metric_vals = {}
if metric_names is None:
metric_names = self.eval_metrics.keys()
labels_np = np.array(labels)
probs = np.array(torch.sigmoid(outputs))
preds = probs.round()
for metric_name in metric_names:
if metric_name == "loss":
metric_vals["loss"] = self.loss_fn(input=outputs, target=labels)
else:
metric_vals[metric_name] = self.eval_metrics[metric_name](
y_true=labels_np, p_pred=probs, y_pred=preds
)
return metric_vals
def _forward_epoch(self, model, dataset=None, dataloader=None):
    """Compute the forward pass on the given dataset.

    Args:
        model: Torch module to run; switched to eval mode for the pass.
        dataset: Dataset used to build a DataLoader when *dataloader*
            is not supplied.
        dataloader: Optional pre-built loader; takes precedence over
            *dataset*.

    Returns:
        tuple: ``(epoch_outputs, epoch_labels)`` — CPU tensors obtained
        by concatenating every batch along dim 0, in loader order.
    """
    assert dataset or dataloader
    # Dataloader
    if dataloader is None:
        # Force a deterministic, complete pass: no shuffling and keep
        # the final partial batch so every sample is evaluated once.
        data_config = self.config.data_loader.as_dict()
        data_config["drop_last"] = False
        data_config["shuffle"] = False
        logger.debug(f"Creating test DataLoader: {data_config}")
        # Honor a dataset-provided collate_fn when one exists.
        if "collate_fn" in dir(dataset):
            data_config["collate_fn"] = dataset.collate_fn
        dataloader = DataLoader(dataset, **data_config)
    # Forward for the whole batch
    model.eval()
    epoch_outputs, epoch_labels = [], []
    with torch.no_grad():
        for i, (batch_inputs, batch_labels) in enumerate(dataloader):
            # assumes batch_inputs/batch_labels are tensors (support
            # .cuda()/.cpu()) — TODO confirm against the dataset's collate_fn
            if self.config.use_gpu:
                batch_inputs = batch_inputs.cuda()
                batch_labels = batch_labels.cuda()
            batch_outputs = model(batch_inputs)
            # Move results back to CPU immediately to bound GPU memory use.
            epoch_labels.append(batch_labels.cpu())
            epoch_outputs.append(batch_outputs.cpu())
    # Concat
    epoch_labels = torch.cat(epoch_labels, 0)
    epoch_outputs = torch.cat(epoch_outputs, 0)
    return epoch_outputs, epoch_labels
| 16,526 | 39.211679 | 80 | py |
pykg2vec | pykg2vec-master/pykg2vec/models/Domain.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Domain module for building Knowledge Graphs
"""
from torch.nn import Embedding
class NamedEmbedding(Embedding):
    """An ``Embedding`` layer that carries a human-readable identifier.

    The identifier is stored at construction time and exposed read-only
    through the ``name`` property; everything else is inherited from
    ``torch.nn.Embedding``.
    """

    def __init__(self, name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._name = name

    @property
    def name(self):
        """str: the identifier supplied at construction."""
        return self._name
| 418 | 22.277778 | 61 | py |
pykg2vec | pykg2vec-master/pykg2vec/models/pairwise.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from pykg2vec.models.KGMeta import PairwiseModel
from pykg2vec.models.Domain import NamedEmbedding
from pykg2vec.utils.criterion import Criterion
class TransE(PairwiseModel):
    """Translation-based embedding model (Bordes et al., NeurIPS 2013).

    Entities and relations share a single vector space; a plausible
    triple (h, r, t) should satisfy h + r ≈ t, so the score is the L1
    or L2 distance between h + r and t (lower is better).

    Args:
        **kwargs: must provide ``tot_entity``, ``tot_relation``,
            ``hidden_size`` and ``l1_flag``.

    Portion of the code based on `OpenKE_TransE`_ and `wencolani`_.

    .. _OpenKE_TransE: https://github.com/thunlp/OpenKE/blob/master/models/TransE.py
    .. _wencolani: https://github.com/wencolani/TransE.git
    """

    def __init__(self, **kwargs):
        super(TransE, self).__init__(self.__class__.__name__.lower())
        # Pull the hyper-parameters this model needs out of kwargs and
        # expose them as attributes (tot_entity, hidden_size, ...).
        required = ["tot_entity", "tot_relation", "hidden_size", "l1_flag"]
        self.__dict__.update(self.load_params(required, kwargs))

        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.hidden_size)
        for table in (self.ent_embeddings, self.rel_embeddings):
            nn.init.xavier_uniform_(table.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]
        self.loss = Criterion.pairwise_hinge

    def forward(self, h, r, t):
        """Score a batch of triples with the translational distance.

        Embeddings are L2-normalized first; the norm order is L1 when
        ``l1_flag`` is set, otherwise L2.

        Args:
            h (Tensor): head entity ids.
            r (Tensor): relation ids.
            t (Tensor): tail entity ids.

        Returns:
            Tensor: per-triple distance ||h + r - t||.
        """
        head, rel, tail = self.embed(h, r, t)
        head = F.normalize(head, p=2, dim=-1)
        rel = F.normalize(rel, p=2, dim=-1)
        tail = F.normalize(tail, p=2, dim=-1)
        order = 1 if self.l1_flag else 2
        return torch.norm(head + rel - tail, p=order, dim=-1)

    def embed(self, h, r, t):
        """Look up raw (head, relation, tail) embeddings for id batches.

        Args:
            h (Tensor): head entity ids.
            r (Tensor): relation ids.
            t (Tensor): tail entity ids.

        Returns:
            tuple of Tensors: head, relation and tail embeddings.
        """
        return self.ent_embeddings(h), self.rel_embeddings(r), self.ent_embeddings(t)
class TransH(PairwiseModel):
    """Translation on relation-specific hyperplanes (Wang et al., AAAI 2014).

    Each relation r owns a hyperplane with normal vector ``w_r``.  Head
    and tail embeddings are projected onto that hyperplane before the
    TransE-style distance ||h' + r - t'|| is taken, which lets the model
    preserve reflexive and 1-N / N-1 / N-N mapping properties at almost
    the same complexity as TransE.

    Args:
        **kwargs: must provide ``tot_entity``, ``tot_relation``,
            ``hidden_size`` and ``l1_flag``.

    Portion of the code based on `OpenKE_TransH`_ and `thunlp_TransH`_.

    .. _OpenKE_TransH: https://github.com/thunlp/OpenKE/blob/master/models/TransH.py
    .. _thunlp_TransH: https://github.com/thunlp/TensorFlow-TransX/blob/master/transH.py
    """

    def __init__(self, **kwargs):
        super(TransH, self).__init__(self.__class__.__name__.lower())
        required = ["tot_entity", "tot_relation", "hidden_size", "l1_flag"]
        self.__dict__.update(self.load_params(required, kwargs))

        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.hidden_size)
        # One hyperplane normal vector per relation.
        self.w = NamedEmbedding("w", self.tot_relation, self.hidden_size)
        for table in (self.ent_embeddings, self.rel_embeddings, self.w):
            nn.init.xavier_uniform_(table.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.w,
        ]
        self.loss = Criterion.pairwise_hinge

    def forward(self, h, r, t):
        """Score triples as ||h' + r - t'|| on the relation hyperplane."""
        head, rel, tail = self.embed(h, r, t)
        head = F.normalize(head, p=2, dim=-1)
        rel = F.normalize(rel, p=2, dim=-1)
        tail = F.normalize(tail, p=2, dim=-1)
        order = 1 if self.l1_flag else 2
        return torch.norm(head + rel - tail, p=order, dim=-1)

    def embed(self, h, r, t):
        """Look up embeddings and project entities onto r's hyperplane.

        Args:
            h (Tensor): head entity ids.
            r (Tensor): relation ids.
            t (Tensor): tail entity ids.

        Returns:
            tuple of Tensors: projected head, relation, projected tail.
        """
        normal = self.w(r)
        emb_h = self._projection(self.ent_embeddings(h), normal)
        emb_r = self.rel_embeddings(r)
        emb_t = self._projection(self.ent_embeddings(t), normal)
        return emb_h, emb_r, emb_t

    @staticmethod
    def _projection(emb_e, proj_vec):
        """Drop from ``emb_e`` its component along the unit normal ``proj_vec``."""
        unit = F.normalize(proj_vec, p=2, dim=-1)
        # [b, k] minus (scalar-per-row component) * normal
        component = torch.sum(emb_e * unit, dim=-1, keepdims=True)
        return emb_e - component * unit
class TransD(PairwiseModel):
    r"""
    `Knowledge Graph Embedding via Dynamic Mapping Matrix`_ (TransD) is an improved version of TransR.

    For each triplet :math:`(h, r, t)`, it uses two mapping matrices :math:`M_{rh}`, :math:`M_{rt}` :math:`\in` :math:`R^{mn}` to project entities from entity space to relation space.
    TransD constructs a dynamic mapping matrix for each entity-relation pair by considering the diversity of entities and relations simultaneously.
    Compared with TransR/CTransR, TransD has fewer parameters and has no matrix vector multiplication.

    Args:
        config (object): Model configuration parameters.

    Portion of the code based on `OpenKE_TransD`_.

    .. _OpenKE_TransD:
        https://github.com/thunlp/OpenKE/blob/master/models/TransD.py

    .. _Knowledge Graph Embedding via Dynamic Mapping Matrix:
        https://www.aclweb.org/anthology/P15-1067
    """

    def __init__(self, **kwargs):
        super(TransD, self).__init__(self.__class__.__name__.lower())
        # Required hyper-parameters become attributes via load_params.
        param_list = ["tot_entity", "tot_relation", "rel_hidden_size", "ent_hidden_size", "l1_flag"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        # Entity/relation embeddings plus one mapping vector per entity
        # and per relation; the pair (m_h, m_r) defines the dynamic
        # projection used in _projection.
        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.ent_hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.rel_hidden_size)
        self.ent_mappings = NamedEmbedding("ent_mappings", self.tot_entity, self.ent_hidden_size)
        self.rel_mappings = NamedEmbedding("rel_mappings", self.tot_relation, self.rel_hidden_size)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.ent_mappings.weight)
        nn.init.xavier_uniform_(self.rel_mappings.weight)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.ent_mappings,
            self.rel_mappings,
        ]
        self.loss = Criterion.pairwise_hinge

    def embed(self, h, r, t):
        """Look up embeddings and project entities into relation space.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns projected head, relation and projected tail embeddings.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        h_m = self.ent_mappings(h)
        r_m = self.rel_mappings(r)
        t_m = self.ent_mappings(t)
        emb_h = self._projection(emb_h, h_m, r_m)
        emb_t = self._projection(emb_t, t_m, r_m)
        return emb_h, emb_r, emb_t

    def forward(self, h, r, t):
        """Score a batch of triples; L1 or L2 translational distance.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids.
            t (Tensor): Tail entity ids.

        Returns:
            Tensor: per-triple distance ||h' + r - t'|| (lower is better).
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        norm_h_e = F.normalize(h_e, p=2, dim=-1)
        norm_r_e = F.normalize(r_e, p=2, dim=-1)
        norm_t_e = F.normalize(t_e, p=2, dim=-1)
        if self.l1_flag:
            return torch.norm(norm_h_e + norm_r_e - norm_t_e, p=1, dim=-1)
        return torch.norm(norm_h_e + norm_r_e - norm_t_e, p=2, dim=-1)

    @staticmethod
    def _projection(emb_e, emb_m, proj_vec):
        # Dynamic projection: e + <e, m_e> * m_r.
        # NOTE(review): this matches the identity-plus-rank-one form
        # M = m_r m_e^T + I of the paper only when ent and rel hidden
        # sizes agree — confirm intended for unequal sizes.
        # [b, k] + sigma ([b, k] * [b, k]) * [b, k]
        return emb_e + torch.sum(emb_e * emb_m, axis=-1, keepdims=True) * proj_vec
class TransM(PairwiseModel):
    """
    `Transition-based Knowledge Graph Embedding with Relational Mapping Properties`_ (TransM)
    is another line of research that improves TransE by relaxing the overstrict requirement of
    h+r ==> t. TransM associates each fact (h, r, t) with a weight theta(r) specific to the relation.

    TransM helps to remove the the lack of flexibility present in TransE when it comes to mapping properties of triplets. It utilizes the structure of the knowledge graph via pre-calculating the distinct weight for each training triplet according to its relational mapping property.

    Args:
        config (object): Model configuration parameters.

    .. _Transition-based Knowledge Graph Embedding with Relational Mapping Properties:
        https://pdfs.semanticscholar.org/0ddd/f37145689e5f2899f8081d9971882e6ff1e9.pdf
    """

    def __init__(self, **kwargs):
        super(TransM, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "l1_flag"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.hidden_size)

        # Per-relation statistics collected from the cached training
        # triples: the heads/tails each relation co-occurs with and its
        # total frequency.
        rel_head = {x: [] for x in range(self.tot_relation)}
        rel_tail = {x: [] for x in range(self.tot_relation)}
        rel_counts = {x: 0 for x in range(self.tot_relation)}
        train_triples_ids = kwargs["knowledge_graph"].read_cache_data('triplets_train')
        for t in train_triples_ids:
            rel_head[t.r].append(t.h)
            rel_tail[t.r].append(t.t)
            rel_counts[t.r] += 1

        # Relation weight theta_r = 1 / log(2 + hpt + tph).
        # NOTE(review): rel_head/rel_tail keep duplicates, so
        # len(rel_tail[x]) equals rel_counts[x] and both ratios are ~1;
        # the paper's hpt/tph use *distinct* entity counts — confirm.
        theta = [1/np.log(2+rel_counts[x]/(1+len(rel_tail[x])) + rel_counts[x]/(1+len(rel_head[x]))) for x in range(self.tot_relation)]
        self.theta = torch.from_numpy(np.asarray(theta, dtype=np.float32)).to(kwargs["device"])

        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]
        self.loss = Criterion.pairwise_hinge

    def forward(self, h, r, t):
        """Score triples with the relation-weighted translational distance.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids.
            t (Tensor): Tail entity ids.

        Returns:
            Tensor: theta_r * ||h + r - t|| per triple (lower is better).
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        norm_h_e = F.normalize(h_e, p=2, dim=-1)
        norm_r_e = F.normalize(r_e, p=2, dim=-1)
        norm_t_e = F.normalize(t_e, p=2, dim=-1)
        # Look up the pre-computed per-relation weight for this batch.
        r_theta = self.theta[r]
        if self.l1_flag:
            return r_theta*torch.norm(norm_h_e + norm_r_e - norm_t_e, p=1, dim=-1)
        return r_theta*torch.norm(norm_h_e + norm_r_e - norm_t_e, p=2, dim=-1)

    def embed(self, h, r, t):
        """Look up raw head, relation and tail embeddings.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t
class TransR(PairwiseModel):
    """
    `Learning Entity and Relation Embeddings for Knowledge Graph Completion`_ (TransR) is a translation based knowledge graph embedding method. Similar to TransE and TransH, it also
    builds entity and relation embeddings by regarding a relation as translation from head entity to tail
    entity. However, compared to them, it builds the entity and relation embeddings in a separate entity
    and relation spaces. Portion of the code based on `thunlp_transR`_.

    Args:
        config (object): Model configuration parameters.

    .. _thunlp_transR:
        https://github.com/thunlp/TensorFlow-TransX/blob/master/transR.py

    .. _Learning Entity and Relation Embeddings for Knowledge Graph Completion:
        http://nlp.csai.tsinghua.edu.cn/~lyk/publications/aaai2015_transr.pdf
    """

    def __init__(self, **kwargs):
        super(TransR, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "rel_hidden_size", "ent_hidden_size", "l1_flag"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        # Entities live in ent_hidden_size-dim space, relations in
        # rel_hidden_size-dim space; rel_matrix holds one flattened
        # [ent_hidden_size x rel_hidden_size] projection per relation.
        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.ent_hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.rel_hidden_size)
        self.rel_matrix = NamedEmbedding("rel_matrix", self.tot_relation, self.ent_hidden_size * self.rel_hidden_size)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_matrix.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.rel_matrix,
        ]
        self.loss = Criterion.pairwise_hinge

    def transform(self, e, matrix):
        """Map entity vectors *e* into relation space via *matrix*.

        The flattened per-relation matrices are reshaped to
        [b, ent_hidden_size, rel_hidden_size] and batch-multiplied
        against *e*; the result is flattened to [-1, rel_hidden_size].
        """
        matrix = matrix.view(-1, self.ent_hidden_size, self.rel_hidden_size)
        if e.shape[0] != matrix.shape[0]:
            # Batch sizes differ — presumably several candidate entities
            # share each relation matrix (negative sampling); regroup so
            # matmul broadcasts per relation. TODO confirm against callers.
            e = e.view(-1, matrix.shape[0], self.ent_hidden_size).permute(1, 0, 2)
            e = torch.matmul(e, matrix).permute(1, 0, 2)
        else:
            # One matrix per entity row: [b, 1, k] @ [b, k, d].
            e = e.view(-1, 1, self.ent_hidden_size)
            e = torch.matmul(e, matrix)
        return e.view(-1, self.rel_hidden_size)

    def embed(self, h, r, t):
        """Look up embeddings and project entities into relation space.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns projected head, relation and projected tail embeddings.
        """
        h_e = self.ent_embeddings(h)
        r_e = self.rel_embeddings(r)
        t_e = self.ent_embeddings(t)

        h_e = F.normalize(h_e, p=2, dim=-1)
        r_e = F.normalize(r_e, p=2, dim=-1)
        t_e = F.normalize(t_e, p=2, dim=-1)

        h_e = torch.unsqueeze(h_e, 1)
        t_e = torch.unsqueeze(t_e, 1)
        # [b, 1, k]

        matrix = self.rel_matrix(r)
        # [b, k, d]

        transform_h_e = self.transform(h_e, matrix)
        transform_t_e = self.transform(t_e, matrix)
        # [b, 1, d] = [b, 1, k] * [b, k, d]

        # transform() already returns 2-D tensors, so this squeeze is a
        # no-op unless rel_hidden_size == 1 — kept as in upstream.
        h_e = torch.squeeze(transform_h_e, axis=1)
        t_e = torch.squeeze(transform_t_e, axis=1)
        # [b, d]
        return h_e, r_e, t_e

    def forward(self, h, r, t):
        """Score a batch of triples with the L1/L2 translational distance.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids.
            t (Tensor): Tail entity ids.

        Returns:
            Tensor: per-triple distance ||h' + r - t'|| (lower is better).
        """
        h_e, r_e, t_e = self.embed(h, r, t)

        norm_h_e = F.normalize(h_e, p=2, dim=-1)
        norm_r_e = F.normalize(r_e, p=2, dim=-1)
        norm_t_e = F.normalize(t_e, p=2, dim=-1)

        if self.l1_flag:
            return torch.norm(norm_h_e + norm_r_e - norm_t_e, p=1, dim=-1)

        return torch.norm(norm_h_e + norm_r_e - norm_t_e, p=2, dim=-1)
class SLM(PairwiseModel):
    """Single Layer Model — the neural baseline of NTN (Socher et al., NIPS 2013).

    Head and tail embeddings are passed through one nonlinear layer,
    ``tanh(h @ M1 + t @ M2)``; the score is the negated dot product of
    that hidden representation with the relation embedding.

    Args:
        **kwargs: must provide ``tot_entity``, ``tot_relation``,
            ``rel_hidden_size`` and ``ent_hidden_size``.

    .. _Reasoning With Neural Tensor Networks for Knowledge Base Completion:
        https://nlp.stanford.edu/pubs/SocherChenManningNg_NIPS2013.pdf
    """

    def __init__(self, **kwargs):
        super(SLM, self).__init__(self.__class__.__name__.lower())
        required = ["tot_entity", "tot_relation", "rel_hidden_size", "ent_hidden_size"]
        self.__dict__.update(self.load_params(required, kwargs))

        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.ent_hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.rel_hidden_size)
        # Matrices mapping entity space -> relation space for head/tail.
        self.mr1 = NamedEmbedding("mr1", self.ent_hidden_size, self.rel_hidden_size)
        self.mr2 = NamedEmbedding("mr2", self.ent_hidden_size, self.rel_hidden_size)
        for table in (self.ent_embeddings, self.rel_embeddings, self.mr1, self.mr2):
            nn.init.xavier_uniform_(table.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.mr1,
            self.mr2,
        ]
        self.loss = Criterion.pairwise_hinge

    def embed(self, h, r, t):
        """Look up raw (head, relation, tail) embeddings.

        Args:
            h (Tensor): head entity ids.
            r (Tensor): relation ids.
            t (Tensor): tail entity ids.

        Returns:
            tuple of Tensors: head, relation and tail embeddings.
        """
        return self.ent_embeddings(h), self.rel_embeddings(r), self.ent_embeddings(t)

    def forward(self, h, r, t):
        """Return ``-<r, tanh(M1 h + M2 t)>`` per triple (lower is better)."""
        head, rel, tail = self.embed(h, r, t)
        head = F.normalize(head, p=2, dim=-1)
        rel = F.normalize(rel, p=2, dim=-1)
        tail = F.normalize(tail, p=2, dim=-1)
        return -torch.sum(rel * self.layer(head, tail), -1)

    def layer(self, h, t):
        """Single hidden layer combining head and tail representations.

        Args:
            h (Tensor): normalized head embeddings, [m, ent_hidden_size].
            t (Tensor): normalized tail embeddings, [m, ent_hidden_size].
        """
        hidden_h = torch.matmul(h, self.mr1.weight)  # [m, d] @ [d, k]
        hidden_t = torch.matmul(t, self.mr2.weight)  # [m, d] @ [d, k]
        return torch.tanh(hidden_h + hidden_t)
class SME(PairwiseModel):
    """ `A Semantic Matching Energy Function for Learning with Multi-relational Data`_

    Semantic Matching Energy (SME) is an algorithm for embedding multi-relational data into vector spaces.
    SME conducts semantic matching using neural network architectures. Given a fact (h, r, t), it first projects
    entities and relations to their embeddings in the input layer. Later the relation r is combined with both h and t
    to get gu(h, r) and gv(r, t) in its hidden layer. The score is determined by calculating the matching score of gu and gv.

    There are two versions of SME: a linear version(SMELinear) as well as bilinear(SMEBilinear) version which differ in how the hidden layer is defined.

    Args:
        config (object): Model configuration parameters.

    Portion of the code based on glorotxa_.

    .. _glorotxa: https://github.com/glorotxa/SME/blob/master/model.py

    .. _A Semantic Matching Energy Function for Learning with Multi-relational Data: http://www.thespermwhale.com/jaseweston/papers/ebrm_mlj.pdf
    """

    def __init__(self, **kwargs):
        super(SME, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.hidden_size)
        # g_u branch (head, relation): weight matrices mu1/mu2 and bias bu.
        self.mu1 = NamedEmbedding("mu1", self.hidden_size, self.hidden_size)
        self.mu2 = NamedEmbedding("mu2", self.hidden_size, self.hidden_size)
        self.bu = NamedEmbedding("bu", self.hidden_size, 1)
        # g_v branch (relation, tail): weight matrices mv1/mv2 and bias bv.
        self.mv1 = NamedEmbedding("mv1", self.hidden_size, self.hidden_size)
        self.mv2 = NamedEmbedding("mv2", self.hidden_size, self.hidden_size)
        self.bv = NamedEmbedding("bv", self.hidden_size, 1)

        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.mu1.weight)
        nn.init.xavier_uniform_(self.mu2.weight)
        nn.init.xavier_uniform_(self.bu.weight)
        nn.init.xavier_uniform_(self.mv1.weight)
        nn.init.xavier_uniform_(self.mv2.weight)
        nn.init.xavier_uniform_(self.bv.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.mu1,
            self.mu2,
            self.bu,
            self.mv1,
            self.mv2,
            self.bv,
        ]

        self.loss = Criterion.pairwise_hinge

    def embed(self, h, r, t):
        """Look up raw head, relation and tail embeddings.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t

    def _gu_linear(self, h, r):
        """Linear hidden layer for the (head, relation) pair.

        Args:
            h (Tensor): normalized head embeddings, [b, k].
            r (Tensor): normalized relation embeddings, [b, k].

        Returns:
            Tensor: (mu1 h + mu2 r + bu)^T, shape [b, k].
        """
        mu1h = torch.matmul(self.mu1.weight, h.T) # [k, b]
        mu2r = torch.matmul(self.mu2.weight, r.T) # [k, b]
        return (mu1h + mu2r + self.bu.weight).T # [b, k]

    def _gv_linear(self, r, t):
        """Linear hidden layer for the (relation, tail) pair.

        Args:
            r (Tensor): normalized relation embeddings, [b, k].
            t (Tensor): normalized tail embeddings, [b, k].

        Returns:
            Tensor: (mv1 t + mv2 r + bv)^T, shape [b, k].
        """
        mv1t = torch.matmul(self.mv1.weight, t.T) # [k, b]
        mv2r = torch.matmul(self.mv2.weight, r.T) # [k, b]
        return (mv1t + mv2r + self.bv.weight).T # [b, k]

    def forward(self, h, r, t):
        """Function to that performs semanting matching.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail ids of the triple.

        Returns:
            Tensors: Returns the semantic matchin score (negated dot
            product of the two hidden-layer outputs; lower is better).
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        norm_h = F.normalize(h_e, p=2, dim=-1)
        norm_r = F.normalize(r_e, p=2, dim=-1)
        norm_t = F.normalize(t_e, p=2, dim=-1)

        return -torch.sum(self._gu_linear(norm_h, norm_r) * self._gv_linear(norm_r, norm_t), 1)
class SME_BL(SME):
    """Bilinear Semantic Matching Energy.

    Identical to SME_ except that the hidden layers combine the two
    projected inputs with an element-wise product instead of a sum,
    and the final score is not negated.

    Args:
        **kwargs: forwarded unchanged to ``SME.__init__``.

    .. _`SME`: api.html#pykg2vec.models.pairwise.SME
    """

    def __init__(self, **kwargs):
        super(SME_BL, self).__init__(**kwargs)
        self.model_name = self.__class__.__name__.lower()
        self.loss = Criterion.pairwise_hinge

    def _gu_bilinear(self, h, r):
        """Bilinear hidden layer for the (head, relation) pair.

        Args:
            h (Tensor): normalized head embeddings, [b, k].
            r (Tensor): normalized relation embeddings, [b, k].

        Returns:
            Tensor: ((mu1 h) * (mu2 r) + bu)^T, shape [b, k].
        """
        left = torch.matmul(self.mu1.weight, h.T)   # [k, b]
        right = torch.matmul(self.mu2.weight, r.T)  # [k, b]
        return (left * right + self.bu.weight).T    # [b, k]

    def _gv_bilinear(self, r, t):
        """Bilinear hidden layer for the (relation, tail) pair.

        Args:
            r (Tensor): normalized relation embeddings, [b, k].
            t (Tensor): normalized tail embeddings, [b, k].

        Returns:
            Tensor: ((mv1 t) * (mv2 r) + bv)^T, shape [b, k].
        """
        left = torch.matmul(self.mv1.weight, t.T)   # [k, b]
        right = torch.matmul(self.mv2.weight, r.T)  # [k, b]
        return (left * right + self.bv.weight).T    # [b, k]

    def forward(self, h, r, t):
        """Semantic matching score: sum over the element-wise product of
        the g_u and g_v hidden-layer outputs.

        Args:
            h (Tensor): head entity ids.
            r (Tensor): relation ids.
            t (Tensor): tail entity ids.

        Returns:
            Tensor: per-triple matching score.
        """
        head, rel, tail = self.embed(h, r, t)
        head = F.normalize(head, p=2, dim=-1)
        rel = F.normalize(rel, p=2, dim=-1)
        tail = F.normalize(tail, p=2, dim=-1)
        return torch.sum(self._gu_bilinear(head, rel) * self._gv_bilinear(rel, tail), -1)
class RotatE(PairwiseModel):
    """
    `Rotate-Knowledge graph embedding by relation rotation in complex space`_ (RotatE)
    models the entities and the relations in the complex vector space.
    The translational relation in RotatE is defined as the element-wise 2D
    rotation in which the head entity h will be rotated to the tail entity t by
    multiplying the unit-length relation r in complex number form.

    Args:
        config (object): Model configuration parameters.

    .. _Rotate-Knowledge graph embedding by relation rotation in complex space:
        https://openreview.net/pdf?id=HkgEQnRqYQ
    """

    def __init__(self, **kwargs):
        super(RotatE, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "margin"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        # Uniform initialization bound; the 2.0 is presumably the
        # epsilon constant of the original implementation — TODO confirm.
        self.embedding_range = (self.margin + 2.0) / self.hidden_size

        # Entities are complex numbers (real + imaginary tables);
        # relations store only a phase (unit modulus by construction).
        self.ent_embeddings = NamedEmbedding("ent_embeddings_real", self.tot_entity, self.hidden_size)
        self.ent_embeddings_imag = NamedEmbedding("ent_embeddings_imag", self.tot_entity, self.hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embeddings_real", self.tot_relation, self.hidden_size)
        nn.init.uniform_(self.ent_embeddings.weight, -self.embedding_range, self.embedding_range)
        nn.init.uniform_(self.ent_embeddings_imag.weight, -self.embedding_range, self.embedding_range)
        nn.init.uniform_(self.rel_embeddings.weight, -self.embedding_range, self.embedding_range)
        self.parameter_list = [
            self.ent_embeddings,
            self.ent_embeddings_imag,
            self.rel_embeddings,
        ]

        # NOTE(review): 'pariwise_logistic' matches Criterion's attribute
        # spelling upstream — verify before "fixing" the typo.
        self.loss = Criterion.pariwise_logistic

    def embed(self, h, r, t):
        """Look up complex embeddings for a batch of triples.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns real and imaginary values of head, relation and tail embedding.
        """
        pi = 3.14159265358979323846
        h_e_r = self.ent_embeddings(h)
        h_e_i = self.ent_embeddings_imag(h)
        r_e_r = self.rel_embeddings(r)
        t_e_r = self.ent_embeddings(t)
        t_e_i = self.ent_embeddings_imag(t)
        # Map the stored relation value into a phase in [-pi, pi], then
        # take cos/sin as the real/imaginary parts of a unit rotation.
        r_e_r = r_e_r / (self.embedding_range / pi)
        r_e_i = torch.sin(r_e_r)
        r_e_r = torch.cos(r_e_r)
        return h_e_r, h_e_i, r_e_r, r_e_i, t_e_r, t_e_i

    def forward(self, h, r, t):
        # Complex rotation h ∘ r minus t, split into real/imag parts.
        h_e_r, h_e_i, r_e_r, r_e_i, t_e_r, t_e_i = self.embed(h, r, t)
        score_r = h_e_r * r_e_r - h_e_i * r_e_i - t_e_r
        score_i = h_e_r * r_e_i + h_e_i * r_e_r - t_e_i
        # Distance uses the squared modulus summed over dimensions
        # (not the paper's L2 norm) — kept as in upstream.
        return -(self.margin - torch.sum(score_r**2 + score_i**2, axis=-1))
class Rescal(PairwiseModel):
    """
    `A Three-Way Model for Collective Learning on Multi-Relational Data`_ (RESCAL) is a tensor factorization approach to knowledge representation learning,
    which is able to perform collective learning via the latent components of the factorization.

    Rescal is a latent feature model where each relation is represented as a matrix modeling the iteraction between latent factors. It utilizes a weight matrix which specify how much the latent features of head and tail entities interact in the relation.

    Portion of the code based on mnick_ and `OpenKE_Rescal`_.

    Args:
        config (object): Model configuration parameters.

    .. _mnick: https://github.com/mnick/rescal.py/blob/master/rescal/rescal.py

    .. _OpenKE_Rescal: https://github.com/thunlp/OpenKE/blob/master/models/RESCAL.py

    .. _A Three-Way Model for Collective Learning on Multi-Relational Data : http://www.icml-2011.org/papers/438_icmlpaper.pdf
    """

    def __init__(self, **kwargs):
        super(Rescal, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "margin"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        # Entities are k-dim vectors; each relation is a flattened
        # k x k interaction matrix.
        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.hidden_size)
        self.rel_matrices = NamedEmbedding("rel_matrices", self.tot_relation, self.hidden_size * self.hidden_size)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_matrices.weight)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_matrices,
        ]

        self.loss = Criterion.pairwise_hinge

    def embed(self, h, r, t):
        """ Function to get the embedding value.

        Renormalizes the full embedding tables in place on every call,
        then returns the looked-up rows reshaped for batched matmul.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: head [b, k, 1], relation [b, k, k], tail [b, k, 1].
        """
        k = self.hidden_size
        # NOTE: in-place row-wise L2 normalization of the whole weight
        # tables happens on each embed() call, not just at init.
        self.ent_embeddings.weight.data = self.get_normalized_data(self.ent_embeddings, self.tot_entity, dim=-1)
        self.rel_matrices.weight.data = self.get_normalized_data(self.rel_matrices, self.tot_relation, dim=-1)

        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_matrices(r)
        emb_t = self.ent_embeddings(t)
        emb_h = emb_h.view(-1, k, 1)
        emb_r = emb_r.view(-1, k, k)
        emb_t = emb_t.view(-1, k, 1)

        return emb_h, emb_r, emb_t

    def forward(self, h, r, t):
        # Bilinear score h^T M_r t, negated so lower means more plausible.
        h_e, r_e, t_e = self.embed(h, r, t)
        # dim of h: [m, k, 1]
        #        r: [m, k, k]
        #        t: [m, k, 1]
        return -torch.sum(h_e * torch.matmul(r_e, t_e), [1, 2])

    @staticmethod
    def get_normalized_data(embedding, num_embeddings, p=2, dim=1):
        """Return the embedding table with each row scaled to unit p-norm."""
        norms = torch.norm(embedding.weight, p, dim).data
        return embedding.weight.data.div(norms.view(num_embeddings, 1).expand_as(embedding.weight))
class NTN(PairwiseModel):
    """
    `Reasoning With Neural Tensor Networks for Knowledge Base Completion`_ (NTN)
    is a neural tensor network which represents entities as an average of their
    constituting word vectors. It then projects entities to their vector
    embeddings in the input layer. The two entities are then combined through a
    relation-specific bilinear tensor plus a standard linear layer and mapped to
    a non-linear hidden layer; the score is the dot product of the relation
    embedding with that hidden representation.

    Portion of the code based on `siddharth-agrawal`_.

    Args:
        config (object): Model configuration parameters.

    .. _siddharth-agrawal:
        https://github.com/siddharth-agrawal/Neural-Tensor-Network/blob/master/neuralTensorNetwork.py

    .. _Reasoning With Neural Tensor Networks for Knowledge Base Completion:
        https://nlp.stanford.edu/pubs/SocherChenManningNg_NIPS2013.pdf
    """

    def __init__(self, **kwargs):
        super(NTN, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "ent_hidden_size", "rel_hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.ent_hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.rel_hidden_size)
        # Standard-layer weights mapping head/tail entity vectors to relation space.
        self.mr1 = NamedEmbedding("mr1", self.ent_hidden_size, self.rel_hidden_size)
        self.mr2 = NamedEmbedding("mr2", self.ent_hidden_size, self.rel_hidden_size)
        # Bias, and the bilinear tensor (one flattened d x d slice per hidden unit).
        self.br = NamedEmbedding("br", 1, self.rel_hidden_size)
        self.mr = NamedEmbedding("mr", self.rel_hidden_size, self.ent_hidden_size*self.ent_hidden_size)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.mr1.weight)
        nn.init.xavier_uniform_(self.mr2.weight)
        nn.init.xavier_uniform_(self.br.weight)
        nn.init.xavier_uniform_(self.mr.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.mr1,
            self.mr2,
            self.br,
            self.mr,
        ]

        self.loss = Criterion.pairwise_hinge

    def train_layer(self, h, t):
        """ Hidden layer: tanh(h^T W_r t + M1 h + M2 t + b).

        Args:
            h (Tensor): normalized head embeddings, [m, ent_hidden_size].
            t (Tensor): normalized tail embeddings, [m, ent_hidden_size].

        Returns:
            Tensor: hidden representation, [m, rel_hidden_size].
        """
        mr1h = torch.matmul(h, self.mr1.weight) # h => [m, self.ent_hidden_size], self.mr1 => [self.ent_hidden_size, self.rel_hidden_size]
        mr2t = torch.matmul(t, self.mr2.weight) # t => [m, self.ent_hidden_size], self.mr2 => [self.ent_hidden_size, self.rel_hidden_size]

        # Bilinear term: apply each of the rel_hidden_size tensor slices
        # to every (h, t) pair, then collapse the entity dimension.
        expanded_h = h.unsqueeze(dim=0).repeat(self.rel_hidden_size, 1, 1) # [self.rel_hidden_size, m, self.ent_hidden_size]
        expanded_t = t.unsqueeze(dim=-1) # [m, self.ent_hidden_size, 1]

        temp = (torch.matmul(expanded_h, self.mr.weight.view(self.rel_hidden_size, self.ent_hidden_size, self.ent_hidden_size))).permute(1, 0, 2) # [m, self.rel_hidden_size, self.ent_hidden_size]
        htmrt = torch.squeeze(torch.matmul(temp, expanded_t), dim=-1) # [m, self.rel_hidden_size]

        # torch.tanh replaces the deprecated torch.nn.functional.tanh
        # (identical math, supported API).
        return torch.tanh(htmrt + mr1h + mr2t + self.br.weight)

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t

    def forward(self, h, r, t):
        """Score triples as -<r, layer(h, t)>; lower means more plausible."""
        h_e, r_e, t_e = self.embed(h, r, t)
        norm_h = F.normalize(h_e, p=2, dim=-1)
        norm_r = F.normalize(r_e, p=2, dim=-1)
        norm_t = F.normalize(t_e, p=2, dim=-1)
        return -torch.sum(norm_r*self.train_layer(norm_h, norm_t), -1)

    def get_reg(self, h, r, t):
        """L2 regularization term over all parameter tables, scaled by lmbda.

        The h/r/t arguments are unused but kept for interface
        compatibility with the trainer.
        """
        return self.lmbda*torch.sqrt(sum(torch.sum(torch.pow(var.weight, 2)) for var in self.parameter_list))
class KG2E(PairwiseModel):
    """
    `Learning to Represent Knowledge Graphs with Gaussian Embedding`_ (KG2E)

    Instead of assuming entities and relations as deterministic points in the
    embedding vector spaces, KG2E models both entities and relations (h, r and t)
    using random variables derived from multivariate Gaussian distribution.
    KG2E then evaluates a fact using translational relation by evaluating the
    distance between two distributions, r and t-h. KG2E provides two distance
    measures (KL-divergence and estimated likelihood).

    Portion of the code based on `mana-ysh's repository`_.

    Args:
        config (object): Model configuration parameters.

    .. _`mana-ysh's repository`:
        https://github.com/mana-ysh/gaussian-embedding/blob/master/src/models/gaussian_model.py

    .. _Learning to Represent Knowledge Graphs with Gaussian Embedding:
        https://pdfs.semanticscholar.org/0ddd/f37145689e5f2899f8081d9971882e6ff1e9.pdf
    """

    def __init__(self, **kwargs):
        """Create mu/sigma embeddings and clamp sigmas into [cmin, cmax]."""
        super(KG2E, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "cmax", "cmin"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        # the mean for each element in the embedding space.
        self.ent_embeddings_mu = NamedEmbedding("ent_embeddings_mu", self.tot_entity, self.hidden_size)
        self.rel_embeddings_mu = NamedEmbedding("rel_embeddings_mu", self.tot_relation, self.hidden_size)
        # as the paper suggested, sigma is simplified to be the diagonal element in the covariance matrix.
        self.ent_embeddings_sigma = NamedEmbedding("ent_embeddings_sigma", self.tot_entity, self.hidden_size)
        self.rel_embeddings_sigma = NamedEmbedding("rel_embeddings_sigma", self.tot_relation, self.hidden_size)
        # NOTE: init order consumes RNG state sequentially; keep it stable for reproducibility.
        nn.init.xavier_uniform_(self.ent_embeddings_mu.weight)
        nn.init.xavier_uniform_(self.rel_embeddings_mu.weight)
        nn.init.xavier_uniform_(self.ent_embeddings_sigma.weight)
        nn.init.xavier_uniform_(self.rel_embeddings_sigma.weight)
        self.parameter_list = [
            self.ent_embeddings_mu,
            self.ent_embeddings_sigma,
            self.rel_embeddings_mu,
            self.rel_embeddings_sigma,
        ]
        # Bound the (diagonal) covariances: sigma = max(cmin, min(cmax, sigma + 1.0)).
        # The +1.0 shift keeps the initial xavier values around 1 before clamping.
        min_ent = torch.min(torch.FloatTensor().new_full(self.ent_embeddings_sigma.weight.shape, self.cmax), torch.add(self.ent_embeddings_sigma.weight, 1.0))
        self.ent_embeddings_sigma.weight = nn.Parameter(torch.max(torch.FloatTensor().new_full(self.ent_embeddings_sigma.weight.shape, self.cmin), min_ent))
        min_rel = torch.min(torch.FloatTensor().new_full(self.rel_embeddings_sigma.weight.shape, self.cmax), torch.add(self.rel_embeddings_sigma.weight, 1.0))
        self.rel_embeddings_sigma.weight = nn.Parameter(torch.max(torch.FloatTensor().new_full(self.rel_embeddings_sigma.weight.shape, self.cmin), min_rel))
        self.loss = Criterion.pairwise_hinge

    def forward(self, h, r, t):
        """Return the KL-divergence-based score for a batch of triples."""
        h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma = self.embed(h, r, t)
        return self._cal_score_kl_divergence(h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma)

    def embed(self, h, r, t):
        """
        Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            tuple: Returns a 6-tuple of (mu, sigma) embedding tensors for head,
            relation and tail, each row-normalized by its L2 norm.
        """
        emb_h_mu = self.ent_embeddings_mu(h)
        emb_r_mu = self.rel_embeddings_mu(r)
        emb_t_mu = self.ent_embeddings_mu(t)
        emb_h_sigma = self.ent_embeddings_sigma(h)
        emb_r_sigma = self.rel_embeddings_sigma(r)
        emb_t_sigma = self.ent_embeddings_sigma(t)
        emb_h_mu = self.get_normalized_data(emb_h_mu)
        emb_r_mu = self.get_normalized_data(emb_r_mu)
        emb_t_mu = self.get_normalized_data(emb_t_mu)
        emb_h_sigma = self.get_normalized_data(emb_h_sigma)
        emb_r_sigma = self.get_normalized_data(emb_r_sigma)
        emb_t_sigma = self.get_normalized_data(emb_t_sigma)
        return emb_h_mu, emb_h_sigma, emb_r_mu, emb_r_sigma, emb_t_mu, emb_t_sigma

    @staticmethod
    def get_normalized_data(embedding, p=2, dim=1):
        """Row-normalize *embedding* by its p-norm along *dim*."""
        norms = torch.norm(embedding, p, dim)
        return embedding.div(norms.view(-1, 1).expand_as(embedding))

    def _cal_score_kl_divergence(self, h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma):
        """ It calculates the kl_divergence as a score.

        trace_fac: tr(sigma_r-1 * (sigma_h + sigma_t))
        mul_fac: (mu_h + mu_r - mu_t).T * sigma_r-1 * (mu_h + mu_r - mu_t)
        det_fac: log(det(sigma_r)/det(sigma_h + sigma_t))

        Args:
            h_mu (Tensor): Mean of the embedding value of the head.
            h_sigma(Tensor): Variance of the embedding value of the head.
            r_mu(Tensor): Mean of the embedding value of the relation.
            r_sigma(Tensor): Variance of the embedding value of the relation.
            t_mu(Tensor): Mean of the embedding value of the tail.
            t_sigma(Tensor): Variance of the embedding value of the tail.

        Returns:
            Tensor: Score after calculating the KL_Divergence.
        """
        # Diagonal covariances make every factor an element-wise sum
        # (no matrix inverses/determinants needed).
        comp_sigma = h_sigma + r_sigma
        comp_mu = h_mu + r_mu
        trace_fac = (comp_sigma / t_sigma).sum(-1)
        mul_fac = ((t_mu - comp_mu) ** 2 / t_sigma).sum(-1)
        det_fac = (torch.log(t_sigma) - torch.log(comp_sigma)).sum(-1)
        return trace_fac + mul_fac + det_fac - self.hidden_size
class HoLE(PairwiseModel):
    """
    `Holographic Embeddings of Knowledge Graphs`_. (HoLE) employs the circular correlation to create composition correlations. It
    is able to represent and capture the interactions between entities and relations
    while being efficient to compute, easier to train and scalable to large dataset.

    Args:
        config (object): Model configuration parameters.

    .. _Holographic Embeddings of Knowledge Graphs:
        https://arxiv.org/pdf/1510.04935.pdf
    """

    def __init__(self, **kwargs):
        """Create entity/relation embeddings from tot_entity/tot_relation/hidden_size."""
        super(HoLE, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "cmax", "cmin"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, self.hidden_size)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, self.hidden_size)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]
        self.loss = Criterion.pairwise_hinge

    def forward(self, h, r, t):
        """Score triples via the circular correlation of head and tail embeddings.

        Returns the negated sigmoid of <r, corr(h, t)>, where
        corr(h, t) = irfft(conj(rfft(h)) * rfft(t)).
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        r_e = F.normalize(r_e, p=2, dim=-1)
        # The old torch.fft/torch.ifft *function* API (with explicit real/imag
        # stacking) was removed in PyTorch 1.8; torch.fft.rfft/irfft on the
        # real-valued inputs computes the identical circular correlation and
        # drops the stack/unbind bookkeeping.
        e = torch.fft.irfft(
            torch.conj(torch.fft.rfft(h_e, dim=-1)) * torch.fft.rfft(t_e, dim=-1),
            n=h_e.shape[-1],
            dim=-1,
        )
        # torch.sigmoid replaces the deprecated F.sigmoid alias (same values).
        return -torch.sigmoid(torch.sum(r_e * e, 1))

    def embed(self, h, r, t):
        """
        Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            tuple: Returns a 3-tuple of head, relation and tail embedding tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t
| 46,920 | 40.050744 | 286 | py |
pykg2vec | pykg2vec-master/pykg2vec/models/projection.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from pykg2vec.models.KGMeta import ProjectionModel
from pykg2vec.models.Domain import NamedEmbedding
from pykg2vec.utils.criterion import Criterion
class ConvE(ProjectionModel):
    """
    `Convolutional 2D Knowledge Graph Embeddings`_ (ConvE) is a multi-layer convolutional network model for link prediction,
    it is an embedding model which is highly parameter efficient.

    ConvE is the first non-linear model that uses a global 2D convolution
    operation on the combined head entity and relation embedding vectors. The
    obtained feature maps are flattened and then transformed through a fully
    connected layer; the projected target vector is finally compared (inner
    product) with the latent representation of every entity.

    Args:
        config (object): Model configuration parameters.

    .. _Convolutional 2D Knowledge Graph Embeddings:
        https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/download/17366/15884
    """

    def __init__(self, **kwargs):
        """Build embeddings, conv/batch-norm/dropout stack and the dense head."""
        super(ConvE, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "hidden_size_1",
                      "lmbda", "input_dropout", "feature_map_dropout", "hidden_dropout"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        # hidden_size must be divisible by hidden_size_1 for the 2D reshape to hold.
        self.hidden_size_2 = self.hidden_size // self.hidden_size_1
        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        self.ent_embeddings = NamedEmbedding("ent_embedding", num_total_ent, k)
        # because conve considers the reciprocal relations,
        # so every rel should have its mirrored rev_rel in ConvE.
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel*2, k)
        # Per-entity output bias, shape [1, tot_entity].
        self.b = NamedEmbedding("b", 1, num_total_ent)
        self.bn0 = nn.BatchNorm2d(1)
        self.inp_drop = nn.Dropout(self.input_dropout)
        self.conv2d_1 = nn.Conv2d(1, 32, (3, 3), stride=(1, 1))
        self.bn1 = nn.BatchNorm2d(32)
        self.feat_drop = nn.Dropout2d(self.feature_map_dropout)
        self.fc = nn.Linear((2*self.hidden_size_2-3+1)*(self.hidden_size_1-3+1)*32, k) # use the conv output shape * out_channel
        self.hidden_drop = nn.Dropout(self.hidden_dropout)
        self.bn2 = nn.BatchNorm1d(k)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.b,
        ]
        self.loss = Criterion.multi_class_bce

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t

    def embed2(self, e, r):
        """Look up one entity embedding and one relation embedding."""
        emb_e = self.ent_embeddings(e)
        emb_r = self.rel_embeddings(r)
        return emb_e, emb_r

    def inner_forward(self, st_inp, first_dimension_size):
        """Implements the forward pass layers of the algorithm."""
        x = self.bn0(st_inp) # 2d batch norm over feature dimension.
        x = self.inp_drop(x) # [b, 1, 2*hidden_size_2, hidden_size_1]
        x = self.conv2d_1(x) # [b, 32, 2*hidden_size_2-3+1, hidden_size_1-3+1]
        x = self.bn1(x) # 2d batch normalization across feature dimension
        x = torch.relu(x)
        x = self.feat_drop(x)
        x = x.view(first_dimension_size, -1) # flatten => [b, 32*(2*hidden_size_2-3+1)*(hidden_size_1-3+1)
        x = self.fc(x) # dense layer => [b, k]
        x = self.hidden_drop(x)
        # NOTE(review): bn2 is applied only while training; at eval time the
        # un-normalized activations flow through. Looks deliberate in the
        # original implementation, but worth confirming.
        if self.training:
            x = self.bn2(x) # batch normalization across the last axis
        x = torch.relu(x)
        x = torch.matmul(x, self.ent_embeddings.weight.T) # [b, k] * [k, tot_ent] => [b, tot_ent]
        x = torch.add(x, self.b.weight) # add a bias value
        return torch.sigmoid(x) # sigmoid activation

    def forward(self, e, r, direction="tail"):
        """Score every entity as the completion of (e, r, ?) or (?, r, e).

        For head prediction the reciprocal relation (r + tot_relation) is used.
        """
        assert direction in ("head", "tail"), "Unknown forward direction"
        if direction == "head":
            e_emb, r_emb = self.embed2(e, r + self.tot_relation)
        else:
            e_emb, r_emb = self.embed2(e, r)
        # Reshape both embeddings to 2D "images" and stack them vertically.
        stacked_e = e_emb.view(-1, 1, self.hidden_size_2, self.hidden_size_1)
        stacked_r = r_emb.view(-1, 1, self.hidden_size_2, self.hidden_size_1)
        stacked_er = torch.cat([stacked_e, stacked_r], 2)
        preds = self.inner_forward(stacked_er, list(e.shape)[0])
        return preds

    def predict_tail_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for tail prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="tail"), k=topk)
        return rank

    def predict_head_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for head prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="head"), k=topk)
        return rank
class ProjE_pointwise(ProjectionModel):
    """
    `ProjE-Embedding Projection for Knowledge Graph Completion`_. (ProjE)

    Instead of measuring the distance or matching scores between the pair of the
    head entity and relation and then tail entity in embedding space ((h,r) vs (t)),
    ProjE projects the entity candidates onto a target vector representing the
    input data. The loss in ProjE is computed by the cross-entropy between
    the projected target vector and binary label vector, where the included
    entities will have value 0 if in negative sample set and value 1 if in
    positive sample set.

    Args:
        config (object): Model configuration parameters.

    .. _ProjE-Embedding Projection for Knowledge Graph Completion:
        https://arxiv.org/abs/1611.05425
    """

    def __init__(self, **kwargs):
        """Build embeddings plus the combination weights/biases of Eq. (2)-(3)."""
        super(ProjE_pointwise, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda", "hidden_dropout"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        self.device = kwargs["device"]
        self.ent_embeddings = NamedEmbedding("ent_embedding", num_total_ent, k)
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel, k)
        # De*/Dr* are diagonal combination weights, bc* the combination biases;
        # the "1" suffix pair is used for tail prediction (f1), "2" for head (f2).
        self.bc1 = NamedEmbedding("bc1", 1, k)
        self.De1 = NamedEmbedding("De1", 1, k)
        self.Dr1 = NamedEmbedding("Dr1", 1, k)
        self.bc2 = NamedEmbedding("bc2", 1, k)
        self.De2 = NamedEmbedding("De2", 1, k)
        self.Dr2 = NamedEmbedding("Dr2", 1, k)
        # NOTE: init order consumes RNG state sequentially; keep it stable.
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.bc1.weight)
        nn.init.xavier_uniform_(self.De1.weight)
        nn.init.xavier_uniform_(self.Dr1.weight)
        nn.init.xavier_uniform_(self.bc2.weight)
        nn.init.xavier_uniform_(self.De2.weight)
        nn.init.xavier_uniform_(self.Dr2.weight)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.bc1,
            self.De1,
            self.Dr1,
            self.bc2,
            self.De2,
            self.Dr2,
        ]
        self.loss = Criterion.multi_class

    def get_reg(self, h, r, t):
        """L1 regularization over all model weights, scaled by lmbda.

        The h/r/t arguments are unused; kept for interface parity.
        """
        return self.lmbda*(torch.sum(torch.abs(self.De1.weight) + torch.abs(self.Dr1.weight)) +
                           torch.sum(torch.abs(self.De2.weight) + torch.abs(self.Dr2.weight)) +
                           torch.sum(torch.abs(self.ent_embeddings.weight)) + torch.sum(torch.abs(self.rel_embeddings.weight)))

    def forward(self, e, r, er_e2, direction="tail"):
        """Return the pointwise cross-entropy loss against candidate labels er_e2.

        er_e2 holds +1 for positive candidates and negative values for sampled
        negatives; both sides of the BCE are masked via max(0, +/-er_e2).
        """
        assert direction in ("head", "tail"), "Unknown forward direction"
        emb_hr_e = self.ent_embeddings(e)  # [m, k]
        emb_hr_r = self.rel_embeddings(r)  # [m, k]
        # NOTE(review): torch.dropout(..., train=True) keeps dropout active even
        # in eval mode — presumably intentional for this loss-only path; verify.
        if direction == "tail":
            ere2_sigmoid = ProjE_pointwise.g(torch.dropout(self.f1(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
        else:
            ere2_sigmoid = ProjE_pointwise.g(torch.dropout(self.f2(emb_hr_e, emb_hr_r), p=self.hidden_dropout, train=True), self.ent_embeddings.weight)
        ere2_loss_left = -torch.sum((torch.log(torch.clamp(ere2_sigmoid, 1e-10, 1.0)) * torch.max(torch.FloatTensor([0]).to(self.device), er_e2)))
        ere2_loss_right = -torch.sum((torch.log(torch.clamp(1 - ere2_sigmoid, 1e-10, 1.0)) * torch.max(torch.FloatTensor([0]).to(self.device), torch.neg(er_e2))))
        hrt_loss = ere2_loss_left + ere2_loss_right
        return hrt_loss

    def f1(self, h, r):
        """Defines forward layer for head.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
        """
        return torch.tanh(h * self.De1.weight + r * self.Dr1.weight + self.bc1.weight)

    def f2(self, t, r):
        """Defines forward layer for tail.

        Args:
            t (Tensor): Tail entities ids.
            r (Tensor): Relation ids of the triple.
        """
        return torch.tanh(t * self.De2.weight + r * self.Dr2.weight + self.bc2.weight)

    def predict_tail_rank(self, h, r, topk=-1):
        """Return entity indices ranked best-first as tail candidates."""
        emb_h = self.ent_embeddings(h)  # [1, k]
        emb_r = self.rel_embeddings(r)  # [1, k]
        hrt_sigmoid = -ProjE_pointwise.g(self.f1(emb_h, emb_r), self.ent_embeddings.weight)
        _, rank = torch.topk(hrt_sigmoid, k=topk)
        return rank

    def predict_head_rank(self, t, r, topk=-1):
        """Return entity indices ranked best-first as head candidates."""
        emb_t = self.ent_embeddings(t)  # [m, k]
        emb_r = self.rel_embeddings(r)  # [m, k]
        hrt_sigmoid = -ProjE_pointwise.g(self.f2(emb_t, emb_r), self.ent_embeddings.weight)
        _, rank = torch.topk(hrt_sigmoid, k=topk)
        return rank

    @staticmethod
    def g(f, w):
        """Defines activation layer.

        Args:
            f (Tensor): output of the forward layers.
            w (Tensor): Matrix for multiplication.
        """
        # [b, k] [k, tot_ent]
        return torch.sigmoid(torch.matmul(f, w.T))
class TuckER(ProjectionModel):
    """
    `TuckER-Tensor Factorization for Knowledge Graph Completion`_ (TuckER)
    is a Tensor-factorization-based embedding technique based on
    the Tucker decomposition of a third-order binary tensor of triplets. Although
    being fully expressive, the number of parameters used in Tucker only grows linearly
    with respect to embedding dimension as the number of entities or relations in a
    knowledge graph increases. The authors also showed that models such as RESCAL,
    DistMult and ComplEx are all special cases of TuckER.

    Args:
        config (object): Model configuration parameters.

    .. _TuckER-Tensor Factorization for Knowledge Graph Completion:
        https://arxiv.org/pdf/1901.09590.pdf
    """

    def __init__(self, **kwargs):
        """Build entity/relation embeddings and the core tensor W."""
        super(TuckER, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "ent_hidden_size",
                      "rel_hidden_size", "lmbda", "input_dropout",
                      "hidden_dropout1", "hidden_dropout2"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        self.d1 = self.ent_hidden_size
        self.d2 = self.rel_hidden_size
        self.ent_embeddings = NamedEmbedding("ent_embedding", num_total_ent, self.d1)
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel, self.d2)
        # Core tensor of the Tucker decomposition, flattened to [d2, d1*d1].
        self.W = NamedEmbedding("W", self.d2, self.d1 * self.d1)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.W.weight)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.W,
        ]
        self.inp_drop = nn.Dropout(self.input_dropout)
        # NOTE(review): these assignments replace the float hyperparameters
        # self.hidden_dropout1/2 (set via param_dict above) with Dropout modules
        # of the same name — works, but the original rates are no longer readable.
        self.hidden_dropout1 = nn.Dropout(self.hidden_dropout1)
        self.hidden_dropout2 = nn.Dropout(self.hidden_dropout2)
        self.loss = Criterion.multi_class_bce

    def forward(self, e1, r, direction="head"):
        """Implementation of the layer.

        Args:
            e1(Tensor): entities id.
            r(Tensor): Relation id.

        Returns:
            Tensors: Returns the activation values.
        """
        # NOTE(review): `direction` is validated but otherwise unused — head and
        # tail prediction share the same computation here; confirm this is intended.
        assert direction in ("head", "tail"), "Unknown forward direction"
        e1 = self.ent_embeddings(e1)
        e1 = F.normalize(e1, p=2, dim=1)
        e1 = self.inp_drop(e1)
        e1 = e1.view(-1, 1, self.d1)
        rel = self.rel_embeddings(r)
        # Contract the relation embedding with the core tensor: W_mat is the
        # per-relation [d1, d1] mixing matrix.
        W_mat = torch.matmul(rel, self.W.weight.view(self.d2, -1))
        W_mat = W_mat.view(-1, self.d1, self.d1)
        W_mat = self.hidden_dropout1(W_mat)
        x = torch.matmul(e1, W_mat)
        x = x.view(-1, self.d1)
        x = F.normalize(x, p=2, dim=1)
        x = self.hidden_dropout2(x)
        # Inner product with every entity embedding -> [b, tot_entity] scores.
        x = torch.matmul(x, self.ent_embeddings.weight.T)
        # NOTE(review): F.sigmoid is a deprecated alias of torch.sigmoid.
        return F.sigmoid(x)

    def predict_tail_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for tail prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="tail"), k=topk)
        return rank

    def predict_head_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for head prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="head"), k=topk)
        return rank
class InteractE(ProjectionModel):
    """
    `InteractE\: Improving Convolution-based Knowledge Graph Embeddings by Increasing Feature Interactions`_

    Args:
        config (object): Model configuration parameters.

    .. _InteractE\: Improving Convolution-based Knowledge Graph Embeddings by Increasing Feature Interactions:
        https://arxiv.org/abs/1911.00219
    """

    def __init__(self, **kwargs):
        """Build embeddings, the checkered permutations and the conv stack."""
        super(InteractE, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "input_dropout", "hidden_dropout", "feature_map_dropout",
                      "feature_permutation", "num_filters", "kernel_size", "reshape_height", "reshape_width"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        # The configured hidden_size is overridden so the embedding reshapes
        # exactly into a reshape_height x reshape_width grid.
        self.hidden_size = self.reshape_width * self.reshape_height
        self.device = kwargs["device"]
        self.ent_embeddings = NamedEmbedding("ent_embeddings", self.tot_entity, self.hidden_size, padding_idx=None)
        self.rel_embeddings = NamedEmbedding("rel_embeddings", self.tot_relation, self.hidden_size, padding_idx=None)
        self.bceloss = nn.BCELoss()
        self.inp_drop = nn.Dropout(self.input_dropout)
        self.hidden_drop = nn.Dropout(self.hidden_dropout)
        self.feature_map_drop = nn.Dropout2d(self.feature_map_dropout)
        self.bn0 = nn.BatchNorm2d(self.feature_permutation)
        # Flattened conv output: circular padding keeps spatial size unchanged.
        flat_sz_h = self.reshape_height
        flat_sz_w = 2 * self.reshape_width
        self.padding = 0
        self.bn1 = nn.BatchNorm2d(self.num_filters * self.feature_permutation)
        self.flat_sz = flat_sz_h * flat_sz_w * self.num_filters * self.feature_permutation
        self.bn2 = nn.BatchNorm1d(self.hidden_size)
        self.fc = nn.Linear(self.flat_sz, self.hidden_size)
        # Precomputed index permutations interleaving entity/relation features.
        self.chequer_perm = self._get_chequer_perm()
        self.register_parameter("bias", nn.Parameter(torch.zeros(self.tot_entity)))
        # Shared depthwise filter bank, repeated per permutation in forward().
        self.register_parameter("conv_filt", nn.Parameter(torch.zeros(self.num_filters, 1, self.kernel_size, self.kernel_size)))
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.conv_filt)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]
        self.loss = Criterion.multi_class_bce

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t

    def embed2(self, e, r):
        """Look up one entity embedding and one relation embedding."""
        emb_e = self.ent_embeddings(e)
        emb_r = self.rel_embeddings(r)
        return emb_e, emb_r

    def forward(self, e, r, direction="tail"):
        """Score every entity for (e, r, ?): checkered reshape -> circular conv -> dense."""
        assert direction in ("head", "tail"), "Unknown forward direction"
        emb_e, emb_r = self.embed2(e, r)
        emb_comb = torch.cat([emb_e, emb_r], dim=-1)
        # Reorder concatenated features by the precomputed checkered permutations.
        chequer_perm = emb_comb[:, self.chequer_perm]
        stack_inp = chequer_perm.reshape((-1, self.feature_permutation, 2 * self.reshape_width, self.reshape_height))
        stack_inp = self.bn0(stack_inp)
        x = self.inp_drop(stack_inp)
        # Circular padding emulates convolution on a torus (paper Sec. 4.3).
        x = InteractE._circular_padding_chw(x, self.kernel_size // 2)
        # Grouped conv: the same filter bank applied to each permutation channel.
        x = F.conv2d(x, self.conv_filt.repeat(self.feature_permutation, 1, 1, 1), padding=self.padding, groups=self.feature_permutation)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.feature_map_drop(x)
        x = x.view(-1, self.flat_sz)
        x = self.fc(x)
        x = self.hidden_drop(x)
        x = self.bn2(x)
        x = F.relu(x)
        # Inner product with every entity embedding + per-entity bias.
        x = torch.mm(x, self.ent_embeddings.weight.transpose(1, 0))
        x += self.bias.expand_as(x)
        return torch.sigmoid(x)

    def predict_tail_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for tail prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="tail"), k=topk)
        return rank

    def predict_head_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for head prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="head"), k=topk)
        return rank

    @staticmethod
    def _circular_padding_chw(batch, padding):
        """Wrap-around pad the last two dims of a [b, c, h, w] tensor by *padding*."""
        upper_pad = batch[..., -padding:, :]
        lower_pad = batch[..., :padding, :]
        temp = torch.cat([upper_pad, batch, lower_pad], dim=2)
        left_pad = temp[..., -padding:]
        right_pad = temp[..., :padding]
        padded = torch.cat([left_pad, temp, right_pad], dim=3)
        return padded

    def _get_chequer_perm(self):
        """Build `feature_permutation` random checkered index orders.

        Each permutation alternates entity and relation feature indices in a
        chess-board pattern over the reshape grid (relation indices are offset
        by hidden_size because forward() concatenates [entity, relation]).

        Returns:
            LongTensor: indices of shape [feature_permutation, 2*hidden_size].
        """
        ent_perm = np.int32([np.random.permutation(self.hidden_size) for _ in range(self.feature_permutation)])
        rel_perm = np.int32([np.random.permutation(self.hidden_size) for _ in range(self.feature_permutation)])
        comb_idx = []
        for k in range(self.feature_permutation):
            temp = []
            ent_idx, rel_idx = 0, 0
            for i in range(self.reshape_height):
                for _ in range(self.reshape_width):
                    # Alternate the starting feature type per permutation (k)
                    # and per row (i) to produce the chess-board layout.
                    if k % 2 == 0:
                        if i % 2 == 0:
                            temp.append(ent_perm[k, ent_idx])
                            ent_idx += 1
                            temp.append(rel_perm[k, rel_idx] + self.hidden_size)
                            rel_idx += 1
                        else:
                            temp.append(rel_perm[k, rel_idx] + self.hidden_size)
                            rel_idx += 1
                            temp.append(ent_perm[k, ent_idx])
                            ent_idx += 1
                    else:
                        if i % 2 == 0:
                            temp.append(rel_perm[k, rel_idx] + self.hidden_size)
                            rel_idx += 1
                            temp.append(ent_perm[k, ent_idx])
                            ent_idx += 1
                        else:
                            temp.append(ent_perm[k, ent_idx])
                            ent_idx += 1
                            temp.append(rel_perm[k, rel_idx] + self.hidden_size)
                            rel_idx += 1
            comb_idx.append(temp)
        chequer_perm = torch.LongTensor(np.int32(comb_idx)).to(self.device)
        return chequer_perm
class HypER(ProjectionModel):
    """
    `HypER\: Hypernetwork Knowledge Graph Embeddings`_

    Args:
        config (object): Model configuration parameters.

    .. _HypER\: Hypernetwork Knowledge Graph Embeddings:
        https://arxiv.org/abs/1808.07018
    """

    def __init__(self, **kwargs):
        """Build embeddings, the hypernetwork (fc1) and the conv/dense head."""
        super(HypER, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "ent_hidden_size", "rel_hidden_size", "input_dropout", "hidden_dropout", "feature_map_dropout"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        self.device = kwargs["device"]
        # 1 x 9 filters over the (1 x ent_hidden_size) embedding "image".
        self.filt_h = 1
        self.filt_w = 9
        self.in_channels = 1
        self.out_channels = 32
        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        self.ent_embeddings = NamedEmbedding("ent_embeddings", num_total_ent, self.ent_hidden_size, padding_idx=0)
        self.rel_embeddings = NamedEmbedding("rel_embeddings", num_total_rel, self.rel_hidden_size, padding_idx=0)
        self.inp_drop = nn.Dropout(self.input_dropout)
        self.hidden_drop = nn.Dropout(self.hidden_dropout)
        self.feature_map_drop = nn.Dropout2d(self.feature_map_dropout)
        self.bn0 = torch.nn.BatchNorm2d(self.in_channels)
        self.bn1 = torch.nn.BatchNorm2d(self.out_channels)
        self.bn2 = torch.nn.BatchNorm1d(self.ent_hidden_size)
        # Per-entity output bias.
        self.register_parameter("b", nn.Parameter(torch.zeros(num_total_ent)))
        fc_length = (1 - self.filt_h + 1) * (self.ent_hidden_size - self.filt_w + 1) * self.out_channels
        self.fc = torch.nn.Linear(fc_length, self.ent_hidden_size)
        # Hypernetwork: maps a relation embedding to a full conv filter bank.
        fc1_length = self.in_channels * self.out_channels * self.filt_h * self.filt_w
        self.fc1 = torch.nn.Linear(self.rel_hidden_size, fc1_length)
        nn.init.xavier_uniform_(self.ent_embeddings.weight.data)
        nn.init.xavier_uniform_(self.rel_embeddings.weight.data)
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]
        self.loss = Criterion.multi_class_bce

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t

    def embed2(self, e, r):
        """Look up one entity embedding and one relation embedding."""
        emb_e = self.ent_embeddings(e)
        emb_r = self.rel_embeddings(r)
        return emb_e, emb_r

    def forward(self, e, r, direction="tail"):
        """Score every entity for (e, r, ?) using relation-generated conv filters."""
        assert direction in ("head", "tail"), "Unknown forward direction"
        e_emb, r_emb = self.embed2(e, r)
        e_emb = e_emb.view(-1, 1, 1, self.ent_embeddings.weight.size(1))
        x = self.bn0(e_emb)
        x = self.inp_drop(x)
        # Hypernetwork output -> one filter bank per relation in the batch.
        k = self.fc1(r_emb)
        k = k.view(-1, self.in_channels, self.out_channels, self.filt_h, self.filt_w)
        k = k.view(e_emb.size(0) * self.in_channels * self.out_channels, 1, self.filt_h, self.filt_w)
        # Grouped conv trick: fold the batch into channels so each example is
        # convolved with its own relation-specific filters.
        x = x.permute(1, 0, 2, 3)
        x = F.conv2d(x, k, groups=e_emb.size(0))
        x = x.view(e_emb.size(0), 1, self.out_channels, 1 - self.filt_h + 1, e_emb.size(3) - self.filt_w + 1)
        x = x.permute(0, 3, 4, 1, 2)
        x = torch.sum(x, dim=3)
        x = x.permute(0, 3, 1, 2).contiguous()
        x = self.bn1(x)
        x = self.feature_map_drop(x)
        x = x.view(e_emb.size(0), -1)
        x = self.fc(x)
        x = self.hidden_drop(x)
        x = self.bn2(x)
        x = F.relu(x)
        # Inner product with every entity embedding + per-entity bias.
        x = torch.mm(x, self.ent_embeddings.weight.transpose(1, 0))
        x += self.b.expand_as(x)
        # NOTE(review): F.sigmoid is a deprecated alias of torch.sigmoid.
        pred = F.sigmoid(x)
        return pred

    def predict_tail_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for tail prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="tail"), k=topk)
        return rank

    def predict_head_rank(self, e, r, topk=-1):
        """Return entity indices ranked best-first for head prediction."""
        _, rank = torch.topk(-self.forward(e, r, direction="head"), k=topk)
        return rank
class AcrE(ProjectionModel):
"""
`Knowledge Graph Embedding with Atrous Convolution and Residual Learning`_
Args:
config (object): Model configuration parameters.
.. _Knowledge Graph Embedding with Atrous Convolution and Residual Learning:
https://arxiv.org/abs/2010.12121
"""
def __init__(self, **kwargs):
super(AcrE, self).__init__(self.__class__.__name__.lower())
param_list = ["tot_entity", "tot_relation", "hidden_size", "input_dropout", "hidden_dropout", "feature_map_dropout",
"in_channels", "way", "first_atrous", "second_atrous", "third_atrous", "acre_bias"]
param_dict = self.load_params(param_list, kwargs)
self.__dict__.update(param_dict)
num_total_ent = self.tot_entity
num_total_rel = self.tot_relation
k = self.hidden_size
self.ent_embeddings = NamedEmbedding("ent_embedding", num_total_ent, k, padding_idx=None)
self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel * 2, k, padding_idx=None)
self.inp_drop = torch.nn.Dropout(self.input_dropout)
self.hidden_drop = torch.nn.Dropout(self.hidden_dropout)
self.feature_map_drop = torch.nn.Dropout2d(self.feature_map_dropout)
self.bn0 = torch.nn.BatchNorm2d(1)
self.bn1 = torch.nn.BatchNorm2d(self.in_channels)
self.bn2 = torch.nn.BatchNorm1d(k)
self.fc = torch.nn.Linear(self.in_channels * 400, k)
self.padding = 0
if self.way == "serial":
self.conv1 = torch.nn.Conv2d(1, self.in_channels, (3, 3), 1, self.first_atrous, bias=self.acre_bias,
dilation=self.first_atrous)
self.conv2 = torch.nn.Conv2d(self.in_channels, self.in_channels, (3, 3), 1, self.second_atrous,
bias=self.acre_bias, dilation=self.second_atrous)
self.conv3 = torch.nn.Conv2d(self.in_channels, self.in_channels, (3, 3), 1, self.third_atrous, bias=self.acre_bias,
dilation=self.third_atrous)
else:
self.conv1 = torch.nn.Conv2d(1, self.in_channels, (3, 3), 1, self.first_atrous, bias=self.acre_bias,
dilation=self.first_atrous)
self.conv2 = torch.nn.Conv2d(1, self.in_channels, (3, 3), 1, self.second_atrous, bias=self.acre_bias,
dilation=self.second_atrous)
self.conv3 = torch.nn.Conv2d(1, self.in_channels, (3, 3), 1, self.third_atrous, bias=self.acre_bias,
dilation=self.third_atrous)
self.W_gate_e = torch.nn.Linear(1600, 400)
self.register_parameter("bias", nn.Parameter(torch.zeros(num_total_ent)))
nn.init.xavier_uniform_(self.ent_embeddings.weight.data)
nn.init.xavier_uniform_(self.rel_embeddings.weight.data)
self.parameter_list = [
self.ent_embeddings,
self.rel_embeddings,
]
self.loss = Criterion.multi_class_bce
def embed(self, h, r, t):
"""Function to get the embedding value.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
emb_h = self.ent_embeddings(h)
emb_r = self.rel_embeddings(r)
emb_t = self.ent_embeddings(t)
return emb_h, emb_r, emb_t
def embed2(self, e, r):
emb_e = self.ent_embeddings(e)
emb_r = self.rel_embeddings(r)
return emb_e, emb_r
    def forward(self, e, r, direction="tail"):
        """Score (e, r) queries against every entity; returns sigmoid scores.

        Args:
            e (Tensor): Entity ids of the query.
            r (Tensor): Relation ids of the query.
            direction (str): "head" or "tail".

        Returns:
            Tensor: (batch, tot_entity) scores in (0, 1).
        """
        # NOTE(review): `direction` is validated but never used below — the
        # same score is computed either way; presumably head queries rely on
        # inverse relation ids (rel_embeddings was sized tot_relation * 2) —
        # confirm against the caller.
        assert direction in ("head", "tail"), "Unknown forward direction"
        emb_e, emb_r = self.embed2(e, r)
        # Reshape flat embeddings into 2-D feature maps; the hard-coded
        # (10, 20) assumes hidden_size == 200 — TODO confirm.
        sub_emb = emb_e.view(-1, 1, 10, 20)
        rel_emb = emb_r.view(-1, 1, 10, 20)
        # Stack entity over relation -> (batch, 1, 20, 20) input "image".
        comb_emb = torch.cat([sub_emb, rel_emb], dim=2)
        stack_inp = self.bn0(comb_emb)
        x = self.inp_drop(stack_inp)
        # Keep the (normalized, dropped-out) input for the residual connection.
        res = x
        if self.way == "serial":
            # Serial path: chain the three atrous convolutions, then add the
            # residual.
            x = self.conv1(x)
            x = self.conv2(x)
            x = self.conv3(x)
            x = x + res
        else:
            # Parallel path: run the three atrous convolutions side by side,
            # flatten each to (batch, in_channels, 400), concatenate with the
            # broadcast residual and gate back down to 400 features.
            conv1 = self.conv1(x).view(-1, self.in_channels, 400)
            conv2 = self.conv2(x).view(-1, self.in_channels, 400)
            conv3 = self.conv3(x).view(-1, self.in_channels, 400)
            res = res.expand(-1, self.in_channels, 20, 20).view(-1, self.in_channels, 400)
            x = torch.cat((res, conv1, conv2, conv3), dim=2)
            x = self.W_gate_e(x).view(-1, self.in_channels, 20, 20)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.feature_map_drop(x)
        # Flatten feature maps and project back to the embedding width.
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        x = self.hidden_drop(x)
        x = self.bn2(x)
        x = F.relu(x)
        # Score against every entity embedding, add a per-entity bias, and
        # squash to (0, 1) for the multi-class BCE loss.
        x = torch.mm(x, self.ent_embeddings.weight.transpose(1, 0))
        x += self.bias.expand_as(x)
        return torch.sigmoid(x)
def predict_tail_rank(self, e, r, topk=-1):
_, rank = torch.topk(-self.forward(e, r, direction="tail"), k=topk)
return rank
def predict_head_rank(self, e, r, topk=-1):
_, rank = torch.topk(-self.forward(e, r, direction="head"), k=topk)
return rank
| 30,964 | 40.452477 | 479 | py |
pykg2vec | pykg2vec-master/pykg2vec/models/pointwise.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import numpy as np
from numpy.random import RandomState
from pykg2vec.models.KGMeta import PointwiseModel
from pykg2vec.models.Domain import NamedEmbedding
from pykg2vec.utils.criterion import Criterion
class ANALOGY(PointwiseModel):
    """
    `Analogical Inference for Multi-relational Embeddings`_

    ANALOGY scores a triple as the sum of a DistMult-style term over
    full-width real embeddings and a ComplEx-style term over half-width
    real/imaginary embedding pairs.

    Args:
        config (object): Model configuration parameters.

    .. _Analogical Inference for Multi-relational Embeddings:
        http://proceedings.mlr.press/v70/liu17d/liu17d.pdf
    """

    def __init__(self, **kwargs):
        super(ANALOGY, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        k = self.hidden_size
        # Full-width embeddings feed the DistMult term; the k // 2 real/imag
        # pairs feed the ComplEx term.
        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, k)
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, k)
        self.ent_embeddings_real = NamedEmbedding("emb_e_real", self.tot_entity, k // 2)
        self.ent_embeddings_img = NamedEmbedding("emb_e_img", self.tot_entity, k // 2)
        self.rel_embeddings_real = NamedEmbedding("emb_rel_real", self.tot_relation, k // 2)
        self.rel_embeddings_img = NamedEmbedding("emb_rel_img", self.tot_relation, k // 2)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.ent_embeddings_real.weight)
        nn.init.xavier_uniform_(self.ent_embeddings_img.weight)
        nn.init.xavier_uniform_(self.rel_embeddings_real.weight)
        nn.init.xavier_uniform_(self.rel_embeddings_img.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
            self.ent_embeddings_real,
            self.ent_embeddings_img,
            self.rel_embeddings_real,
            self.rel_embeddings_img,
        ]

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        h_emb = self.ent_embeddings(h)
        r_emb = self.rel_embeddings(r)
        t_emb = self.ent_embeddings(t)
        return h_emb, r_emb, t_emb

    def embed_complex(self, h, r, t):
        """Function to get the complex embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns real and imaginary values of head, relation and tail embedding.
        """
        h_emb_real = self.ent_embeddings_real(h)
        h_emb_img = self.ent_embeddings_img(h)
        r_emb_real = self.rel_embeddings_real(r)
        r_emb_img = self.rel_embeddings_img(r)
        t_emb_real = self.ent_embeddings_real(t)
        t_emb_img = self.ent_embeddings_img(t)
        return h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img

    def forward(self, h, r, t):
        """Negated ANALOGY score: ComplEx term plus DistMult term."""
        h_e, r_e, t_e = self.embed(h, r, t)
        h_e_real, h_e_img, r_e_real, r_e_img, t_e_real, t_e_img = self.embed_complex(h, r, t)
        complex_loss = -(h_e_real * t_e_real * r_e_real + h_e_img * t_e_img * r_e_real + h_e_real * t_e_img * r_e_img - h_e_img * t_e_real * r_e_img).sum(axis=-1)
        distmult_loss = -(h_e * r_e * t_e).sum(axis=-1)
        return complex_loss + distmult_loss

    def get_reg(self, h, r, t, reg_type="F2"):
        """Regularization penalty over all batch embeddings, scaled by lmbda.

        Args:
            h, r, t (Tensor): Head/relation/tail ids.
            reg_type (str): "F2" (squared L2) or "N3" (nuclear 3-norm).

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        h_e_real, h_e_img, r_e_real, r_e_img, t_e_real, t_e_img = self.embed_complex(h, r, t)
        if reg_type.lower() == 'f2':
            regul_term = (h_e_real ** 2 + h_e_img ** 2 + r_e_real ** 2 + r_e_img ** 2 + t_e_real ** 2 + t_e_img ** 2).sum(axis=-1).mean()
            regul_term += (h_e ** 2 + r_e ** 2 + t_e ** 2).sum(axis=-1).mean()
        elif reg_type.lower() == 'n3':
            # N3 (nuclear 3-norm, Lacroix et al. 2018) sums |x| ** 3.  Cubing
            # signed values (as before) lets negative entries cancel the
            # penalty; take absolute values first, consistent with ComplexN3.
            regul_term = (h_e_real.abs() ** 3 + h_e_img.abs() ** 3 + r_e_real.abs() ** 3 + r_e_img.abs() ** 3 + t_e_real.abs() ** 3 + t_e_img.abs() ** 3).sum(axis=-1).mean()
            regul_term += (h_e.abs() ** 3 + r_e.abs() ** 3 + t_e.abs() ** 3).sum(axis=-1).mean()
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        return self.lmbda*regul_term
class Complex(PointwiseModel):
    """
    `Complex Embeddings for Simple Link Prediction`_ (ComplEx) is an enhanced version of DistMult in that it uses complex-valued embeddings
    to represent both entities and relations. Using the complex-valued embedding allows
    the defined scoring function in ComplEx to differentiate that facts with assymmetric relations.

    Args:
        config (object): Model configuration parameters.

    .. _Complex Embeddings for Simple Link Prediction:
        http://proceedings.mlr.press/v48/trouillon16.pdf
    """

    def __init__(self, **kwargs):
        super(Complex, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        # Each entity/relation is a complex vector stored as separate real and
        # imaginary parts.
        self.ent_embeddings_real = NamedEmbedding("emb_e_real", num_total_ent, k)
        self.ent_embeddings_img = NamedEmbedding("emb_e_img", num_total_ent, k)
        self.rel_embeddings_real = NamedEmbedding("emb_rel_real", num_total_rel, k)
        self.rel_embeddings_img = NamedEmbedding("emb_rel_img", num_total_rel, k)
        nn.init.xavier_uniform_(self.ent_embeddings_real.weight)
        nn.init.xavier_uniform_(self.ent_embeddings_img.weight)
        nn.init.xavier_uniform_(self.rel_embeddings_real.weight)
        nn.init.xavier_uniform_(self.rel_embeddings_img.weight)

        self.parameter_list = [
            self.ent_embeddings_real,
            self.ent_embeddings_img,
            self.rel_embeddings_real,
            self.rel_embeddings_img,
        ]

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns real and imaginary values of head, relation and tail embedding.
        """
        h_emb_real = self.ent_embeddings_real(h)
        h_emb_img = self.ent_embeddings_img(h)
        r_emb_real = self.rel_embeddings_real(r)
        r_emb_img = self.rel_embeddings_img(r)
        t_emb_real = self.ent_embeddings_real(t)
        t_emb_img = self.ent_embeddings_img(t)
        return h_emb_real, h_emb_img, r_emb_real, r_emb_img, t_emb_real, t_emb_img

    def forward(self, h, r, t):
        """Negated ComplEx score Re(<h, r, conj(t)>)."""
        h_e_real, h_e_img, r_e_real, r_e_img, t_e_real, t_e_img = self.embed(h, r, t)
        return -torch.sum(h_e_real * t_e_real * r_e_real + h_e_img * t_e_img * r_e_real +
                          h_e_real * t_e_img * r_e_img - h_e_img * t_e_real * r_e_img, -1)

    def get_reg(self, h, r, t, reg_type="F2"):
        """Regularization penalty over all batch embeddings, scaled by lmbda.

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        h_e_real, h_e_img, r_e_real, r_e_img, t_e_real, t_e_img = self.embed(h, r, t)
        if reg_type.lower() == 'f2':
            regul_term = torch.mean(torch.sum(h_e_real ** 2, -1) + torch.sum(h_e_img ** 2, -1) + torch.sum(r_e_real ** 2, -1) +
                                    torch.sum(r_e_img ** 2, -1) + torch.sum(t_e_real ** 2, -1) + torch.sum(t_e_img ** 2, -1))
        elif reg_type.lower() == 'n3':
            # N3 (nuclear 3-norm) sums |x| ** 3; cubing signed values (as
            # before) can make the penalty negative.  Consistent with
            # ComplexN3 below.
            regul_term = torch.mean(torch.sum(h_e_real.abs() ** 3, -1) + torch.sum(h_e_img.abs() ** 3, -1) + torch.sum(r_e_real.abs() ** 3, -1) +
                                    torch.sum(r_e_img.abs() ** 3, -1) + torch.sum(t_e_real.abs() ** 3, -1) + torch.sum(t_e_img.abs() ** 3, -1))
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        return self.lmbda*regul_term
class ComplexN3(Complex):
    """
    `Complex Embeddings for Simple Link Prediction`_ (ComplEx) is an enhanced version of DistMult in that it uses complex-valued embeddings
    to represent both entities and relations. Using the complex-valued embedding allows
    the defined scoring function in ComplEx to differentiate that facts with assymmetric relations.

    This variant defaults to the N3 (nuclear 3-norm) regularizer and applies
    both norms to absolute embedding values.

    Args:
        config (object): Model configuration parameters.

    .. _Complex Embeddings for Simple Link Prediction:
        http://proceedings.mlr.press/v48/trouillon16.pdf
    """

    def __init__(self, **kwargs):
        super(ComplexN3, self).__init__(**kwargs)
        self.model_name = 'complexn3'
        self.loss = Criterion.pointwise_logistic

    def get_reg(self, h, r, t, reg_type="N3"):
        """Return the lmbda-scaled F2 or N3 penalty over the batch embeddings.

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        reg = reg_type.lower()
        if reg == 'f2':
            exponent = 2
        elif reg == 'n3':
            exponent = 3
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        # Sum |x| ** p over the embedding axis of every component tensor
        # (real/imag parts of h, r, t), then average over the batch.
        components = self.embed(h, r, t)
        penalty = sum(torch.sum(c.abs() ** exponent, -1) for c in components)
        return self.lmbda * torch.mean(penalty)
class ConvKB(PointwiseModel):
    """
    In `A Novel Embedding Model for Knowledge Base Completion Based on Convolutional Neural Network`_ (ConvKB),
    each triple (head entity, relation, tail entity) is represented as a 3-column matrix where each column vector represents a triple element

    Portion of the code based on daiquocnguyen_.

    Args:
        config (object): Model configuration parameters.

    .. _daiquocnguyen:
        https://github.com/daiquocnguyen/ConvKB

    .. _A Novel Embedding Model for Knowledge Base Completion Based on Convolutional Neural Network:
        https://www.aclweb.org/anthology/N18-2053
    """

    def __init__(self, **kwargs):
        super(ConvKB, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "num_filters", "filter_sizes"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        num_filters = self.num_filters
        filter_sizes = self.filter_sizes
        device = kwargs["device"]

        self.ent_embeddings = NamedEmbedding("ent_embedding", num_total_ent, k)
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel, k)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]

        # nn.ModuleList (instead of a plain Python list) registers the conv
        # layers as submodules, so their parameters are visible to
        # model.parameters(), state_dict() and .to()/.cuda().
        self.conv_list = nn.ModuleList(
            [nn.Conv2d(1, num_filters, (3, filter_size), stride=(1, 1)).to(device) for filter_size in filter_sizes])
        # A filter of width f over a (3, k) input yields k - f + 1 output columns.
        conv_out_dim = num_filters*sum([(k-filter_size+1) for filter_size in filter_sizes])
        self.fc1 = nn.Linear(in_features=conv_out_dim, out_features=1, bias=True)

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.ent_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.ent_embeddings(t)
        return emb_h, emb_r, emb_t

    def forward(self, h, r, t):
        """Plausibility score for each triple of the batch."""
        h_emb, r_emb, t_emb = self.embed(h, r, t)
        first_dimen = list(h_emb.shape)[0]

        # Stack h, r, t into a (batch, 1, 3, k) "image" for the convolutions.
        stacked_h = torch.unsqueeze(h_emb, dim=1)
        stacked_r = torch.unsqueeze(r_emb, dim=1)
        stacked_t = torch.unsqueeze(t_emb, dim=1)
        stacked_hrt = torch.cat([stacked_h, stacked_r, stacked_t], dim=1)
        stacked_hrt = torch.unsqueeze(stacked_hrt, dim=1)  # [b, 1, 3, k]

        # Apply every filter size, concatenate the feature maps and score
        # with a single linear layer.
        stacked_hrt = [conv_layer(stacked_hrt) for conv_layer in self.conv_list]
        stacked_hrt = torch.cat(stacked_hrt, dim=3)
        stacked_hrt = stacked_hrt.view(first_dimen, -1)
        preds = self.fc1(stacked_hrt)
        preds = torch.squeeze(preds, dim=-1)
        return preds
class CP(PointwiseModel):
    """
    `Canonical Tensor Decomposition for Knowledge Base Completion`_

    Args:
        config (object): Model configuration parameters.

    .. _Canonical Tensor Decomposition for Knowledge Base Completion:
        http://proceedings.mlr.press/v80/lacroix18a/lacroix18a.pdf
    """

    def __init__(self, **kwargs):
        super(CP, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        # CP keeps separate subject-role and object-role entity embeddings.
        self.sub_embeddings = NamedEmbedding("sub_embedding", num_total_ent, k)
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel, k)
        self.obj_embeddings = NamedEmbedding("obj_embedding", num_total_ent, k)
        nn.init.xavier_uniform_(self.sub_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.obj_embeddings.weight)

        self.parameter_list = [
            self.sub_embeddings,
            self.rel_embeddings,
            self.obj_embeddings,
        ]

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        emb_h = self.sub_embeddings(h)
        emb_r = self.rel_embeddings(r)
        emb_t = self.obj_embeddings(t)
        return emb_h, emb_r, emb_t

    def forward(self, h, r, t):
        """Negated trilinear score <h, r, t>."""
        h_e, r_e, t_e = self.embed(h, r, t)
        return -torch.sum(h_e * r_e * t_e, -1)

    def get_reg(self, h, r, t, reg_type='N3'):
        """F2 or N3 regularization penalty, scaled by lmbda.

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        if reg_type.lower() == 'f2':
            regul_term = torch.mean(torch.sum(h_e**2, -1) + torch.sum(r_e**2, -1) + torch.sum(t_e**2, -1))
        elif reg_type.lower() == 'n3':
            # N3 (nuclear 3-norm, Lacroix et al. 2018) sums |x| ** 3; cubing
            # signed values (as before) can make the penalty negative.
            regul_term = torch.mean(torch.sum(h_e.abs()**3, -1) + torch.sum(r_e.abs()**3, -1) + torch.sum(t_e.abs()**3, -1))
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        return self.lmbda * regul_term
class DistMult(PointwiseModel):
    """
    `EMBEDDING ENTITIES AND RELATIONS FOR LEARNING AND INFERENCE IN KNOWLEDGE BASES`_ (DistMult) is a simpler model comparing with RESCAL in that it simplifies
    the weight matrix used in RESCAL to a diagonal matrix. The scoring
    function used DistMult can capture the pairwise interactions between
    the head and the tail entities. However, DistMult has limitation on modeling asymmetric relations.

    Args:
        config (object): Model configuration parameters.

    .. _EMBEDDING ENTITIES AND RELATIONS FOR LEARNING AND INFERENCE IN KNOWLEDGE BASES:
        https://arxiv.org/pdf/1412.6575.pdf
    """

    def __init__(self, **kwargs):
        super(DistMult, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        self.ent_embeddings = NamedEmbedding("ent_embedding", num_total_ent, k)
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel, k)
        nn.init.xavier_uniform_(self.ent_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)

        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        h_emb = self.ent_embeddings(h)
        r_emb = self.rel_embeddings(r)
        t_emb = self.ent_embeddings(t)
        return h_emb, r_emb, t_emb

    def forward(self, h, r, t):
        """Negated bilinear-diagonal score <h, r, t>."""
        h_e, r_e, t_e = self.embed(h, r, t)
        return -torch.sum(h_e*r_e*t_e, -1)

    def get_reg(self, h, r, t, reg_type="F2"):
        """F2 or N3 regularization penalty, scaled by lmbda.

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        h_e, r_e, t_e = self.embed(h, r, t)
        if reg_type.lower() == 'f2':
            regul_term = torch.mean(torch.sum(h_e ** 2, -1) + torch.sum(r_e ** 2, -1) + torch.sum(t_e ** 2, -1))
        elif reg_type.lower() == 'n3':
            # N3 (nuclear 3-norm, Lacroix et al. 2018) sums |x| ** 3; cubing
            # signed values (as before) can make the penalty negative.
            regul_term = torch.mean(torch.sum(h_e.abs() ** 3, -1) + torch.sum(r_e.abs() ** 3, -1) + torch.sum(t_e.abs() ** 3, -1))
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        return self.lmbda*regul_term
class SimplE(PointwiseModel):
    """
    `SimplE Embedding for Link Prediction in Knowledge Graphs`_

    SimplE keeps two embeddings per entity (head role and tail role) and two
    per relation (forward and inverse) and scores a triple with the average
    of the two directional products.

    Args:
        config (object): Model configuration parameters.

    .. _SimplE Embedding for Link Prediction in Knowledge Graphs:
        https://papers.nips.cc/paper/7682-simple-embedding-for-link-prediction-in-knowledge-graphs.pdf
    """

    def __init__(self, **kwargs):
        super(SimplE, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size
        self.tot_train_triples = kwargs['tot_train_triples']
        self.batch_size = kwargs['batch_size']

        self.ent_head_embeddings = NamedEmbedding("ent_head_embedding", num_total_ent, k)
        self.ent_tail_embeddings = NamedEmbedding("ent_tail_embedding", num_total_ent, k)
        self.rel_embeddings = NamedEmbedding("rel_embedding", num_total_rel, k)
        self.rel_inv_embeddings = NamedEmbedding("rel_inv_embedding", num_total_rel, k)
        nn.init.xavier_uniform_(self.ent_head_embeddings.weight)
        nn.init.xavier_uniform_(self.ent_tail_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_embeddings.weight)
        nn.init.xavier_uniform_(self.rel_inv_embeddings.weight)

        self.parameter_list = [
            self.ent_head_embeddings,
            self.ent_tail_embeddings,
            self.rel_embeddings,
            self.rel_inv_embeddings,
        ]

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: head-role/tail-role entity embeddings and
            forward/inverse relation embeddings for both score directions.
        """
        emb_h1 = self.ent_head_embeddings(h)
        emb_h2 = self.ent_head_embeddings(t)
        emb_r1 = self.rel_embeddings(r)
        emb_r2 = self.rel_inv_embeddings(r)
        emb_t1 = self.ent_tail_embeddings(t)
        emb_t2 = self.ent_tail_embeddings(h)
        return emb_h1, emb_h2, emb_r1, emb_r2, emb_t1, emb_t2

    def forward(self, h, r, t):
        """Negated, clamped SimplE score (average of both directions)."""
        h1_e, h2_e, r1_e, r2_e, t1_e, t2_e = self.embed(h, r, t)
        # Average the forward and inverse scores, as in the SimplE paper.
        # The previous code divided only the second sum by 2 because of
        # operator precedence (a + b / 2.0).
        init = (torch.sum(h1_e*r1_e*t1_e, 1) + torch.sum(h2_e*r2_e*t2_e, 1)) / 2.0
        return -torch.clamp(init, -20, 20)

    def get_reg(self, h, r, t, reg_type="F2"):
        """F2 or N3 penalty, scaled by lmbda.

        NOTE(review): this operates on the raw id tensors h/r/t, not on the
        looked-up embeddings — confirm whether embedding regularization was
        intended (behavior kept as-is).

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        if reg_type.lower() == 'f2':
            regul_term = torch.mean(torch.sum(h.type(torch.FloatTensor) ** 2, -1) + torch.sum(r.type(torch.FloatTensor) ** 2, -1) + torch.sum(t.type(torch.FloatTensor) ** 2, -1))
        elif reg_type.lower() == 'n3':
            regul_term = torch.mean(torch.sum(h.type(torch.FloatTensor) ** 3, -1) + torch.sum(r.type(torch.FloatTensor) ** 3, -1) + torch.sum(t.type(torch.FloatTensor) ** 3, -1))
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        return self.lmbda * regul_term
class SimplE_ignr(SimplE):
    """
    `SimplE Embedding for Link Prediction in Knowledge Graphs`_

    Variant of SimplE that concatenates both directional embeddings and
    scores them with a single product instead of averaging two sums.

    Args:
        config (object): Model configuration parameters.

    .. _SimplE Embedding for Link Prediction in Knowledge Graphs:
        https://papers.nips.cc/paper/7682-simple-embedding-for-link-prediction-in-knowledge-graphs.pdf
    """

    def __init__(self, **kwargs):
        super(SimplE_ignr, self).__init__(**kwargs)
        self.model_name = 'simple_ignr'
        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Build concatenated forward/inverse embeddings for a batch.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: concatenated head, relation and tail embeddings.
        """
        emb_h = self._concat_selected_embeddings(self.ent_head_embeddings, h, self.ent_head_embeddings, t)
        emb_r = self._concat_selected_embeddings(self.rel_embeddings, r, self.rel_inv_embeddings, r)
        emb_t = self._concat_selected_embeddings(self.ent_tail_embeddings, t, self.ent_tail_embeddings, h)
        return emb_h, emb_r, emb_t

    def forward(self, h, r, t):
        """Negated, clamped score over the concatenated embeddings."""
        emb_h, emb_r, emb_t = self.embed(h, r, t)
        raw_score = (emb_h * emb_r * emb_t).sum(dim=1)
        return -torch.clamp(raw_score, -20, 20)

    @staticmethod
    def _concat_selected_embeddings(e1, t1, e2, t2):
        # Gather rows t1 from e1 and rows t2 from e2, then join them
        # side by side along the embedding axis.
        left = torch.index_select(e1.weight, 0, t1)
        right = torch.index_select(e2.weight, 0, t2)
        return torch.cat([left, right], 1)
class QuatE(PointwiseModel):
    """
    `Quaternion Knowledge Graph Embeddings`_

    Entities and relations are embedded as quaternions (s, x, y, z).  A
    triple is scored by rotating the head quaternion with the
    unit-normalized relation quaternion (Hamilton product) and taking the
    inner product with the tail quaternion.

    Args:
        config (object): Model configuration parameters.

    .. _cheungdaven: https://github.com/cheungdaven/QuatE.git

    .. _Quaternion Knowledge Graph Embeddings:
        https://arxiv.org/abs/1904.10281
    """

    def __init__(self, **kwargs):
        super(QuatE, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)

        num_total_ent = self.tot_entity
        num_total_rel = self.tot_relation
        k = self.hidden_size

        self.ent_s_embedding = NamedEmbedding("ent_s_embedding", num_total_ent, k)
        self.ent_x_embedding = NamedEmbedding("ent_x_embedding", num_total_ent, k)
        self.ent_y_embedding = NamedEmbedding("ent_y_embedding", num_total_ent, k)
        self.ent_z_embedding = NamedEmbedding("ent_z_embedding", num_total_ent, k)
        self.rel_s_embedding = NamedEmbedding("rel_s_embedding", num_total_rel, k)
        self.rel_x_embedding = NamedEmbedding("rel_x_embedding", num_total_rel, k)
        self.rel_y_embedding = NamedEmbedding("rel_y_embedding", num_total_rel, k)
        self.rel_z_embedding = NamedEmbedding("rel_z_embedding", num_total_rel, k)
        self.rel_w_embedding = NamedEmbedding("rel_w_embedding", num_total_rel, k)
        # NOTE(review): fc, the dropouts and bn below are never used by any
        # method of this class — presumably leftovers from the reference
        # implementation; confirm before removing.
        self.fc = nn.Linear(100, 50, bias=False)
        self.ent_dropout = nn.Dropout(0)
        self.rel_dropout = nn.Dropout(0)
        self.bn = nn.BatchNorm1d(k)

        wr, wi, wj, wk = QuatE._quaternion_init(self.tot_entity, self.hidden_size)
        wr, wi, wj, wk = torch.from_numpy(wr), torch.from_numpy(wi), torch.from_numpy(wj), torch.from_numpy(wk)
        self.ent_s_embedding.weight.data = wr.type_as(self.ent_s_embedding.weight.data)
        self.ent_x_embedding.weight.data = wi.type_as(self.ent_x_embedding.weight.data)
        self.ent_y_embedding.weight.data = wj.type_as(self.ent_y_embedding.weight.data)
        self.ent_z_embedding.weight.data = wk.type_as(self.ent_z_embedding.weight.data)

        # Relation weights must be sized by tot_relation; the previous code
        # sampled a (tot_entity, hidden_size) block here, which replaced the
        # relation weight tensors with wrongly-shaped data whenever
        # tot_relation != tot_entity.
        s, x, y, z = QuatE._quaternion_init(self.tot_relation, self.hidden_size)
        s, x, y, z = torch.from_numpy(s), torch.from_numpy(x), torch.from_numpy(y), torch.from_numpy(z)
        self.rel_s_embedding.weight.data = s.type_as(self.rel_s_embedding.weight.data)
        self.rel_x_embedding.weight.data = x.type_as(self.rel_x_embedding.weight.data)
        self.rel_y_embedding.weight.data = y.type_as(self.rel_y_embedding.weight.data)
        self.rel_z_embedding.weight.data = z.type_as(self.rel_z_embedding.weight.data)

        # NOTE(review): these in-place xavier inits overwrite the quaternion
        # initialization installed just above; confirm which initialization
        # is actually intended (behavior kept as-is).
        nn.init.xavier_uniform_(self.ent_s_embedding.weight.data)
        nn.init.xavier_uniform_(self.ent_x_embedding.weight.data)
        nn.init.xavier_uniform_(self.ent_y_embedding.weight.data)
        nn.init.xavier_uniform_(self.ent_z_embedding.weight.data)
        nn.init.xavier_uniform_(self.rel_s_embedding.weight.data)
        nn.init.xavier_uniform_(self.rel_x_embedding.weight.data)
        nn.init.xavier_uniform_(self.rel_y_embedding.weight.data)
        nn.init.xavier_uniform_(self.rel_z_embedding.weight.data)
        nn.init.xavier_uniform_(self.rel_w_embedding.weight.data)

        self.parameter_list = [
            self.ent_s_embedding,
            self.ent_x_embedding,
            self.ent_y_embedding,
            self.ent_z_embedding,
            self.rel_s_embedding,
            self.rel_x_embedding,
            self.rel_y_embedding,
            self.rel_z_embedding,
            self.rel_w_embedding,
        ]

        self.loss = Criterion.pointwise_logistic

    def embed(self, h, r, t):
        """Look up the four quaternion components for heads, tails and relations."""
        s_emb_h = self.ent_s_embedding(h)
        x_emb_h = self.ent_x_embedding(h)
        y_emb_h = self.ent_y_embedding(h)
        z_emb_h = self.ent_z_embedding(h)

        s_emb_t = self.ent_s_embedding(t)
        x_emb_t = self.ent_x_embedding(t)
        y_emb_t = self.ent_y_embedding(t)
        z_emb_t = self.ent_z_embedding(t)

        s_emb_r = self.rel_s_embedding(r)
        x_emb_r = self.rel_x_embedding(r)
        y_emb_r = self.rel_y_embedding(r)
        z_emb_r = self.rel_z_embedding(r)

        return s_emb_h, x_emb_h, y_emb_h, z_emb_h, s_emb_t, x_emb_t, y_emb_t, z_emb_t, s_emb_r, x_emb_r, y_emb_r, z_emb_r

    def forward(self, h, r, t):
        """Negated QuatE score: Hamilton-rotated head dotted with tail."""
        s_emb_h, x_emb_h, y_emb_h, z_emb_h, s_emb_t, x_emb_t, y_emb_t, z_emb_t, s_emb_r, x_emb_r, y_emb_r, z_emb_r = self.embed(h, r, t)

        # Normalize the relation quaternion to unit length so it acts as a
        # pure rotation.
        denominator_b = torch.sqrt(s_emb_r ** 2 + x_emb_r ** 2 + y_emb_r ** 2 + z_emb_r ** 2)
        s_emb_r = s_emb_r / denominator_b
        x_emb_r = x_emb_r / denominator_b
        y_emb_r = y_emb_r / denominator_b
        z_emb_r = z_emb_r / denominator_b

        # Hamilton product of head and (normalized) relation quaternions.
        a = s_emb_h * s_emb_r - x_emb_h * x_emb_r - y_emb_h * y_emb_r - z_emb_h * z_emb_r
        b = s_emb_h * x_emb_r + s_emb_r * x_emb_h + y_emb_h * z_emb_r - y_emb_r * z_emb_h
        c = s_emb_h * y_emb_r + s_emb_r * y_emb_h + z_emb_h * x_emb_r - z_emb_r * x_emb_h
        d = s_emb_h * z_emb_r + s_emb_r * z_emb_h + x_emb_h * y_emb_r - x_emb_r * y_emb_h

        # Inner product with the tail quaternion, negated for the loss.
        score_r = (a * s_emb_t + b * x_emb_t + c * y_emb_t + d * z_emb_t)
        return -torch.sum(score_r, -1)

    def get_reg(self, h, r, t, reg_type='N3'):
        """F2 or N3 penalty over all quaternion components, scaled by lmbda.

        Raises:
            NotImplementedError: For an unknown reg_type.
        """
        s_emb_h, x_emb_h, y_emb_h, z_emb_h, s_emb_t, x_emb_t, y_emb_t, z_emb_t, s_emb_r, x_emb_r, y_emb_r, z_emb_r = self.embed(h, r, t)
        if reg_type.lower() == 'f2':
            regul = (torch.mean(torch.abs(s_emb_h) ** 2)
                     + torch.mean(torch.abs(x_emb_h) ** 2)
                     + torch.mean(torch.abs(y_emb_h) ** 2)
                     + torch.mean(torch.abs(z_emb_h) ** 2)
                     + torch.mean(torch.abs(s_emb_t) ** 2)
                     + torch.mean(torch.abs(x_emb_t) ** 2)
                     + torch.mean(torch.abs(y_emb_t) ** 2)
                     + torch.mean(torch.abs(z_emb_t) ** 2)
                     )
            regul2 = (torch.mean(torch.abs(s_emb_r) ** 2)
                      + torch.mean(torch.abs(x_emb_r) ** 2)
                      + torch.mean(torch.abs(y_emb_r) ** 2)
                      + torch.mean(torch.abs(z_emb_r) ** 2))
        elif reg_type.lower() == 'n3':
            regul = (torch.mean(torch.abs(s_emb_h) ** 3)
                     + torch.mean(torch.abs(x_emb_h) ** 3)
                     + torch.mean(torch.abs(y_emb_h) ** 3)
                     + torch.mean(torch.abs(z_emb_h) ** 3)
                     + torch.mean(torch.abs(s_emb_t) ** 3)
                     + torch.mean(torch.abs(x_emb_t) ** 3)
                     + torch.mean(torch.abs(y_emb_t) ** 3)
                     + torch.mean(torch.abs(z_emb_t) ** 3)
                     )
            regul2 = (torch.mean(torch.abs(s_emb_r) ** 3)
                      + torch.mean(torch.abs(x_emb_r) ** 3)
                      + torch.mean(torch.abs(y_emb_r) ** 3)
                      + torch.mean(torch.abs(z_emb_r) ** 3))
        else:
            raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
        return self.lmbda * (regul + regul2)

    @staticmethod
    def _quaternion_init(in_features, out_features, criterion='he'):
        """Quaternion-aware weight initialization.

        Returns four numpy arrays (r, i, j, k) of shape
        (in_features, out_features).

        NOTE(review): the modulus/phase draws use a seeded RandomState while
        the imaginary direction vectors use the global np.random state, so
        only part of the init is reproducible — confirm whether full
        determinism is wanted.

        Raises:
            ValueError: For an unknown criterion.
        """
        fan_in = in_features
        fan_out = out_features
        if criterion == 'glorot':
            s = 1. / np.sqrt(2 * (fan_in + fan_out))
        elif criterion == 'he':
            s = 1. / np.sqrt(2 * fan_in)
        else:
            raise ValueError('Invalid criterion: ', criterion)
        rng = RandomState(123)

        kernel_shape = (in_features, out_features)
        number_of_weights = np.prod(kernel_shape)
        v_i = np.random.uniform(0.0, 1.0, number_of_weights)
        v_j = np.random.uniform(0.0, 1.0, number_of_weights)
        v_k = np.random.uniform(0.0, 1.0, number_of_weights)

        # Normalize each (i, j, k) direction vector — vectorized form of the
        # previous per-element Python loop (identical arithmetic).
        norm = np.sqrt(v_i ** 2 + v_j ** 2 + v_k ** 2) + 0.0001
        v_i = (v_i / norm).reshape(kernel_shape)
        v_j = (v_j / norm).reshape(kernel_shape)
        v_k = (v_k / norm).reshape(kernel_shape)

        modulus = rng.uniform(low=-s, high=s, size=kernel_shape)
        phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)

        weight_r = modulus * np.cos(phase)
        weight_i = modulus * v_i * np.sin(phase)
        weight_j = modulus * v_j * np.sin(phase)
        weight_k = modulus * v_k * np.sin(phase)

        return weight_r, weight_i, weight_j, weight_k
class OctonionE(PointwiseModel):
"""
`Quaternion Knowledge Graph Embeddings`_
Args:
config (object): Model configuration parameters.
.. _cheungdaven: https://github.com/cheungdaven/QuatE.git
.. _Quaternion Knowledge Graph Embeddings:
https://arxiv.org/abs/1904.10281
"""
def __init__(self, **kwargs):
super(OctonionE, self).__init__(self.__class__.__name__.lower())
param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
param_dict = self.load_params(param_list, kwargs)
self.__dict__.update(param_dict)
num_total_ent = self.tot_entity
num_total_rel = self.tot_relation
k = self.hidden_size
self.ent_embedding_1 = NamedEmbedding("ent_embedding_1", num_total_ent, k)
self.ent_embedding_2 = NamedEmbedding("ent_embedding_2", num_total_ent, k)
self.ent_embedding_3 = NamedEmbedding("ent_embedding_3", num_total_ent, k)
self.ent_embedding_4 = NamedEmbedding("ent_embedding_4", num_total_ent, k)
self.ent_embedding_5 = NamedEmbedding("ent_embedding_5", num_total_ent, k)
self.ent_embedding_6 = NamedEmbedding("ent_embedding_6", num_total_ent, k)
self.ent_embedding_7 = NamedEmbedding("ent_embedding_7", num_total_ent, k)
self.ent_embedding_8 = NamedEmbedding("ent_embedding_8", num_total_ent, k)
self.rel_embedding_1 = NamedEmbedding("rel_embedding_1", num_total_rel, k)
self.rel_embedding_2 = NamedEmbedding("rel_embedding_2", num_total_rel, k)
self.rel_embedding_3 = NamedEmbedding("rel_embedding_3", num_total_rel, k)
self.rel_embedding_4 = NamedEmbedding("rel_embedding_4", num_total_rel, k)
self.rel_embedding_5 = NamedEmbedding("rel_embedding_5", num_total_rel, k)
self.rel_embedding_6 = NamedEmbedding("rel_embedding_6", num_total_rel, k)
self.rel_embedding_7 = NamedEmbedding("rel_embedding_7", num_total_rel, k)
self.rel_embedding_8 = NamedEmbedding("rel_embedding_8", num_total_rel, k)
self.rel_w_embedding = NamedEmbedding("rel_w_embedding", num_total_rel, k)
nn.init.xavier_uniform_(self.ent_embedding_1.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_2.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_3.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_4.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_5.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_6.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_7.weight.data)
nn.init.xavier_uniform_(self.ent_embedding_8.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_1.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_2.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_3.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_4.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_5.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_6.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_7.weight.data)
nn.init.xavier_uniform_(self.rel_embedding_8.weight.data)
nn.init.xavier_uniform_(self.rel_w_embedding.weight.data)
self.parameter_list = [
self.ent_embedding_1,
self.ent_embedding_2,
self.ent_embedding_3,
self.ent_embedding_4,
self.ent_embedding_5,
self.ent_embedding_6,
self.ent_embedding_7,
self.ent_embedding_8,
self.rel_embedding_1,
self.rel_embedding_2,
self.rel_embedding_3,
self.rel_embedding_4,
self.rel_embedding_5,
self.rel_embedding_6,
self.rel_embedding_7,
self.rel_embedding_8,
self.rel_w_embedding,
]
self.loss = Criterion.pointwise_logistic
def embed(self, h, r, t):
e_1_h = self.ent_embedding_1(h)
e_2_h = self.ent_embedding_2(h)
e_3_h = self.ent_embedding_3(h)
e_4_h = self.ent_embedding_4(h)
e_5_h = self.ent_embedding_5(h)
e_6_h = self.ent_embedding_6(h)
e_7_h = self.ent_embedding_7(h)
e_8_h = self.ent_embedding_8(h)
e_1_t = self.ent_embedding_1(t)
e_2_t = self.ent_embedding_2(t)
e_3_t = self.ent_embedding_3(t)
e_4_t = self.ent_embedding_4(t)
e_5_t = self.ent_embedding_5(t)
e_6_t = self.ent_embedding_6(t)
e_7_t = self.ent_embedding_7(t)
e_8_t = self.ent_embedding_8(t)
r_1 = self.rel_embedding_1(r)
r_2 = self.rel_embedding_2(r)
r_3 = self.rel_embedding_3(r)
r_4 = self.rel_embedding_4(r)
r_5 = self.rel_embedding_5(r)
r_6 = self.rel_embedding_6(r)
r_7 = self.rel_embedding_7(r)
r_8 = self.rel_embedding_8(r)
return e_1_h, e_2_h, e_3_h, e_4_h, e_5_h, e_6_h, e_7_h, e_8_h, \
e_1_t, e_2_t, e_3_t, e_4_t, e_5_t, e_6_t, e_7_t, e_8_t, \
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8
def forward(self, h, r, t):
e_1_h, e_2_h, e_3_h, e_4_h, e_5_h, e_6_h, e_7_h, e_8_h, \
e_1_t, e_2_t, e_3_t, e_4_t, e_5_t, e_6_t, e_7_t, e_8_t, \
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 = self.embed(h, r, t)
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 = OctonionE._onorm(r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8)
o_1, o_2, o_3, o_4, o_5, o_6, o_7, o_8 = OctonionE._omult(e_1_h, e_2_h, e_3_h, e_4_h, e_5_h, e_6_h, e_7_h, e_8_h,
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8)
score_r = (o_1 * e_1_t + o_2 * e_2_t + o_3 * e_3_t + o_4 * e_4_t
+ o_5 * e_5_t + o_6 * e_6_t + o_7 * e_7_t + o_8 * e_8_t)
return -torch.sum(score_r, -1)
def get_reg(self, h, r, t, reg_type='N3'):
e_1_h, e_2_h, e_3_h, e_4_h, e_5_h, e_6_h, e_7_h, e_8_h, \
e_1_t, e_2_t, e_3_t, e_4_t, e_5_t, e_6_t, e_7_t, e_8_t, \
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 = self.embed(h, r, t)
if reg_type.lower() == 'f2':
regul = (torch.mean(torch.abs(e_1_h) ** 2)
+ torch.mean(torch.abs(e_2_h) ** 2)
+ torch.mean(torch.abs(e_3_h) ** 2)
+ torch.mean(torch.abs(e_4_h) ** 2)
+ torch.mean(torch.abs(e_5_h) ** 2)
+ torch.mean(torch.abs(e_6_h) ** 2)
+ torch.mean(torch.abs(e_7_h) ** 2)
+ torch.mean(torch.abs(e_8_h) ** 2)
+ torch.mean(torch.abs(e_1_t) ** 2)
+ torch.mean(torch.abs(e_2_t) ** 2)
+ torch.mean(torch.abs(e_3_t) ** 2)
+ torch.mean(torch.abs(e_4_t) ** 2)
+ torch.mean(torch.abs(e_5_t) ** 2)
+ torch.mean(torch.abs(e_6_t) ** 2)
+ torch.mean(torch.abs(e_7_t) ** 2)
+ torch.mean(torch.abs(e_8_t) ** 2)
)
regul2 = (torch.mean(torch.abs(r_1) ** 2)
+ torch.mean(torch.abs(r_2) ** 2)
+ torch.mean(torch.abs(r_3) ** 2)
+ torch.mean(torch.abs(r_4) ** 2)
+ torch.mean(torch.abs(r_5) ** 2)
+ torch.mean(torch.abs(r_6) ** 2)
+ torch.mean(torch.abs(r_7) ** 2)
+ torch.mean(torch.abs(r_8) ** 2))
elif reg_type.lower() == 'n3':
regul = (torch.mean(torch.abs(e_1_h) ** 3)
+ torch.mean(torch.abs(e_2_h) ** 3)
+ torch.mean(torch.abs(e_3_h) ** 3)
+ torch.mean(torch.abs(e_4_h) ** 3)
+ torch.mean(torch.abs(e_5_h) ** 3)
+ torch.mean(torch.abs(e_6_h) ** 3)
+ torch.mean(torch.abs(e_7_h) ** 3)
+ torch.mean(torch.abs(e_8_h) ** 3)
+ torch.mean(torch.abs(e_1_t) ** 3)
+ torch.mean(torch.abs(e_2_t) ** 3)
+ torch.mean(torch.abs(e_3_t) ** 3)
+ torch.mean(torch.abs(e_4_t) ** 3)
+ torch.mean(torch.abs(e_5_t) ** 3)
+ torch.mean(torch.abs(e_6_t) ** 3)
+ torch.mean(torch.abs(e_7_t) ** 3)
+ torch.mean(torch.abs(e_8_t) ** 3)
)
regul2 = (torch.mean(torch.abs(r_1) ** 3)
+ torch.mean(torch.abs(r_2) ** 3)
+ torch.mean(torch.abs(r_3) ** 3)
+ torch.mean(torch.abs(r_4) ** 3)
+ torch.mean(torch.abs(r_5) ** 3)
+ torch.mean(torch.abs(r_6) ** 3)
+ torch.mean(torch.abs(r_7) ** 3)
+ torch.mean(torch.abs(r_8) ** 3))
else:
raise NotImplementedError('Unknown regularizer type: %s' % reg_type)
return self.lmbda * (regul + regul2)
@staticmethod
def _qmult(s_a, x_a, y_a, z_a, s_b, x_b, y_b, z_b):
a = s_a * s_b - x_a * x_b - y_a * y_b - z_a * z_b
b = s_a * x_b + s_b * x_a + y_a * z_b - y_b * z_a
c = s_a * y_b + s_b * y_a + z_a * x_b - z_b * x_a
d = s_a * z_b + s_b * z_a + x_a * y_b - x_b * y_a
return a, b, c, d
@staticmethod
def _qstar(a, b, c, d):
return a, -b, -c, -d
@staticmethod
def _omult(a_1, a_2, a_3, a_4, b_1, b_2, b_3, b_4, c_1, c_2, c_3, c_4, d_1, d_2, d_3, d_4):
d_1_star, d_2_star, d_3_star, d_4_star = OctonionE._qstar(d_1, d_2, d_3, d_4)
c_1_star, c_2_star, c_3_star, c_4_star = OctonionE._qstar(c_1, c_2, c_3, c_4)
o_1, o_2, o_3, o_4 = OctonionE._qmult(a_1, a_2, a_3, a_4, c_1, c_2, c_3, c_4)
o_1s, o_2s, o_3s, o_4s = OctonionE._qmult(d_1_star, d_2_star, d_3_star, d_4_star, b_1, b_2, b_3, b_4)
o_5, o_6, o_7, o_8 = OctonionE._qmult(d_1, d_2, d_3, d_4, a_1, a_2, a_3, a_4)
o_5s, o_6s, o_7s, o_8s = OctonionE._qmult(b_1, b_2, b_3, b_4, c_1_star, c_2_star, c_3_star, c_4_star)
return o_1 - o_1s, o_2 - o_2s, o_3 - o_3s, o_4 - o_4s, \
o_5 + o_5s, o_6 + o_6s, o_7 + o_7s, o_8 + o_8s
@staticmethod
def _onorm(r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8):
denominator = torch.sqrt(r_1 ** 2 + r_2 ** 2 + r_3 ** 2 + r_4 ** 2
+ r_5 ** 2 + r_6 ** 2 + r_7 ** 2 + r_8 ** 2)
r_1 = r_1 / denominator
r_2 = r_2 / denominator
r_3 = r_3 / denominator
r_4 = r_4 / denominator
r_5 = r_5 / denominator
r_6 = r_6 / denominator
r_7 = r_7 / denominator
r_8 = r_8 / denominator
return r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8
class MuRP(PointwiseModel):
    """
       `Multi-relational Poincaré Graph Embeddings`_ (MuRP).

       Embeds entities and relations in the Poincaré ball of hyperbolic
       space and scores a triple with a (negated) squared hyperbolic
       distance plus per-entity biases.

       Args:
           config (object): Model configuration parameters.

       .. _Multi-relational Poincaré Graph Embeddings:
           https://arxiv.org/abs/1905.09791
    """
    def __init__(self, **kwargs):
        super(MuRP, self).__init__(self.__class__.__name__.lower())
        param_list = ["tot_entity", "tot_relation", "hidden_size", "lmbda"]
        param_dict = self.load_params(param_list, kwargs)
        self.__dict__.update(param_dict)
        k = self.hidden_size
        self.device = kwargs["device"]
        # Small (1e-3) double-precision init keeps points near the origin of
        # the Poincaré ball, where the geometry is close to Euclidean.
        self.ent_embeddings = NamedEmbedding("ent_embedding", self.tot_entity, k, padding_idx=0)
        self.ent_embeddings.weight.data = (
            1e-3 * torch.randn((self.tot_entity, k), dtype=torch.double, device=self.device))
        self.rel_embeddings = NamedEmbedding("rel_embedding", self.tot_relation, k, padding_idx=0)
        self.rel_embeddings.weight.data = (
            1e-3 * torch.randn((self.tot_relation, k), dtype=torch.double, device=self.device))
        # wu: relation-specific element-wise scaling applied to the head
        # (the diagonal relation matrix of the MuRP paper).
        self.wu = nn.Parameter(
            torch.tensor(np.random.uniform(-1, 1, (self.tot_relation, k)), dtype=torch.double, requires_grad=True,
                         device=self.device))
        # bs / bo: per-entity biases for the subject and object positions.
        self.bs = nn.Parameter(
            torch.zeros(self.tot_entity, dtype=torch.double, requires_grad=True, device=self.device))
        self.bo = nn.Parameter(
            torch.zeros(self.tot_entity, dtype=torch.double, requires_grad=True, device=self.device))
        self.parameter_list = [
            self.ent_embeddings,
            self.rel_embeddings,
        ]
        self.loss = Criterion.pointwise_bce
    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        h_emb = self.ent_embeddings(h)
        r_emb = self.rel_embeddings(r)
        t_emb = self.ent_embeddings(t)
        return h_emb, r_emb, t_emb
    def forward(self, h, r, t):
        # Scoring is done in hyperbolic space; the Euclidean variant below
        # (_euclidean_forward) is kept as an alternative but not used here.
        return self._poincare_forward(h, r, t)
    def predict_tail_rank(self, h, r, topk):
        # topk is unused: the rank is computed over ALL entities.
        del topk
        _, rank = torch.sort(self.forward(h, r, torch.LongTensor(list(range(self.tot_entity))).to(self.device)))
        return rank
    def predict_head_rank(self, t, r, topk):
        # topk is unused: the rank is computed over ALL entities.
        del topk
        _, rank = torch.sort(self.forward(torch.LongTensor(list(range(self.tot_entity))).to(self.device), r, t))
        return rank
    def predict_rel_rank(self, h, t, topk):
        # topk is unused: the rank is computed over ALL relations.
        del topk
        _, rank = torch.sort(self.forward(h, torch.LongTensor(list(range(self.tot_relation))).to(self.device), t))
        return rank
    def _poincare_forward(self, h, r, t):
        """Hyperbolic score: -(d_H(u_m, v_m)^2 - bs[h] - bo[t])."""
        h_emb, r_emb, t_emb = self.embed(h, r, t)
        ru = self.wu[r]
        # Re-project any embedding that drifted onto/outside the unit sphere
        # back strictly inside the Poincaré ball (norm < 1).
        h_emb = torch.where(torch.norm(h_emb, 2, dim=-1, keepdim=True) >= 1,
                            h_emb / (torch.norm(h_emb, 2, dim=-1, keepdim=True) - 1e-5), h_emb)
        t_emb = torch.where(torch.norm(t_emb, 2, dim=-1, keepdim=True) >= 1,
                            t_emb / (torch.norm(t_emb, 2, dim=-1, keepdim=True) - 1e-5), t_emb)
        r_emb = torch.where(torch.norm(r_emb, 2, dim=-1, keepdim=True) >= 1,
                            r_emb / (torch.norm(r_emb, 2, dim=-1, keepdim=True) - 1e-5), r_emb)
        # Scale the head in tangent space (log-map, multiply, exp-map) and
        # translate the tail by the relation via Möbius addition.
        u_e = self._p_log_map(h_emb)
        u_w = u_e * ru
        u_m = self._p_exp_map(u_w)
        v_m = self._p_sum(t_emb, r_emb)
        u_m = torch.where(torch.norm(u_m, 2, dim=-1, keepdim=True) >= 1,
                          u_m / (torch.norm(u_m, 2, dim=-1, keepdim=True) - 1e-5), u_m)
        v_m = torch.where(torch.norm(v_m, 2, dim=-1, keepdim=True) >= 1,
                          v_m / (torch.norm(v_m, 2, dim=-1, keepdim=True) - 1e-5), v_m)
        # Squared hyperbolic distance between the transformed head and tail.
        sqdist = (2. * self._arsech(
            torch.clamp(torch.norm(self._p_sum(-u_m, v_m), 2, dim=-1), 1e-10, 1 - 1e-5))) ** 2
        return -(sqdist - self.bs[h] - self.bo[t])
    def _euclidean_forward(self, h, r, t):
        """Euclidean counterpart of the score (not wired into forward)."""
        h_emb, r_emb, t_emb = self.embed(h, r, t)
        ru = self.wu[r]
        u_w = h_emb * ru
        sqdist = torch.sum(torch.pow(u_w - (t_emb + r_emb), 2), dim=-1)
        return -(sqdist - self.bs[h] - self.bo[t])
    @staticmethod
    def _arsech(x):
        # Inverse hyperbolic secant; callers clamp x into (0, 1).
        return torch.log((1 + torch.sqrt(1 - x.pow(2))) / x)
    @staticmethod
    def _p_exp_map(v):
        # Exponential map at the origin of the Poincaré ball.
        normv = torch.clamp(torch.norm(v, 2, dim=-1, keepdim=True), min=1e-10)
        return (1 / torch.cosh(normv)) * v / normv
    @staticmethod
    def _p_log_map(v):
        # Logarithmic map at the origin (inverse of _p_exp_map).
        normv = torch.clamp(torch.norm(v, 2, dim=-1, keepdim=True), 1e-10, 1 - 1e-5)
        return MuRP._arsech(normv) * v / normv
    @staticmethod
    def _p_sum(x, y):
        # Möbius addition x ⊕ y on the Poincaré ball.
        sqxnorm = torch.clamp(torch.sum(x * x, dim=-1, keepdim=True), 0, 1 - 1e-5)
        sqynorm = torch.clamp(torch.sum(y * y, dim=-1, keepdim=True), 0, 1 - 1e-5)
        dotxy = torch.sum(x * y, dim=-1, keepdim=True)
        numerator = (1 + 2 * dotxy + sqynorm) * x + (1 - sqxnorm) * y
        denominator = 1 + 2 * dotxy + sqxnorm * sqynorm
        return numerator / denominator
| 48,166 | 41.475309 | 178 | py |
pykg2vec | pykg2vec-master/pykg2vec/models/KGMeta.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Knowledge Graph Meta Class
====================================
It provides Abstract class for the Knowledge graph models.
"""
from pykg2vec.common import TrainingStrategy
from abc import ABCMeta
import torch.nn as nn
class Model:
    """Shared interface for knowledge-graph embedding models.

    Subclasses must implement :meth:`embed` and :meth:`forward`; they may
    override :meth:`get_reg` when their loss includes a regularisation term.
    """
    def __init__(self):
        # Bug fix: initialise to an empty dict (was None) so load_params()
        # works even when a subclass does not re-assign self.database.
        self.database = {}
    def embed(self, h, r, t):
        """Return the embeddings for head, relation and tail ids."""
        raise NotImplementedError
    def forward(self, h, r, t):
        """Return the model score for the given (h, r, t) triples."""
        raise NotImplementedError
    def load_params(self, param_list, kwargs):
        """Copy each required hyperparameter from kwargs into the model store.

        Args:
            param_list (list): names of required hyperparameters.
            kwargs (dict): candidate hyperparameter values.

        Returns:
            dict: the model's hyperparameter store, updated in place.

        Raises:
            Exception: if any name in param_list is missing from kwargs.
        """
        for param_name in param_list:
            if param_name not in kwargs:
                raise Exception("hyperparameter %s not found!" % param_name)
            self.database[param_name] = kwargs[param_name]
        return self.database
    def get_reg(self, h, r, t, **kwargs):
        """Regularisation term; zero unless a subclass overrides it."""
        return 0.0
class PairwiseModel(nn.Module, Model):
    """Base class for translational-distance KGE models trained on
    positive/negative triple pairs."""
    __metaclass__ = ABCMeta
    def __init__(self, model_name):
        """Record the model identity and select the pairwise training strategy."""
        super(PairwiseModel, self).__init__()
        self.database = {}  # store for model-specific hyperparameters
        self.training_strategy = TrainingStrategy.PAIRWISE_BASED
        self.model_name = model_name
class PointwiseModel(nn.Module, Model):
    """Base class for semantic-matching KGE models trained on single
    labelled triples."""
    __metaclass__ = ABCMeta
    def __init__(self, model_name):
        """Record the model identity and select the pointwise training strategy."""
        super(PointwiseModel, self).__init__()
        self.database = {}  # store for model-specific hyperparameters
        self.training_strategy = TrainingStrategy.POINTWISE_BASED
        self.model_name = model_name
class ProjectionModel(nn.Module, Model):
    """Base class for neural-network (projection-based) KGE models."""
    __metaclass__ = ABCMeta
    def __init__(self, model_name):
        """Record the model identity and select the projection training strategy."""
        super(ProjectionModel, self).__init__()
        self.database = {}  # store for model-specific hyperparameters
        self.training_strategy = TrainingStrategy.PROJECTION_BASED
        self.model_name = model_name
| 2,497 | 29.839506 | 76 | py |
pykg2vec | pykg2vec-master/pykg2vec/test/test_generator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for testing unit functions of generator
"""
import torch
from pykg2vec.data.generator import Generator
from pykg2vec.common import Importer, KGEArgParser
from pykg2vec.data.kgcontroller import KnowledgeGraph
def test_generator_projection():
    """Smoke-test the batch generator used by projection-based algorithms."""
    knowledge_graph = KnowledgeGraph(dataset="freebase15k")
    knowledge_graph.force_prepare_data()
    config_def, model_def = Importer().import_model_config("proje_pointwise")
    config = config_def(KGEArgParser().get_args([]))
    generator = Generator(model_def(**config.__dict__), config)
    generator.start_one_epoch(10)
    for _ in range(10):
        batch = list(next(generator))
        # Projection batches carry (h, r, t) plus the two multi-hot targets.
        assert len(batch) == 5
        h, r, t, hr_t, tr_h = batch
        assert len(h) == len(r)
        assert len(h) == len(t)
        assert isinstance(hr_t, torch.Tensor)
        assert isinstance(tr_h, torch.Tensor)
    generator.stop()
def test_generator_pointwise():
    """Smoke-test the batch generator used by pointwise algorithms."""
    knowledge_graph = KnowledgeGraph(dataset="freebase15k")
    knowledge_graph.force_prepare_data()
    config_def, model_def = Importer().import_model_config("complex")
    config = config_def(KGEArgParser().get_args([]))
    generator = Generator(model_def(**config.__dict__), config)
    generator.start_one_epoch(10)
    for _ in range(10):
        batch = list(next(generator))
        # Pointwise batches carry (h, r, t) plus the +1/-1 labels.
        assert len(batch) == 4
        h, r, t, y = batch
        assert len(h) == len(r)
        assert len(h) == len(t)
        assert set(y) == {1, -1}
    generator.stop()
def test_generator_pairwise():
    """Smoke-test the batch generator used by pairwise algorithms."""
    knowledge_graph = KnowledgeGraph(dataset="freebase15k")
    knowledge_graph.force_prepare_data()
    config_def, model_def = Importer().import_model_config('transe')
    config = config_def(KGEArgParser().get_args([]))
    generator = Generator(model_def(**config.__dict__), config)
    generator.start_one_epoch(10)
    for _ in range(10):
        batch = list(next(generator))
        # Pairwise batches carry a positive and a negative (h, r, t) triple.
        assert len(batch) == 6
        ph, pr, pt, nh, nr, nt = batch
        assert len(ph) == len(pr)
        assert len(ph) == len(pt)
        assert len(ph) == len(nh)
        assert len(ph) == len(nr)
        assert len(ph) == len(nt)
    generator.stop()
| 2,641 | 30.082353 | 77 | py |
pykg2vec | pykg2vec-master/pykg2vec/utils/riemannian_optimizer.py | import torch
from torch.optim.optimizer import Optimizer
class RiemannianOptimizer(Optimizer):
    """Riemannian stochastic gradient descent.

    Parameters listed in ``param_names`` as entity/relation embeddings are
    updated on the Poincaré ball (rescaled gradient + exponential-map
    retraction); all other parameters take a plain Euclidean SGD step.
    """
    def __init__(self, params, lr, param_names):
        defaults = dict(lr=lr)
        super(RiemannianOptimizer, self).__init__(params, defaults)
        self.param_names = param_names
    def step(self, lr=None):
        """Perform one optimisation step.

        Args:
            lr (float, optional): overrides every group's configured
                learning rate when given.

        Returns:
            None: no closure is supported, so the loss is always None.
        """
        loss = None
        for group in self.param_groups:
            # Bug fix: previously `lr` was latched from the FIRST group and
            # then reused for every later group. Resolve the rate per group
            # without mutating the caller's override.
            group_lr = lr if lr is not None else group["lr"]
            for i, p in enumerate(group["params"]):
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if self.param_names[i] in ["ent_embeddings.weight", "rel_embeddings.weight"]:
                    # Hyperbolic parameters: rescale the Euclidean gradient to
                    # the Riemannian one, then retract along the ball.
                    d_p = self._poincare_grad(p, d_p)
                    p.data = self._poincare_update(p, d_p, group_lr)
                else:
                    p.data = self._euclidean_update(p, d_p, group_lr)
        return loss
    @staticmethod
    def _euclidean_update(p, d_p, lr):
        # Plain SGD step.
        p.data = p.data - lr * d_p
        return p.data
    @staticmethod
    def _poincare_grad(p, d_p):
        # Scale by the inverse metric of the Poincaré ball: ((1 - |p|^2)^2)/4.
        p_sqnorm = torch.clamp(torch.sum(p.data ** 2, dim=-1, keepdim=True), 0, 1 - 1e-5)
        d_p = d_p * ((1 - p_sqnorm) ** 2 / 4).expand_as(d_p)
        return d_p
    @staticmethod
    def _poincare_update(p, d_p, lr):
        # Move along -lr * grad using the exponential map (stays in the ball).
        v = -lr * d_p
        p.data = RiemannianOptimizer._full_p_exp_map(p.data, v)
        return p.data
    @staticmethod
    def _full_p_exp_map(x, v):
        # Exponential map at x (approximate form used by the MuRP reference code).
        normv = torch.clamp(torch.norm(v, 2, dim=-1, keepdim=True), min=1e-10)
        sqxnorm = torch.clamp(torch.sum(x * x, dim=-1, keepdim=True), 0, 1 - 1e-5)
        y = torch.tanh(normv / (1 - sqxnorm)) * v / normv
        return RiemannianOptimizer._p_sum(x, y)
    @staticmethod
    def _p_sum(x, y):
        # Möbius addition x ⊕ y on the Poincaré ball.
        sqxnorm = torch.clamp(torch.sum(x * x, dim=-1, keepdim=True), 0, 1 - 1e-5)
        sqynorm = torch.clamp(torch.sum(y * y, dim=-1, keepdim=True), 0, 1 - 1e-5)
        dotxy = torch.sum(x * y, dim=-1, keepdim=True)
        numerator = (1 + 2 * dotxy + sqynorm) * x + (1 - sqxnorm) * y
        denominator = 1 + 2 * dotxy + sqxnorm * sqynorm
        return numerator / denominator
| 2,205 | 35.163934 | 93 | py |
pykg2vec | pykg2vec-master/pykg2vec/utils/visualization.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for visualizing the results
"""
import os
import seaborn
import torch
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from sklearn.manifold import TSNE
from matplotlib import colors as mcolors
from pykg2vec.utils.logger import Logger
seaborn.set_style("darkgrid")
class Visualization:
    """Class to aid in visualizing the results and embeddings.

    Args:
        model (object): Model object
        vis_opts (list): Options for visualization.
        sess (object): TensorFlow session object, initialized by the trainer.

    Examples:
        >>> from pykg2vec.utils.visualization import Visualization
        >>> from pykg2vec.utils.trainer import Trainer
        >>> from pykg2vec.models.TransE import TransE
        >>> model = TransE()
        >>> trainer = Trainer(model=model)
        >>> trainer.build_model()
        >>> trainer.train_model()
        >>> viz = Visualization(model=model)
        >>> viz.plot_train_result()
    """
    _logger = Logger().get_logger(__name__)
    def __init__(self, model, config, vis_opts=None):
        # vis_opts toggles which of the three plot styles are produced;
        # all default to off when no options are supplied.
        if vis_opts:
            self.ent_only_plot = vis_opts["ent_only_plot"]
            self.rel_only_plot = vis_opts["rel_only_plot"]
            self.ent_and_rel_plot = vis_opts["ent_and_rel_plot"]
        else:
            self.ent_only_plot = False
            self.rel_only_plot = False
            self.ent_and_rel_plot = False
        self.model = model
        self.config = config
        # Algorithms whose result CSVs are searched for when plotting.
        self.algo_list = ['ANALOGY', 'Complex', 'ComplexN3', 'ConvE', 'CP', 'DistMult', 'DistMult2', 'HoLE',
                          'KG2E', 'NTN', 'ProjE_pointwise', 'Rescal', 'RotatE', 'SimplE_avg',
                          'SimplE_ignr', 'SLM', 'SME_Bilinear', 'SME_Linear', 'TransD', 'TransE', 'TransH', 'TransM',
                          'TransR', 'TuckER']
        # Parallel lists: names and embeddings of the sampled triples.
        self.h_name = []
        self.r_name = []
        self.t_name = []
        self.h_emb = []
        self.r_emb = []
        self.t_emb = []
        self.h_proj_emb = []
        self.r_proj_emb = []
        self.t_proj_emb = []
        if self.model is not None:
            self.validation_triples_ids = self.config.knowledge_graph.read_cache_data('triplets_valid')
            self.idx2entity = self.config.knowledge_graph.read_cache_data('idx2entity')
            self.idx2relation = self.config.knowledge_graph.read_cache_data('idx2relation')
            self.get_idx_n_emb()
    def get_idx_n_emb(self):
        """Function to get the integer ids and the embedding."""
        # Sample disp_triple_num random validation triples to visualize.
        idx = np.random.choice(len(self.validation_triples_ids), self.config.disp_triple_num)
        triples = []
        for i, _ in enumerate(idx):
            triples.append(self.validation_triples_ids[idx[i]])
        for t in triples:
            self.h_name.append(self.idx2entity[t.h])
            self.r_name.append(self.idx2relation[t.r])
            self.t_name.append(self.idx2entity[t.t])
            emb_h, emb_r, emb_t = self.model.embed(torch.LongTensor([t.h]).to(self.config.device), torch.LongTensor([t.r]).to(self.config.device), torch.LongTensor([t.t]).to(self.config.device))
            self.h_emb.append(emb_h)
            self.r_emb.append(emb_r)
            self.t_emb.append(emb_t)
            if self.ent_and_rel_plot:
                # Some models cannot produce joint entity+relation embeddings;
                # failures are logged and the triple is skipped.
                try:
                    emb_h, emb_r, emb_t = self.model.embed(torch.LongTensor([t.h]).to(self.config.device), torch.LongTensor([t.r]).to(self.config.device), torch.LongTensor([t.t]).to(self.config.device))
                    self.h_proj_emb.append(emb_h)
                    self.r_proj_emb.append(emb_r)
                    self.t_proj_emb.append(emb_t)
                except Exception as e:
                    self._logger.exception(e)
    def plot_embedding(self,
                       resultpath=None,
                       algos=None,
                       show_label=False,
                       disp_num_r_n_e=20):
        """Function to plot the embedding.

        Args:
            resultpath (str): Path where the result will be saved.
            show_label (bool): If True, will display the labels.
            algos (str): Name of the algorithms that generated the embedding.
            disp_num_r_n_e (int): Total number of entities to display for head, tail and relation.
        """
        assert self.model is not None, 'Please provide a model!'
        if self.ent_only_plot:
            # Project head+tail embeddings down to 2-D with TSNE before drawing.
            x = torch.cat(self.h_emb + self.t_emb, dim=0)
            ent_names = np.concatenate((self.h_name, self.t_name), axis=0)
            self._logger.info("\t Reducing dimension using TSNE to 2!")
            x = TSNE(n_components=2).fit_transform(x.detach().cpu())
            x = np.asarray(x)
            ent_names = np.asarray(ent_names)
            self.draw_embedding(x, ent_names, resultpath, algos + '_entity_plot', show_label)
        if self.rel_only_plot:
            x = torch.cat(self.r_emb, dim=0)
            self._logger.info("\t Reducing dimension using TSNE to 2!")
            x = TSNE(n_components=2).fit_transform(x.detach().cpu())
            self.draw_embedding(x, self.r_name, resultpath, algos + '_rel_plot', show_label)
        if self.ent_and_rel_plot:
            # Heads, relations and tails are stacked into one TSNE run and
            # then sliced back apart so they share a common 2-D space.
            length = len(self.h_proj_emb)
            x = torch.cat(self.h_proj_emb + self.r_proj_emb + self.t_proj_emb, dim=0)
            self._logger.info("\t Reducing dimension using TSNE to 2!")
            x = TSNE(n_components=2).fit_transform(x.detach().cpu())
            h_embs = x[:length, :]
            r_embs = x[length:2 * length, :]
            t_embs = x[2 * length:3 * length, :]
            self.draw_embedding_rel_space(h_embs[:disp_num_r_n_e],
                                          r_embs[:disp_num_r_n_e],
                                          t_embs[:disp_num_r_n_e],
                                          self.h_name[:disp_num_r_n_e],
                                          self.r_name[:disp_num_r_n_e],
                                          self.t_name[:disp_num_r_n_e],
                                          resultpath, algos + '_ent_n_rel_plot', show_label)
    def plot_train_result(self):
        """Function to plot the training result."""
        algo = self.algo_list
        path = self.config.path_result
        result = self.config.path_figures
        data = [self.config.dataset_name]
        files = os.listdir(str(path))
        files_lwcase = [f.lower() for f in files]
        for d in data:
            df = pd.DataFrame()
            for a in algo:
                # Result CSVs are numbered; pick the most recent one for
                # this algorithm (count - 1).
                file_no = len([c for c in files_lwcase if a.lower() in c if 'training' in c])
                if file_no < 1:
                    continue
                file_path = str(path / (a.lower() + '_Training_results_' + str(file_no - 1) + '.csv'))
                if os.path.exists(file_path):
                    with open(str(path / (a.lower() + '_Training_results_' + str(file_no - 1) + '.csv')), 'r') as fh:
                        df_2 = pd.read_csv(fh)
                    if df.empty:
                        df['Epochs'] = df_2['Epochs']
                        df['Loss'] = df_2['Loss']
                        df['Algorithm'] = [a] * len(df_2)
                    else:
                        df_3 = pd.DataFrame()
                        df_3['Epochs'] = df_2['Epochs']
                        df_3['Loss'] = df_2['Loss']
                        df_3['Algorithm'] = [a] * len(df_2)
                        frames = [df, df_3]
                        df = pd.concat(frames)
            plt.figure()
            seaborn.lineplot(x="Epochs", y="Loss", hue="Algorithm", markers=True, dashes=False, data=df)
            files = os.listdir(str(result))
            files_lwcase = [f.lower() for f in files]
            file_no = len([c for c in files_lwcase if d.lower() in c if 'training' in c])
            plt.savefig(str(result / (d + '_training_loss_plot_' + str(file_no) + '.pdf')), bbox_inches='tight', dpi=300)
            # plt.show()
    def plot_test_result(self):
        """Function to plot the testing result."""
        algo = self.algo_list
        path = self.config.path_result
        result = self.config.path_figures
        data = [self.config.dataset_name]
        hits = self.config.hits
        assert path is not None and algo is not None and data is not None, 'Please provide valid path, algorithm and dataset!'
        files = os.listdir(str(path))
        # files_lwcase = [f.lower() for f in files if 'Testing' in f]
        # self._logger.info(files_lwcase)
        for d in data:
            df = pd.DataFrame()
            for a in algo:
                file_algo = [c for c in files if a.lower() in c.lower() if 'testing' in c.lower()]
                if not file_algo:
                    continue
                with open(str(path / file_algo[-1]), 'r') as fh:
                    df_2 = pd.read_csv(fh)
                    # Accumulate one row set per algorithm with both raw and
                    # filtered rank metrics plus every configured hit ratio.
                    if df.empty:
                        df['Algorithm'] = [a] * len(df_2)
                        df['Epochs'] = df_2['Epoch']
                        df['Mean Rank'] = df_2['Mean Rank']
                        df['Filt Mean Rank'] = df_2['Filtered Mean Rank']
                        for hit in hits:
                            df['Hits' + str(hit)] = df_2['Hit-%d Ratio'%hit]
                            df['Filt Hits' + str(hit)] = df_2['Filtered Hit-%d Ratio'%hit]
                    else:
                        df_3 = pd.DataFrame()
                        df_3['Algorithm'] = [a] * len(df_2)
                        df_3['Epochs'] = df_2['Epoch']
                        df_3['Mean Rank'] = df_2['Mean Rank']
                        df_3['Filt Mean Rank'] = df_2['Filtered Mean Rank']
                        for hit in hits:
                            df_3['Hits' + str(hit)] = df_2['Hit-%d Ratio'%hit]
                            df_3['Filt Hits' + str(hit)] = df_2['Filtered Hit-%d Ratio'%hit]
                        frames = [df, df_3]
                        df = pd.concat(frames)
            files = os.listdir(str(result))
            # Keep only the final-epoch row per algorithm for the summary
            # table and the bar plots.
            df_4 = df.loc[df['Epochs'] == max(df['Epochs'])]
            df_4 = df_4.loc[:, df_4.columns != 'Epochs']
            file_no = len(
                [c for c in files if d.lower() in c.lower() if 'testing' in c.lower() if 'latex' in c.lower()])
            with open(str(result / (d + '_testing_latex_table_' + str(file_no + 1) + '.txt')), 'w') as fh:
                fh.write(df_4.to_latex(index=False))
            file_no = len(
                [c for c in files if d.lower() in c.lower() if 'testing' in c.lower() if 'table' in c.lower() if
                 'csv' in c.lower()])
            with open(str(result / (d + '_testing_table_' + str(file_no + 1) + '.csv')), 'w') as fh:
                df_4.to_csv(fh, index=False)
            # Melt the wide table into (Metrics, Algorithm, Score) rows for seaborn.
            df_5 = pd.DataFrame(columns=['Metrics', 'Algorithm', 'Score'])
            metrics = [f for f in df_4.columns if f != 'Algorithm']
            for i in range(len(df_4)):
                if df_5.empty:
                    df_5['Algorithm'] = [df_4.iloc[i]['Algorithm']] * len(metrics)
                    df_5['Metrics'] = metrics
                    df_5['Score'] = df_4.iloc[i][metrics].values
                else:
                    df_t = pd.DataFrame()
                    df_t['Algorithm'] = [df_4.iloc[i]['Algorithm']] * len(metrics)
                    df_t['Metrics'] = metrics
                    df_t['Score'] = df_4.iloc[i][metrics].values
                    frame = [df_5, df_t]
                    df_5 = pd.concat(frame)
            # Rank metrics and hit ratios are plotted separately (different scales).
            df_6 = df_5[~df_5['Metrics'].str.contains('Hits')]
            plt.figure()
            flatui = ["#d46a7e", "#d5b60a", "#9b59b6", "#3498db", "#95a5a6", "#34495e", "#2ecc71", "#e74c3c"]
            g = seaborn.barplot(x="Metrics", y='Score', hue="Algorithm", palette=flatui, data=df_6)
            g.legend(loc='upper center', bbox_to_anchor=(0.5, 1.14), ncol=6)
            g.tick_params(labelsize=6)
            # ax = seaborn.lineplot(x="Metrics", y='Score', hue="Algorithm",
            #                   markers=True, dashes=False, data=df_5)
            files_lwcase = [f.lower() for f in files]
            file_no = len([c for c in files_lwcase if d.lower() in c if 'testing' in c if 'rank_plot' in c])
            plt.savefig(str(result / (d + '_testing_rank_plot_' + str(file_no + 1) + '.pdf')), bbox_inches='tight',
                        dpi=300)
            # plt.show()
            df_6 = df_5[df_5['Metrics'].str.contains('Hits')]
            plt.figure()
            flatui = ["#3498db", "#95a5a6", "#34495e", "#2ecc71", "#e74c3c", "#d46a7e", "#d5b60a", "#9b59b6"]
            g = seaborn.barplot(x="Metrics", y='Score', hue="Algorithm", palette=flatui, data=df_6)
            g.legend(loc='upper center', bbox_to_anchor=(0.5, 1.14), ncol=6)
            g.tick_params(labelsize=6)
            files_lwcase = [f.lower() for f in files]
            file_no = len([c for c in files_lwcase if d.lower() in c if 'testing' in c if 'hits_plot' in c])
            plt.savefig(str(result / (d + '_testing_hits_plot_' + str(file_no + 1) + '.pdf')), bbox_inches='tight',
                        dpi=300)
            # plt.show()
    @staticmethod
    def draw_embedding(embs, names, resultpath, algos, show_label):
        """Function to draw the embedding.

        Args:
            embs (matrix): Two dimesnional embeddings.
            names (list):List of string name.
            resultpath (str):Path where the result will be save.
            algos (str): Name of the algorithms which generated the algorithm.
            show_label (bool): If True, prints the string names of the entities and relations.
        """
        pos = {}
        node_color_mp = {}
        unique_ent = set(names)
        # Assign each unique entity a color, cycling when there are more
        # entities than available matplotlib colors.
        colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
        tot_col = len(colors)
        j = 0
        for i, e in enumerate(unique_ent):
            node_color_mp[e] = colors[j]
            j += 1
            if j >= tot_col:
                j = 0
        G = nx.Graph()
        hm_ent = {}
        for i, ent in enumerate(names):
            hm_ent[i] = ent
            G.add_node(i)
            pos[i] = embs[i]
        colors = []
        for n in list(G.nodes):
            colors.append(node_color_mp[hm_ent[n]])
        plt.figure()
        nodes_draw = nx.draw_networkx_nodes(G,
                                            pos,
                                            node_color=colors,
                                            node_size=50)
        nodes_draw.set_edgecolor('k')
        if show_label:
            nx.draw_networkx_labels(G, pos, font_size=8)
        if not os.path.exists(resultpath):
            os.mkdir(resultpath)
        files = os.listdir(resultpath)
        # Number the output file after the existing plots so nothing is overwritten.
        file_no = len(
            [c for c in files if algos + '_embedding_plot' in c])
        filename = algos + '_embedding_plot_' + str(file_no) + '.png'
        plt.savefig(str(resultpath / filename), bbox_inches='tight', dpi=300)
        # plt.show()
    @staticmethod
    def draw_embedding_rel_space(h_emb,
                                 r_emb,
                                 t_emb,
                                 h_name,
                                 r_name,
                                 t_name,
                                 resultpath,
                                 algos,
                                 show_label):
        """Function to draw the embedding in relation space.

        Args:
            h_emb (matrix): Two dimesnional embeddings of head.
            r_emb (matrix): Two dimesnional embeddings of relation.
            t_emb (matrix): Two dimesnional embeddings of tail.
            h_name (list):List of string name of the head.
            r_name (list):List of string name of the relation.
            t_name (list):List of string name of the tail.
            resultpath (str):Path where the result will be save.
            algos (str): Name of the algorithms which generated the algorithm.
            show_label (bool): If True, prints the string names of the entities and relations.
        """
        pos = {}
        node_color_mp_ent = {}
        node_color_mp_rel = {}
        unique_ent = set(h_name) | set(t_name)
        unique_rel = set(r_name)
        # Entities and relations get independent color cycles.
        colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
        tot_col = len(colors)
        j = 0
        for i, e in enumerate(unique_ent):
            node_color_mp_ent[e] = colors[j]
            j += 1
            if j >= tot_col:
                j = 0
        tot_col = len(colors)
        j = 0
        for i, r in enumerate(unique_rel):
            node_color_mp_rel[r] = colors[j]
            j += 1
            if j >= tot_col:
                j = 0
        # Each triple becomes a 3-node path: head -> relation -> tail.
        G = nx.DiGraph()
        idx = 0
        head_colors = []
        rel_colors = []
        tail_colors = []
        head_nodes = []
        tail_nodes = []
        rel_nodes = []
        for i, _ in enumerate(h_name):
            G.add_edge(idx, idx + 1)
            G.add_edge(idx + 1, idx + 2)
            head_nodes.append(idx)
            rel_nodes.append(idx + 1)
            tail_nodes.append(idx + 2)
            head_colors.append(node_color_mp_ent[h_name[i]])
            rel_colors.append(node_color_mp_rel[r_name[i]])
            tail_colors.append(node_color_mp_ent[t_name[i]])
            pos[idx] = h_emb[i]
            pos[idx + 1] = r_emb[i]
            pos[idx + 2] = t_emb[i]
            idx += 3
        plt.figure()
        # Node shapes distinguish the roles: circles (heads), diamonds
        # (relations), stars (tails).
        nodes_draw = nx.draw_networkx_nodes(G,
                                            pos,
                                            nodelist=head_nodes,
                                            node_color=head_colors,
                                            node_shape='o',
                                            node_size=50)
        nodes_draw.set_edgecolor('k')
        nodes_draw = nx.draw_networkx_nodes(G,
                                            pos,
                                            nodelist=rel_nodes,
                                            node_color=rel_colors,
                                            node_size=50,
                                            node_shape='D')
        nodes_draw.set_edgecolor('k')
        nodes_draw = nx.draw_networkx_nodes(G,
                                            pos,
                                            nodelist=tail_nodes,
                                            node_color=tail_colors,
                                            node_shape='*',
                                            node_size=50)
        nodes_draw.set_edgecolor('k')
        if show_label:
            nx.draw_networkx_labels(G, pos, font_size=8)
        nx.draw_networkx_edges(G, pos, arrows=True, width=0.5, alpha=0.5)
        if not os.path.exists(resultpath):
            os.mkdir(resultpath)
        files = os.listdir(resultpath)
        file_no = len(
            [c for c in files if algos + '_embedding_plot' in c])
        plt.savefig(str(resultpath / (algos + '_embedding_plot_' + str(file_no) + '.png')), bbox_inches='tight',
                    dpi=300)
        # plt.show()
| 19,354 | 40.893939 | 202 | py |
pykg2vec | pykg2vec-master/pykg2vec/utils/evaluator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for evaluating the results
"""
import os
import timeit
import torch
import numpy as np
import pandas as pd
from pykg2vec.utils.logger import Logger
from tqdm import tqdm
class MetricCalculator:
'''
MetricCalculator aims to
1) address all the statistic tasks.
2) provide interfaces for querying results.
MetricCalculator is expected to be used by "evaluation_process".
'''
_logger = Logger().get_logger(__name__)
def __init__(self, config):
self.config = config
self.hr_t = config.knowledge_graph.read_cache_data('hr_t')
self.tr_h = config.knowledge_graph.read_cache_data('tr_h')
# (f)mr : (filtered) mean rank
# (f)mrr : (filtered) mean reciprocal rank
# (f)hit : (filtered) hit-k ratio
self.mr = {}
self.fmr = {}
self.mrr = {}
self.fmrr = {}
self.hit = {}
self.fhit = {}
self.epoch = None
self.reset()
def reset(self):
# temporarily used buffers and indexes.
self.rank_head = []
self.rank_tail = []
self.f_rank_head = []
self.f_rank_tail = []
self.epoch = None
self.start_time = timeit.default_timer()
def append_result(self, result):
predict_tail = result[0]
predict_head = result[1]
h, r, t = result[2], result[3], result[4]
self.epoch = result[5]
t_rank, f_t_rank = self.get_tail_rank(predict_tail, h, r, t)
h_rank, f_h_rank = self.get_head_rank(predict_head, h, r, t)
self.rank_head.append(h_rank)
self.rank_tail.append(t_rank)
self.f_rank_head.append(f_h_rank)
self.f_rank_tail.append(f_t_rank)
def get_tail_rank(self, tail_candidate, h, r, t):
"""Function to evaluate the tail rank.
Args:
id_replace_tail (list): List of the predicted tails for the given head, relation pair
h (int): head id
r (int): relation id
t (int): tail id
hr_t (dict): list of tails for the given hwS and relation pari.
Returns:
Tensors: Returns tail rank and filetered tail rank
"""
trank = 0
ftrank = 0
for j in range(len(tail_candidate)):
val = tail_candidate[-j - 1]
if val != t:
trank += 1
ftrank += 1
if val in self.hr_t[(h, r)]:
ftrank -= 1
else:
break
return trank, ftrank
def get_head_rank(self, head_candidate, h, r, t):
"""Function to evaluate the head rank.
Args:
head_candidate (list): List of the predicted head for the given tail, relation pair
h (int): head id
r (int): relation id
t (int): tail id
Returns:
Tensors: Returns head rank and filetered head rank
"""
hrank = 0
fhrank = 0
for j in range(len(head_candidate)):
val = head_candidate[-j - 1]
if val != h:
hrank += 1
fhrank += 1
if val in self.tr_h[(t, r)]:
fhrank -= 1
else:
break
return hrank, fhrank
def settle(self):
head_ranks = np.asarray(self.rank_head, dtype=np.float32)+1
tail_ranks = np.asarray(self.rank_tail, dtype=np.float32)+1
head_franks = np.asarray(self.f_rank_head, dtype=np.float32)+1
tail_franks = np.asarray(self.f_rank_tail, dtype=np.float32)+1
ranks = np.concatenate((head_ranks, tail_ranks))
franks = np.concatenate((head_franks, tail_franks))
self.mr[self.epoch] = np.mean(ranks)
self.mrr[self.epoch] = np.mean(np.reciprocal(ranks))
self.fmr[self.epoch] = np.mean(franks)
self.fmrr[self.epoch] = np.mean(np.reciprocal(franks))
for hit in self.config.hits:
self.hit[(self.epoch, hit)] = np.mean(ranks <= hit, dtype=np.float32)
self.fhit[(self.epoch, hit)] = np.mean(franks <= hit, dtype=np.float32)
def get_curr_scores(self):
scores = {'mr': self.mr[self.epoch],
'fmr':self.fmr[self.epoch],
'mrr':self.mrr[self.epoch],
'fmrr':self.fmrr[self.epoch]}
return scores
    def save_test_summary(self, model_name):
        """Write a human-readable summary (.txt) and a per-epoch metric table
        (.csv) into ``config.path_result``.

        Args:
            model_name (str): name of the model, used in the output filenames.
        """
        # count earlier Testing files for this model so each run gets a fresh
        # numeric suffix instead of overwriting previous results
        files = os.listdir(str(self.config.path_result))
        l = len([f for f in files if model_name in f if 'Testing' in f])
        with open(str(self.config.path_result / (model_name + '_summary_' + str(l) + '.txt')), 'w') as fh:
            fh.write('----------------SUMMARY----------------\n')
            # dump every config entry except gpu/knowledge_graph (not text-friendly)
            for key, val in self.config.__dict__.items():
                if 'gpu' in key:
                    continue
                if 'knowledge_graph' in key:
                    continue
                if not isinstance(val, str):
                    if isinstance(val, list):
                        # render lists as '[a,b,...]'
                        v_tmp = '['
                        for i, v in enumerate(val):
                            if i == 0:
                                v_tmp += str(v)
                            else:
                                v_tmp += ',' + str(v)
                        v_tmp += ']'
                        val = v_tmp
                    else:
                        val = str(val)
                fh.write(key + ':' + val + '\n')
            fh.write('-----------------------------------------\n')
            fh.write("\n----------Metadata Info for Dataset:%s----------------" % self.config.knowledge_graph.dataset_name)
            fh.write("Total Training Triples :%d\n"%self.config.tot_train_triples)
            fh.write("Total Testing Triples :%d\n"%self.config.tot_test_triples)
            fh.write("Total validation Triples :%d\n"%self.config.tot_valid_triples)
            fh.write("Total Entities :%d\n"%self.config.tot_entity)
            fh.write("Total Relations :%d\n"%self.config.tot_relation)
            fh.write("---------------------------------------------")
        # metric table: MR/FMR/MRR/FMRR plus one raw and one filtered Hit@k
        # column per configured hit level, one row per evaluated epoch
        columns = ['Epoch', 'Mean Rank', 'Filtered Mean Rank', 'Mean Reciprocal Rank', 'Filtered Mean Reciprocal Rank']
        for hit in self.config.hits:
            columns += ['Hit-%d Ratio'%hit, 'Filtered Hit-%d Ratio'%hit]
        results = []
        for epoch, _ in self.mr.items():
            res_tmp = [epoch, self.mr[epoch], self.fmr[epoch], self.mrr[epoch], self.fmrr[epoch]]
            for hit in self.config.hits:
                res_tmp.append(self.hit[(epoch, hit)])
                res_tmp.append(self.fhit[(epoch, hit)])
            results.append(res_tmp)
        df = pd.DataFrame(results, columns=columns)
        # append mode: repeated calls with the same index l extend the same CSV
        with open(str(self.config.path_result / (model_name + '_Testing_results_' + str(l) + '.csv')), 'a') as fh:
            df.to_csv(fh)
    def display_summary(self):
        """Log a formatted block with the current epoch's test metrics."""
        # elapsed time since self.start_time (presumably set when the
        # calculator was reset -- not visible in this chunk; TODO confirm)
        stop_time = timeit.default_timer()
        test_results = []
        test_results.append('')
        test_results.append("------Test Results for %s: Epoch: %d --- time: %.2f------------" % (self.config.dataset_name, self.epoch, stop_time - self.start_time))
        test_results.append('--# of entities, # of relations: %d, %d'%(self.config.tot_entity, self.config.tot_relation))
        test_results.append('--mr, filtered mr : %.4f, %.4f'%(self.mr[self.epoch], self.fmr[self.epoch]))
        test_results.append('--mrr, filtered mrr : %.4f, %.4f'%(self.mrr[self.epoch], self.fmrr[self.epoch]))
        for hit in self.config.hits:
            test_results.append('--hits%d : %.4f ' % (hit, (self.hit[(self.epoch, hit)])))
            test_results.append('--filtered hits%d : %.4f ' % (hit, (self.fhit[(self.epoch, hit)])))
        test_results.append("---------------------------------------------------------")
        test_results.append('')
        self._logger.info("\n".join(test_results))
class Evaluator:
    """Class to perform evaluation of the model.

    Args:
        model (object): KGE model object.
        config (object): experiment configuration.
        tuning (bool): True when run inside hyper-parameter tuning.

    NOTE(review): the usage example below predates the torch port and refers
    to APIs (Session, output_queue) not present in this class -- kept only
    for historical reference; verify before relying on it.

    Examples:
        >>> from pykg2vec.utils.evaluator import Evaluator
        >>> evaluator = Evaluator(model=model, tuning=True)
        >>> evaluator.test_batch(Session(), 0)
        >>> acc = evaluator.output_queue.get()
        >>> evaluator.stop()
    """
    _logger = Logger().get_logger(__name__)

    def __init__(self, model, config, tuning=False):
        self.model = model
        self.config = config
        self.tuning = tuning
        # held-out triples: 'test' for the final run, 'valid' for mini-tests
        self.test_data = self.config.knowledge_graph.read_cache_data('triplets_test')
        self.eval_data = self.config.knowledge_graph.read_cache_data('triplets_valid')
        self.metric_calculator = MetricCalculator(self.config)

    def get_tail_rank(self, tail_candidate, h, r, t):
        """(unused placeholder removed)"""

    def test_tail_rank(self, h, r, topk=-1):
        """Rank all entities as tail candidates for (h, r) via torch.topk."""
        if hasattr(self.model, 'predict_tail_rank'):
            # model provides its own ranking routine -- delegate to it
            rank = self.model.predict_tail_rank(torch.LongTensor([h]).to(self.config.device), torch.LongTensor([r]).to(self.config.device), topk=topk)
            return rank.squeeze(0)
        # fallback: score (h, r, e) for every entity e and rank the scores
        h_batch = torch.LongTensor([h]).repeat([self.config.tot_entity]).to(self.config.device)
        r_batch = torch.LongTensor([r]).repeat([self.config.tot_entity]).to(self.config.device)
        entity_array = torch.LongTensor(list(range(self.config.tot_entity))).to(self.config.device)
        preds = self.model.forward(h_batch, r_batch, entity_array)
        _, rank = torch.topk(preds, k=topk)
        return rank

    def test_head_rank(self, r, t, topk=-1):
        """Rank all entities as head candidates for (r, t) via torch.topk."""
        if hasattr(self.model, 'predict_head_rank'):
            rank = self.model.predict_head_rank(torch.LongTensor([t]).to(self.config.device), torch.LongTensor([r]).to(self.config.device), topk=topk)
            return rank.squeeze(0)
        entity_array = torch.LongTensor(list(range(self.config.tot_entity))).to(self.config.device)
        r_batch = torch.LongTensor([r]).repeat([self.config.tot_entity]).to(self.config.device)
        t_batch = torch.LongTensor([t]).repeat([self.config.tot_entity]).to(self.config.device)
        preds = self.model.forward(entity_array, r_batch, t_batch)
        _, rank = torch.topk(preds, k=topk)
        return rank

    def test_rel_rank(self, h, t, topk=-1):
        """Rank all relations as candidates for the (h, t) pair."""
        if hasattr(self.model, 'predict_rel_rank'):
            # TODO: This is not implemented for conve, proje_pointwise, tucker, interacte, hyper and acre
            rank = self.model.predict_rel_rank(h.to(self.config.device), t.to(self.config.device), topk=topk)
            return rank.squeeze(0)
        h_batch = torch.LongTensor([h]).repeat([self.config.tot_relation]).to(self.config.device)
        rel_array = torch.LongTensor(list(range(self.config.tot_relation))).to(self.config.device)
        t_batch = torch.LongTensor([t]).repeat([self.config.tot_relation]).to(self.config.device)
        preds = self.model.forward(h_batch, rel_array, t_batch)
        _, rank = torch.topk(preds, k=topk)
        return rank

    def mini_test(self, epoch=None):
        """Evaluate on (a capped subset of) the validation split."""
        if self.config.test_num == 0:
            tot_valid_to_test = len(self.eval_data)
        else:
            tot_valid_to_test = min(self.config.test_num, len(self.eval_data))
        if self.config.debug:
            tot_valid_to_test = 10
        self._logger.info("Mini-Testing on [%d/%d] Triples in the valid set." % (tot_valid_to_test, len(self.eval_data)))
        return self.test(self.eval_data, tot_valid_to_test, epoch=epoch)

    def full_test(self, epoch=None):
        """Evaluate on the full test split (10 triples in debug mode)."""
        tot_valid_to_test = len(self.test_data)
        if self.config.debug:
            tot_valid_to_test = 10
        self._logger.info("Full-Testing on [%d/%d] Triples in the test set." % (tot_valid_to_test, len(self.test_data)))
        return self.test(self.test_data, tot_valid_to_test, epoch=epoch)

    def test(self, data, num_of_test, epoch=None):
        """Rank heads and tails for num_of_test triples and aggregate metrics.

        Returns:
            dict: current epoch's scores (see MetricCalculator.get_curr_scores).
        """
        self.metric_calculator.reset()
        progress_bar = tqdm(range(num_of_test))
        for i in progress_bar:
            h, r, t = data[i].h, data[i].r, data[i].t
            # generate head batch and predict heads.
            h_tensor = torch.LongTensor([h])
            r_tensor = torch.LongTensor([r])
            t_tensor = torch.LongTensor([t])
            # rank over the whole entity vocabulary (topk = tot_entity)
            hrank = self.test_head_rank(r_tensor, t_tensor, self.config.tot_entity)
            trank = self.test_tail_rank(h_tensor, r_tensor, self.config.tot_entity)
            result_data = [trank.detach().cpu().numpy(), hrank.detach().cpu().numpy(), h, r, t, epoch]
            self.metric_calculator.append_result(result_data)
        self.metric_calculator.settle()
        self.metric_calculator.display_summary()
        # write the file summary only once training is (nearly) finished
        if self.metric_calculator.epoch >= self.config.epochs - 1:
            self.metric_calculator.save_test_summary(self.model.model_name)
        return self.metric_calculator.get_curr_scores()
| 13,276 | 38.632836 | 164 | py |
pykg2vec | pykg2vec-master/pykg2vec/utils/criterion.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Criterion:
    """Collection of loss functions used by the KGE models.

    See "Loss Functions in Knowledge Graph Embedding Models"
    (http://ceur-ws.org/Vol-2377/paper_1.pdf) for background.
    """

    @staticmethod
    def pariwise_logistic(pos_preds, neg_preds, neg_rate, alpha):
        """RotatE-style loss with self-adversarial negative sampling; ``alpha``
        is the softmax temperature over the negative scores. (Name keeps the
        historical 'pariwise' spelling for API compatibility.)"""
        pos_term = F.logsigmoid(-pos_preds)
        # group the negatives of each positive into one row
        neg_scores = (-neg_preds).view((-1, neg_rate))
        # adversarial weights are detached so they act as fixed coefficients
        weights = nn.Softmax(dim=1)(neg_scores * alpha).detach()
        neg_term = torch.sum(weights * F.logsigmoid(-neg_scores), dim=-1)
        return -neg_term.mean() - pos_term.mean()

    @staticmethod
    def pairwise_hinge(pos_preds, neg_preds, margin):
        """Margin ranking loss: penalise positives that do not beat the paired
        negatives by at least ``margin`` (lower score = better triple)."""
        return torch.clamp(pos_preds + margin - neg_preds, min=0).sum()

    @staticmethod
    def pointwise_logistic(preds, target):
        """Softplus logistic loss; ``target`` is +1/-1 per sample."""
        return F.softplus(target * preds).mean()

    @staticmethod
    def pointwise_bce(preds, target):
        """Binary cross-entropy on logits; targets are clipped into [0, 1]."""
        clipped_target = torch.clamp(target, min=0.0, max=1.0)
        return torch.nn.BCEWithLogitsLoss()(preds, clipped_target)

    @staticmethod
    def multi_class_bce(pred_heads, pred_tails, tr_h, hr_t, label_smoothing, tot_entity):
        """BCE over all entities for both directions, with optional label
        smoothing towards the uniform distribution."""
        if label_smoothing is not None and tot_entity is not None:
            hr_t = hr_t * (1.0 - label_smoothing) + 1.0 / tot_entity
            tr_h = tr_h * (1.0 - label_smoothing) + 1.0 / tot_entity
        bce = torch.nn.BCEWithLogitsLoss()
        return torch.mean(bce(pred_heads, tr_h)) + torch.mean(bce(pred_tails, hr_t))

    @staticmethod
    def multi_class(pred_heads, pred_tails):
        """Sum of the per-direction losses already computed by the model."""
        return pred_heads + pred_tails
| 1,926 | 34.036364 | 89 | py |
pykg2vec | pykg2vec-master/pykg2vec/utils/trainer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import warnings
import torch
import torch.optim as optim
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from pykg2vec.utils.evaluator import Evaluator
from pykg2vec.utils.visualization import Visualization
from pykg2vec.utils.riemannian_optimizer import RiemannianOptimizer
from pykg2vec.data.generator import Generator
from pykg2vec.utils.logger import Logger
from pykg2vec.common import Importer, Monitor, TrainingStrategy
warnings.filterwarnings('ignore')
class EarlyStopper:
    """Early-stopping helper used by the trainer during KGE training.

    Args:
        patience (int): number of mini-test rounds to tolerate without
            improvement before stopping; a negative value never stops
            (default: {-1}).
        monitor (Monitor): the metric the stopper watches.
    """

    _logger = Logger().get_logger(__name__)

    def __init__(self, patience, monitor):
        self.monitor = monitor
        self.patience = patience
        # internal bookkeeping
        self.previous_metrics = None
        self.patience_left = patience

    def should_stop(self, curr_metrics):
        """Compare against the previous metrics and decide whether to stop."""
        stop = False
        metric_key, metric_name = self.monitor.value, self.monitor.name
        if self.previous_metrics is not None:
            prev_value = self.previous_metrics[metric_key]
            curr_value = curr_metrics[metric_key]
            # rank metrics improve downwards; everything else improves upwards
            if self.monitor in (Monitor.MEAN_RANK, Monitor.FILTERED_MEAN_RANK):
                got_worse = prev_value < curr_value
            else:
                got_worse = prev_value > curr_value
            if got_worse and self.patience_left > 0:
                self.patience_left -= 1
                self._logger.info(
                    '%s more chances before the trainer stops the training. (prev_%s, curr_%s): (%.4f, %.4f)' %
                    (self.patience_left, metric_name, metric_name, prev_value, curr_value))
            elif got_worse and self.patience_left == 0:
                self._logger.info('Stop the training.')
                stop = True
            else:
                self._logger.info('Reset the patience count to %d' % (self.patience))
                self.patience_left = self.patience
        self.previous_metrics = curr_metrics
        return stop
class Trainer:
    """Class for handling the training of the algorithms.

    Args:
        model (object): KGE model object
        config (object): experiment configuration

    Examples:
        >>> from pykg2vec.utils.trainer import Trainer
        >>> from pykg2vec.models.pairwise import TransE
        >>> trainer = Trainer(TransE())
        >>> trainer.build_model()
        >>> trainer.train_model()
    """
    # filenames used when persisting the best model under config.path_tmp
    TRAINED_MODEL_FILE_NAME = "model.vec.pt"
    TRAINED_MODEL_CONFIG_NAME = "config.npy"
    _logger = Logger().get_logger(__name__)

    def __init__(self, model, config):
        self.model = model
        self.config = config
        self.best_metric = None       # best mini-test metrics seen so far
        self.monitor = None           # Monitor enum set in train_model()
        self.training_results = []    # [epoch, accumulated loss] rows
        self.evaluator = None         # created in build_model()
        self.generator = None         # batch generator, created per run
        self.optimizer = None         # created in build_model()
        self.early_stopper = None     # created in build_model()
    def build_model(self, monitor=Monitor.FILTERED_MEAN_RANK):
        """Instantiate evaluator/optimizer/early-stopper and move the model
        to the configured device.

        Args:
            monitor (Monitor): metric watched by the early stopper.

        Raises:
            NotImplementedError: if config.optimizer names an unknown optimizer.
        """
        if self.config.load_from_data is not None:
            # resume from a previously saved model + config
            self.load_model(self.config.load_from_data)
        self.evaluator = Evaluator(self.model, self.config)
        self.model.to(self.config.device)
        if self.config.optimizer == "adam":
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.config.learning_rate,
            )
        elif self.config.optimizer == "sgd":
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.config.learning_rate,
            )
        elif self.config.optimizer == "adagrad":
            self.optimizer = optim.Adagrad(
                self.model.parameters(),
                lr=self.config.learning_rate,
            )
        elif self.config.optimizer == "rms":
            self.optimizer = optim.RMSprop(
                self.model.parameters(),
                lr=self.config.learning_rate,
            )
        elif self.config.optimizer == "riemannian":
            # the Riemannian optimizer needs parameter names to decide which
            # parameters live on the manifold
            param_names = [name for name, param in self.model.named_parameters()]
            self.optimizer = RiemannianOptimizer(
                self.model.parameters(),
                lr=self.config.learning_rate,
                param_names=param_names
            )
        else:
            raise NotImplementedError("No support for %s optimizer" % self.config.optimizer)
        self.config.summary()
        self.early_stopper = EarlyStopper(self.config.patience, monitor)
# Training related functions:
def train_step_pairwise(self, pos_h, pos_r, pos_t, neg_h, neg_r, neg_t):
pos_preds = self.model(pos_h, pos_r, pos_t)
neg_preds = self.model(neg_h, neg_r, neg_t)
if self.model.model_name.lower() == "rotate":
loss = self.model.loss(pos_preds, neg_preds, self.config.neg_rate, self.config.alpha)
else:
loss = self.model.loss(pos_preds, neg_preds, self.config.margin)
loss += self.model.get_reg(None, None, None)
return loss
def train_step_projection(self, h, r, t, hr_t, tr_h):
if self.model.model_name.lower() in ["conve", "tucker", "interacte", "hyper", "acre"]:
pred_tails = self.model(h, r, direction="tail") # (h, r) -> hr_t forward
pred_heads = self.model(t, r, direction="head") # (t, r) -> tr_h backward
if hasattr(self.config, 'label_smoothing'):
loss = self.model.loss(pred_heads, pred_tails, tr_h, hr_t, self.config.label_smoothing, self.config.tot_entity)
else:
loss = self.model.loss(pred_heads, pred_tails, tr_h, hr_t, None, None)
else:
pred_tails = self.model(h, r, hr_t, direction="tail") # (h, r) -> hr_t forward
pred_heads = self.model(t, r, tr_h, direction="head") # (t, r) -> tr_h backward
loss = self.model.loss(pred_heads, pred_tails)
loss += self.model.get_reg(h, r, t)
return loss
def train_step_pointwise(self, h, r, t, target):
preds = self.model(h, r, t)
loss = self.model.loss(preds, target.type(preds.type()))
loss += self.model.get_reg(h, r, t)
return loss
    def train_model(self):
        """Run the full training loop with periodic mini-tests, early
        stopping and best-model checkpointing, then a final full test.

        Returns:
            int: index of the last epoch that was run.
        """
        self.generator = Generator(self.model, self.config)
        self.monitor = Monitor.FILTERED_MEAN_RANK
        for cur_epoch_idx in range(self.config.epochs):
            self._logger.info("Epoch[%d/%d]" % (cur_epoch_idx, self.config.epochs))
            self.train_model_epoch(cur_epoch_idx)
            # every test_step epochs: mini-test, early-stop check, checkpoint
            if cur_epoch_idx % self.config.test_step == 0:
                self.model.eval()
                with torch.no_grad():
                    metrics = self.evaluator.mini_test(cur_epoch_idx)
                    if self.early_stopper.should_stop(metrics):
                        ### Early Stop Mechanism
                        ### start to check if the metric is still improving after each mini-test.
                        ### Example, if test_step == 5, the trainer will check metrics every 5 epoch.
                        break
                    # store the best model weights.
                    if self.config.save_model:
                        if self.best_metric is None:
                            self.best_metric = metrics
                            self.save_model()
                        else:
                            # rank metrics improve downwards, others upwards
                            if self.monitor == Monitor.MEAN_RANK or self.monitor == Monitor.FILTERED_MEAN_RANK:
                                is_better = self.best_metric[self.monitor.value] > metrics[self.monitor.value]
                            else:
                                is_better = self.best_metric[self.monitor.value] < metrics[self.monitor.value]
                            if is_better:
                                self.save_model()
                                self.best_metric = metrics
        self.model.eval()
        with torch.no_grad():
            self.evaluator.full_test(cur_epoch_idx)
        self.evaluator.metric_calculator.save_test_summary(self.model.model_name)
        self.generator.stop()
        self.save_training_result()
        if self.config.disp_result:
            self.display()
        self.export_embeddings()
        return cur_epoch_idx  # the runned epoches.
def tune_model(self):
"""Function to tune the model."""
current_loss = float("inf")
self.generator = Generator(self.model, self.config)
self.evaluator = Evaluator(self.model, self.config, tuning=True)
for cur_epoch_idx in range(self.config.epochs):
current_loss = self.train_model_epoch(cur_epoch_idx, tuning=True)
self.model.eval()
with torch.no_grad():
self.evaluator.full_test(cur_epoch_idx)
self.generator.stop()
return current_loss
    def train_model_epoch(self, epoch_idx, tuning=False):
        """Run one epoch over the training triples and return the summed loss.

        Args:
            epoch_idx (int): index of the current epoch (for bookkeeping).
            tuning (bool): when True, suppress the progress-bar loss readout.

        Returns:
            float: accumulated loss over all batches of this epoch.

        Raises:
            NotImplementedError: for an unknown model training strategy.
        """
        acc_loss = 0
        # debug mode trains on a fixed small number of batches
        num_batch = self.config.tot_train_triples // self.config.batch_size if not self.config.debug else 10
        self.generator.start_one_epoch(num_batch)
        progress_bar = tqdm(range(num_batch))
        for _ in progress_bar:
            data = list(next(self.generator))
            self.model.train()
            self.optimizer.zero_grad()
            # the batch payload layout depends on the training strategy
            if self.model.training_strategy == TrainingStrategy.PROJECTION_BASED:
                h = torch.LongTensor(data[0]).to(self.config.device)
                r = torch.LongTensor(data[1]).to(self.config.device)
                t = torch.LongTensor(data[2]).to(self.config.device)
                hr_t = data[3].to(self.config.device)
                tr_h = data[4].to(self.config.device)
                loss = self.train_step_projection(h, r, t, hr_t, tr_h)
            elif self.model.training_strategy == TrainingStrategy.POINTWISE_BASED:
                h = torch.LongTensor(data[0]).to(self.config.device)
                r = torch.LongTensor(data[1]).to(self.config.device)
                t = torch.LongTensor(data[2]).to(self.config.device)
                y = torch.LongTensor(data[3]).to(self.config.device)
                loss = self.train_step_pointwise(h, r, t, y)
            elif self.model.training_strategy == TrainingStrategy.PAIRWISE_BASED:
                pos_h = torch.LongTensor(data[0]).to(self.config.device)
                pos_r = torch.LongTensor(data[1]).to(self.config.device)
                pos_t = torch.LongTensor(data[2]).to(self.config.device)
                neg_h = torch.LongTensor(data[3]).to(self.config.device)
                neg_r = torch.LongTensor(data[4]).to(self.config.device)
                neg_t = torch.LongTensor(data[5]).to(self.config.device)
                loss = self.train_step_pairwise(pos_h, pos_r, pos_t, neg_h, neg_r, neg_t)
            else:
                raise NotImplementedError("Unknown training strategy: %s" % self.model.training_strategy)
            loss.backward()
            self.optimizer.step()
            acc_loss += loss.item()
            if not tuning:
                progress_bar.set_description('acc_loss: %f, cur_loss: %f'% (acc_loss, loss))
        self.training_results.append([epoch_idx, acc_loss])
        return acc_loss
    def enter_interactive_mode(self):
        """Build/load the model and demonstrate each inference helper once."""
        self.build_model()
        self.load_model()
        self._logger.info("""The training/loading of the model has finished!
Now enter interactive mode :)
-----
Example 1: trainer.infer_tails(1,10,topk=5)""")
        self.infer_tails(1, 10, topk=5)
        self._logger.info("""-----
Example 2: trainer.infer_heads(10,20,topk=5)""")
        self.infer_heads(10, 20, topk=5)
        self._logger.info("""-----
Example 3: trainer.infer_rels(1,20,topk=5)""")
        self.infer_rels(1, 20, topk=5)
    def exit_interactive_mode(self):
        """Log a goodbye message when leaving interactive inference."""
        self._logger.info("Thank you for trying out inference interactive script :)")
    def infer_tails(self, h, r, topk=5):
        """Infer and log the top-k most plausible tails for (h, r).

        Args:
            h (int): head id.
            r (int): relation id.
            topk (int): number of tail candidates to return.

        Returns:
            dict: maps each predicted tail id to its entity label.
        """
        tails = self.evaluator.test_tail_rank(h, r, topk).detach().cpu().numpy()
        idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
        idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
        logs = [
            "",
            "(head, relation)->({},{}) :: Inferred tails->({})".format(h, r, ",".join([str(i) for i in tails])),
            "",
            "head: %s" % idx2ent[h],
            "relation: %s" % idx2rel[r],
        ]
        for idx, tail in enumerate(tails):
            logs.append("%dth predicted tail: %s" % (idx, idx2ent[tail]))
        self._logger.info("\n".join(logs))
        return {tail: idx2ent[tail] for tail in tails}
def infer_heads(self, r, t, topk=5):
heads = self.evaluator.test_head_rank(r, t, topk).detach().cpu().numpy()
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
logs = [
"",
"(relation,tail)->({},{}) :: Inferred heads->({})".format(t, r, ",".join([str(i) for i in heads])),
"",
"tail: %s" % idx2ent[t],
"relation: %s" % idx2rel[r],
]
for idx, head in enumerate(heads):
logs.append("%dth predicted head: %s" % (idx, idx2ent[head]))
self._logger.info("\n".join(logs))
return {head: idx2ent[head] for head in heads}
def infer_rels(self, h, t, topk=5):
if self.model.model_name.lower() in ["proje_pointwise", "conve", "tucker"]:
self._logger.info("%s model doesn't support relation inference in nature.")
return {}
rels = self.evaluator.test_rel_rank(h, t, topk).detach().cpu().numpy()
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
logs = [
"",
"(head,tail)->({},{}) :: Inferred rels->({})".format(h, t, ",".join([str(i) for i in rels])),
"",
"head: %s" % idx2ent[h],
"tail: %s" % idx2ent[t],
]
for idx, rel in enumerate(rels):
logs.append("%dth predicted rel: %s" % (idx, idx2rel[rel]))
self._logger.info("\n".join(logs))
return {rel: idx2rel[rel] for rel in rels}
    # ''' Procedural functions:'''

    def save_model(self):
        """Persist model weights and the config under config.path_tmp."""
        saved_path = self.config.path_tmp / self.model.model_name
        saved_path.mkdir(parents=True, exist_ok=True)
        torch.save(self.model.state_dict(), str(saved_path / self.TRAINED_MODEL_FILE_NAME))
        # save the whole config object alongside the model (numpy pickle .npy)
        save_path_config = saved_path / self.TRAINED_MODEL_CONFIG_NAME
        np.save(save_path_config, self.config)
    def load_model(self, model_path=None):
        """Restore model weights and config saved by save_model().

        Args:
            model_path: directory holding the saved files; defaults to
                config.path_tmp / model_name.

        Raises:
            ValueError: if the saved files cannot be found.
        """
        if model_path is None:
            model_path_file = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_FILE_NAME
            model_path_config = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_CONFIG_NAME
        else:
            model_path = Path(model_path)
            model_path_file = model_path / self.TRAINED_MODEL_FILE_NAME
            model_path_config = model_path / self.TRAINED_MODEL_CONFIG_NAME
        if model_path_file.exists() and model_path_config.exists():
            # restore the saved config but keep the caller's load_from_data
            config_temp = np.load(model_path_config, allow_pickle=True).item()
            config_temp.__dict__['load_from_data'] = self.config.__dict__['load_from_data']
            self.config = config_temp
            # rebuild the model class from its registered name, then load weights
            _, model_def = Importer().import_model_config(self.config.model_name.lower())
            self.model = model_def(**self.config.__dict__)
            self.model.load_state_dict(torch.load(str(model_path_file)))
            self.model.eval()
        else:
            raise ValueError("Cannot load model from %s" % model_path_file)
    def display(self):
        """Plot embeddings and/or training/testing curves per config flags."""
        options = {"ent_only_plot": True,
                   "rel_only_plot": not self.config.plot_entity_only,
                   "ent_and_rel_plot": not self.config.plot_entity_only}
        if self.config.plot_embedding:
            viz = Visualization(self.model, self.config, vis_opts=options)
            viz.plot_embedding(resultpath=self.config.path_figures, algos=self.model.model_name, show_label=False)
        if self.config.plot_training_result:
            viz = Visualization(self.model, self.config)
            viz.plot_train_result()
        if self.config.plot_testing_result:
            viz = Visualization(self.model, self.config)
            viz.plot_test_result()
def export_embeddings(self):
"""
Export embeddings in tsv and pandas pickled format.
With tsvs (both label, vector files), you can:
1) Use those pretained embeddings for your applications.
2) Visualize the embeddings in this website to gain insights. (https://projector.tensorflow.org/)
Pandas dataframes can be read with pd.read_pickle('desired_file.pickle')
"""
save_path = self.config.path_embeddings / self.model.model_name
save_path.mkdir(parents=True, exist_ok=True)
idx2ent = self.config.knowledge_graph.read_cache_data('idx2entity')
idx2rel = self.config.knowledge_graph.read_cache_data('idx2relation')
with open(str(save_path / "ent_labels.tsv"), 'w') as l_export_file:
for label in idx2ent.values():
l_export_file.write(label + "\n")
with open(str(save_path / "rel_labels.tsv"), 'w') as l_export_file:
for label in idx2rel.values():
l_export_file.write(label + "\n")
for named_embedding in self.model.parameter_list:
all_ids = list(range(0, int(named_embedding.weight.shape[0])))
stored_name = named_embedding.name
if len(named_embedding.weight.shape) == 2:
all_embs = named_embedding.weight.detach().detach().cpu().numpy()
with open(str(save_path / ("%s.tsv" % stored_name)), 'w') as v_export_file:
for idx in all_ids:
v_export_file.write("\t".join([str(x) for x in all_embs[idx]]) + "\n")
    def save_training_result(self):
        """Write this run's per-epoch losses to a numbered CSV in path_result."""
        # index the output file by the number of earlier Training files so
        # repeated runs do not overwrite each other
        files = os.listdir(str(self.config.path_result))
        l = len([f for f in files if self.model.model_name in f if 'Training' in f])
        df = pd.DataFrame(self.training_results, columns=['Epochs', 'Loss'])
        with open(str(self.config.path_result / (self.model.model_name + '_Training_results_' + str(l) + '.csv')),
                  'w') as fh:
            df.to_csv(fh)
| 19,650 | 39.85447 | 127 | py |
pykg2vec | pykg2vec-master/pykg2vec/data/generator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for generating the batch data for training and testing.
"""
import torch
import numpy as np
from multiprocessing import Process, Queue
from pykg2vec.common import TrainingStrategy
def raw_data_generator(command_queue, raw_queue, config):
    """Feeder process: slices the training triples into raw [h, r, t] batches.

    Args:
        command_queue (Queue): each item is either the number of batches to
            produce for the next epoch, or the string "quit" to shut down.
        raw_queue (Queue): receives (batch_idx, ndarray of [h, r, t]) tuples.
        config (pykg2vec.Config): Consists of the necessary parameters for training configuration.
    """
    data = config.knowledge_graph.read_cache_data('triplets_train')
    number_of_batch = len(data) // config.batch_size
    # NOTE(review): the permutation is drawn once per process, so every epoch
    # replays the same shuffled order -- confirm this is intended.
    random_ids = np.random.permutation(len(data))
    while True:
        command = command_queue.get()
        if command != "quit":
            # a numeric command overrides the default batch count for this epoch
            number_of_batch = command
            for batch_idx in range(number_of_batch):
                pos_start = config.batch_size * batch_idx
                pos_end = config.batch_size * (batch_idx + 1)
                raw_data = np.asarray([[data[x].h, data[x].r, data[x].t] for x in random_ids[pos_start:pos_end]])
                raw_queue.put((batch_idx, raw_data))
        else:
            # NOTE(review): exactly two None sentinels are queued for the
            # consumer processes; verify this matches config.num_process_gen.
            raw_queue.put(None)
            raw_queue.put(None)
            return
def process_function_pairwise(raw_queue, processed_queue, config):
    """Worker that turns raw batches into (positive, negative) triple pairs.

    Args:
        raw_queue (Queue): source of (batch_idx, ndarray of [h, r, t]) items;
            a None item is the shutdown sentinel.
        processed_queue (Queue): receives [ph, pr, pt, nh, nr, nt] batches.
        config (pykg2vec.Config): Consists of the necessary parameters for training configuration.
    """
    data = config.knowledge_graph.read_cache_data('triplets_train')
    relation_property = config.knowledge_graph.read_cache_data('relationproperty')
    # membership dict of all true training triples, used to reject corrupted
    # triples that happen to be true ("false negatives")
    positive_triplets = {(t.h, t.r, t.t): 1 for t in data}
    neg_rate = config.neg_rate
    del data # save memory space
    while True:
        item = raw_queue.get()
        if item is None:
            # sentinel from the feeder: shut down this worker
            return
        _, pos_triples = item
        ph = pos_triples[:, 0]
        pr = pos_triples[:, 1]
        pt = pos_triples[:, 2]
        nh = []
        nr = []
        nt = []
        for t in pos_triples:
            # "bern" sampling corrupts head vs tail according to the relation's
            # property; otherwise the choice is uniform 50/50
            prob = relation_property[t[1]] if config.sampling == "bern" else 0.5
            for _ in range(neg_rate):
                if np.random.random() > prob:
                    # corrupt the tail; resample until the triple is truly negative
                    idx_replace_tail = np.random.randint(config.tot_entity)
                    while (t[0], t[1], idx_replace_tail) in positive_triplets:
                        idx_replace_tail = np.random.randint(config.tot_entity)
                    nh.append(t[0])
                    nr.append(t[1])
                    nt.append(idx_replace_tail)
                else:
                    # corrupt the head
                    idx_replace_head = np.random.randint(config.tot_entity)
                    while (idx_replace_head, t[1], t[2]) in positive_triplets:
                        idx_replace_head = np.random.randint(config.tot_entity)
                    nh.append(idx_replace_head)
                    nr.append(t[1])
                    nt.append(t[2])
        processed_queue.put([ph, pr, pt, nh, nr, nt])
def process_function_pointwise(raw_queue, processed_queue, config):
    """Worker that turns raw batches into labelled (+1/-1) triples.

    Args:
        raw_queue (Queue): source of (batch_idx, ndarray of [h, r, t]) items;
            a None item is the shutdown sentinel.
        processed_queue (Queue): receives [h, r, t, y] batches where y is the
            +1 (true) / -1 (corrupted) label.
        config (pykg2vec.Config): Consists of the necessary parameters for training configuration.
    """
    data = config.knowledge_graph.read_cache_data('triplets_train')
    relation_property = config.knowledge_graph.read_cache_data('relationproperty')
    # membership dict of all true training triples, used to reject corrupted
    # triples that happen to be true ("false negatives")
    positive_triplets = {(t.h, t.r, t.t): 1 for t in data}
    neg_rate = config.neg_rate
    del data # save memory space
    while True:
        item = raw_queue.get()
        if item is None:
            # sentinel from the feeder: shut down this worker
            return
        _, pos_triples = item
        point_h = []
        point_r = []
        point_t = []
        point_y = []
        for t in pos_triples:
            # postive sample
            point_h.append(t[0])
            point_r.append(t[1])
            point_t.append(t[2])
            point_y.append(1)
            # "bern" sampling corrupts head vs tail according to the relation's
            # property; otherwise the choice is uniform 50/50
            prob = relation_property[t[1]] if config.sampling == "bern" else 0.5
            for _ in range(neg_rate):
                if np.random.random() > prob:
                    # corrupt the tail; resample until the triple is truly negative
                    idx_replace_tail = np.random.randint(config.tot_entity)
                    while (t[0], t[1], idx_replace_tail) in positive_triplets:
                        idx_replace_tail = np.random.randint(config.tot_entity)
                    point_h.append(t[0])
                    point_r.append(t[1])
                    point_t.append(idx_replace_tail)
                    point_y.append(-1)
                else:
                    # corrupt the head
                    idx_replace_head = np.random.randint(config.tot_entity)
                    while (idx_replace_head, t[1], t[2]) in positive_triplets:
                        idx_replace_head = np.random.randint(config.tot_entity)
                    point_h.append(idx_replace_head)
                    point_r.append(t[1])
                    point_t.append(t[2])
                    point_y.append(-1)
        processed_queue.put([point_h, point_r, point_t, point_y])
def process_function_multiclass(raw_queue, processed_queue, config):
    """Worker that builds dense multi-label target matrices per batch.

    Args:
        raw_queue (Queue): source of (batch_idx, ndarray of [h, r, t]) items;
            a None item is the shutdown sentinel.
        processed_queue (Queue): receives [h, r, t, hr_t, tr_h] where hr_t /
            tr_h are dense (batch_size x tot_entity) label matrices.
        config (pykg2vec.Config): Consists of the necessary parameters for training configuration.
    """
    def _to_sparse_i(indices):
        # split [[row, col], ...] pairs into the two coordinate lists that
        # torch.sparse expects
        x = []
        y = []
        for index in indices:
            x.append(index[0])
            y.append(index[1])
        return [x, y]
    hr_t_train = config.knowledge_graph.read_cache_data('hr_t_train')
    tr_h_train = config.knowledge_graph.read_cache_data('tr_h_train')
    neg_rate = config.neg_rate
    shape = [config.batch_size, config.tot_entity]
    while True:
        item = raw_queue.get()
        if item is None:
            # sentinel from the feeder: shut down this worker
            return
        # NOTE(review): `idx` unpacked here is shadowed by the loop variables
        # below; harmless since it is never used afterwards, but worth renaming.
        idx, raw_data = item
        h = raw_data[:, 0]
        r = raw_data[:, 1]
        t = raw_data[:, 2]
        indices_hr_t = []
        indices_tr_h = []
        neg_indices_hr_t = []
        neg_indices_tr_h = []
        random_ids = np.random.permutation(config.tot_entity)
        for i in range(config.batch_size):
            hr_t = hr_t_train[(h[i], r[i])]
            tr_h = tr_h_train[(t[i], r[i])]
            # positive labels: all known tails/heads for the pair
            for idx in hr_t:
                indices_hr_t.append([i, idx])
            for idx in tr_h:
                indices_tr_h.append([i, idx])
            # NOTE(review): negatives are drawn from a fixed window of 100
            # shuffled entities regardless of neg_rate -- confirm intended.
            if neg_rate > 0:
                for idx in random_ids[0:100]:
                    if idx not in hr_t:
                        neg_indices_hr_t.append([i, idx])
                for idx in random_ids[0:100]:
                    if idx not in tr_h:
                        neg_indices_tr_h.append([i, idx])
        values_hr_t = torch.FloatTensor([1]).repeat([len(indices_hr_t)])
        values_tr_h = torch.FloatTensor([1]).repeat([len(indices_tr_h)])
        if neg_rate > 0:
            neg_values_hr_t = torch.FloatTensor([-1]).repeat([len(neg_indices_hr_t)])
            neg_values_tr_h = torch.FloatTensor([-1]).repeat([len(neg_indices_tr_h)])
        # It looks Torch sparse tensor does not work in multi processing
        # so they need to be converted to dense, which is not memory efficient
        # https://github.com/pytorch/pytorch/pull/27062
        # https://github.com/pytorch/pytorch/issues/20248
        hr_t = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(indices_hr_t)), values_hr_t, torch.Size(shape)).to_dense()
        tr_h = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(indices_tr_h)), values_tr_h, torch.Size(shape)).to_dense()
        if neg_rate > 0:
            neg_hr_t = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(neg_indices_hr_t)), neg_values_hr_t, torch.Size(shape)).to_dense()
            neg_tr_h = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(neg_indices_tr_h)), neg_values_tr_h, torch.Size(shape)).to_dense()
            # merge positive (+1) and negative (-1) labels into one matrix
            hr_t = hr_t.add(neg_hr_t)
            tr_h = tr_h.add(neg_tr_h)
        processed_queue.put([h, r, t, hr_t, tr_h])
class Generator:
    """Generator class for the embedding algorithms.

    Spawns one feeder process plus config.num_process_gen sample-processing
    workers connected by bounded multiprocessing queues, and yields processed
    training batches when iterated.

    Args:
        model (object): KGE model object (supplies the training strategy).
        config (object): generator configuration object.

    Yields:
        matrix : Batch size of processed triples

    Examples:
        >>> from pykg2vec.utils.generator import Generator
        >>> from pykg2vec.models.TransE import TransE
        >>> model = TransE()
        >>> gen_train = Generator(model.config, training_strategy=TrainingStrategy.PAIRWISE_BASED)
    """

    def __init__(self, model, config):
        self.model = model
        self.config = config
        self.training_strategy = model.training_strategy
        self.process_list = []
        # bounded queues keep the producers from running far ahead of training
        self.raw_queue_size = 10
        self.processed_queue_size = 10
        self.command_queue = Queue(self.raw_queue_size)
        self.raw_queue = Queue(self.raw_queue_size)
        self.processed_queue = Queue(self.processed_queue_size)
        self.create_feeder_process()
        self.create_train_processor_process()

    def __iter__(self):
        return self

    def __next__(self):
        # blocks until a worker has produced the next processed batch
        return self.processed_queue.get()

    def stop(self):
        """Function to stop all the worker process."""
        self.command_queue.put("quit")
        for worker_process in self.process_list:
            # poll with a 1s join timeout until each worker has exited
            while True:
                worker_process.join(1)
                if not worker_process.is_alive():
                    break

    def create_feeder_process(self):
        """Function to create the feeder process."""
        feeder_worker = Process(target=raw_data_generator, args=(self.command_queue, self.raw_queue, self.config))
        self.process_list.append(feeder_worker)
        feeder_worker.daemon = True
        feeder_worker.start()

    def create_train_processor_process(self):
        """Function to create the processes for generating training samples."""
        for _ in range(self.config.num_process_gen):
            # pick the worker matching the model's training strategy
            if self.training_strategy == TrainingStrategy.PROJECTION_BASED:
                process_worker = Process(target=process_function_multiclass, args=(self.raw_queue, self.processed_queue, self.config))
            elif self.training_strategy == TrainingStrategy.PAIRWISE_BASED:
                process_worker = Process(target=process_function_pairwise, args=(self.raw_queue, self.processed_queue, self.config))
            elif self.training_strategy == TrainingStrategy.POINTWISE_BASED:
                process_worker = Process(target=process_function_pointwise, args=(self.raw_queue, self.processed_queue, self.config))
            else:
                raise NotImplementedError("This strategy is not supported.")
            self.process_list.append(process_worker)
            process_worker.daemon = True
            process_worker.start()

    def start_one_epoch(self, num_batch):
        # tell the feeder how many batches to emit for the next epoch
        self.command_queue.put(num_batch)
| 11,620 | 35.775316 | 143 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
    """Cosine learning-rate schedule with linear warmup.

    Args:
        x: training progress as a fraction in [0, 1] (step / t_total).
        warmup: fraction of training used for the linear warmup ramp.

    Returns:
        float: multiplier applied to the base learning rate.
    """
    if x < warmup:
        return x/warmup
    # math.cos, not torch.cos: x is a plain Python float here (the callers
    # pass step/t_total), and torch.cos requires a Tensor argument.
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Constant LR multiplier of 1.0 after a linear warmup ramp."""
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by linear decay to zero at x == 1."""
    if x >= warmup:
        return 1.0 - x
    return x / warmup
# Mapping from schedule name (the `schedule` constructor argument of BertAdam)
# to the function computing the LR multiplier from training progress.
SCHEDULES = {
    'warmup_cosine':warmup_cosine,
    'warmup_constant':warmup_constant,
    'warmup_linear':warmup_linear,
}
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Differences from standard Adam: no bias correction is applied, and
    weight decay is decoupled — added directly to the update rather than to
    the gradient (matching the TF BERT implementation).

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        # Fail fast on out-of-range hyper-parameters.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current scheduled learning rate for each parameter.

        Returns [0] if no optimization step has been performed yet.
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Per-parameter gradient clipping (as in the original code).
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient.
                # Keyword forms (alpha=/value=) replace the positional Number
                # overloads, which are deprecated and removed in recent PyTorch.
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
                # NOTE: bias correction is intentionally omitted (BERT behavior):
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
        return loss
| 6,803 | 40.742331 | 116 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/optimization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for OpenAI GPT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
    """Cosine LR multiplier with linear warmup (branchless selector form).

    Args:
        x: training progress as a fraction in [0, 1].
        warmup: warmup fraction; x <= warmup selects the linear ramp.

    Returns:
        float: multiplier applied to the base learning rate.
    """
    s = 1 if x <= warmup else 0
    # math.cos, not torch.cos: x is a plain Python float here and torch.cos
    # requires a Tensor argument.
    return s*(x/warmup) + (1-s)*(0.5 * (1 + math.cos(math.pi * x)))
def warmup_constant(x, warmup=0.002):
    """Flat multiplier of 1 after a linear warmup ramp (x <= warmup)."""
    ramp = x / warmup  # evaluated unconditionally, as in the branchless original
    return ramp if x <= warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Linear warmup (x <= warmup), then decay by the factor (1 - x)."""
    ramp = x / warmup  # evaluated unconditionally, as in the branchless original
    scale = ramp if x <= warmup else 1
    return scale * (1 - x)
# Mapping from schedule name (the `schedule` constructor argument of
# OpenAIAdam) to the function computing the LR multiplier.
SCHEDULES = {
    'warmup_cosine':warmup_cosine,
    'warmup_constant':warmup_constant,
    'warmup_linear':warmup_linear,
}
class OpenAIAdam(Optimizer):
    """Implements Open AI version of Adam algorithm with weight decay fix.

    Standard Adam (with bias correction) extended with: an optional warmup
    LR schedule, per-parameter gradient clipping, and decoupled weight decay
    applied after the Adam update (1-d parameters are skipped unless
    `vector_l2` is set).
    """
    def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1,
                 b1=0.9, b2=0.999, e=1e-8, weight_decay=0,
                 vector_l2=False, max_grad_norm=-1, **kwargs):
        # Fail fast on out-of-range hyper-parameters.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {}".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {}".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {}".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay, vector_l2=vector_l2,
                        max_grad_norm=max_grad_norm)
        super(OpenAIAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the scheduled learning rate per parameter ([0] before the first step)."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['b1'], group['b2']
                state['step'] += 1
                # Per-parameter gradient clipping (as in the original code).
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient.
                # Keyword forms (alpha=/value=) replace the positional Number
                # overloads, which are deprecated and removed in recent PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['e'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Add weight decay at the end (fixed version)
                if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
                    p.data.add_(p.data, alpha=-lr_scheduled * group['weight_decay'])
        return loss
| 5,661 | 39.156028 | 116 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
    """Command-line dispatcher: convert TF/OpenAI checkpoints to PyTorch.

    Expects sys.argv[1] to be one of the four convert_* command names and
    the remaining 2-3 arguments to be checkpoint/config/output paths.
    """
    import sys
    if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
        "convert_tf_checkpoint_to_pytorch",
        "convert_openai_checkpoint",
        "convert_transfo_xl_checkpoint",
        "convert_gpt2_checkpoint",
    ]:
        print(
        "Should be used as one of: \n"
        ">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
        ">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
        ">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
        ">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
    else:
        if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
            try:
                # Imported lazily: this path pulls in TensorFlow.
                from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions.")
                raise
            if len(sys.argv) != 5:
                # pylint: disable=line-too-long
                print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
            else:
                # argv is consumed back-to-front: OUTPUT, then CONFIG, then CHECKPOINT.
                PYTORCH_DUMP_OUTPUT = sys.argv.pop()
                TF_CONFIG = sys.argv.pop()
                TF_CHECKPOINT = sys.argv.pop()
                convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
        elif sys.argv[1] == "convert_openai_checkpoint":
            from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
            OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            if len(sys.argv) == 5:
                OPENAI_GPT_CONFIG = sys.argv[4]
            else:
                # Empty string selects the default model configuration.
                OPENAI_GPT_CONFIG = ""
            convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
                                                 OPENAI_GPT_CONFIG,
                                                 PYTORCH_DUMP_OUTPUT)
        elif sys.argv[1] == "convert_transfo_xl_checkpoint":
            try:
                from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
            except ImportError:
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions.")
                raise
            # Heuristic: a path containing "ckpt" is a checkpoint, otherwise a dataset file.
            if 'ckpt' in sys.argv[2].lower():
                TF_CHECKPOINT = sys.argv[2]
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = sys.argv[2]
                TF_CHECKPOINT = ""
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            if len(sys.argv) == 5:
                TF_CONFIG = sys.argv[4]
            else:
                TF_CONFIG = ""
            convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
        else:
            # Remaining command: convert_gpt2_checkpoint.
            try:
                from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
            except ImportError:
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions.")
                raise
            TF_CHECKPOINT = sys.argv[2]
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
            if len(sys.argv) == 5:
                TF_CONFIG = sys.argv[4]
            else:
                TF_CONFIG = ""
            convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
    main()
| 4,393 | 51.309524 | 145 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from external.pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME,
GPT2Config,
GPT2Model,
load_tf_weights_in_gpt2)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into PyTorch weight + config files.

    Args:
        gpt2_checkpoint_path: path to the TensorFlow checkpoint.
        gpt2_config_file: optional model-config JSON path; "" selects defaults.
        pytorch_dump_folder_path: directory receiving the converted files.
    """
    # Build the (possibly default-configured) model skeleton.
    config = GPT2Config() if gpt2_config_file == "" else GPT2Config(gpt2_config_file)
    model = GPT2Model(config)
    # Populate the skeleton with the TensorFlow weights.
    load_tf_weights_in_gpt2(model, gpt2_checkpoint_path)
    # Serialize weights and configuration side by side.
    weights_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    config_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print("Save PyTorch model to {}".format(weights_path))
    torch.save(model.state_dict(), weights_path)
    print("Save configuration file to {}".format(config_path))
    with open(config_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--gpt2_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--gpt2_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path,
args.gpt2_config_file,
args.pytorch_dump_folder_path)
| 3,046 | 40.739726 | 111 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from external.pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
OpenAIGPTConfig,
OpenAIGPTModel,
load_tf_weights_in_openai_gpt)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an OpenAI GPT TF checkpoint folder into PyTorch weight/config files.

    Args:
        openai_checkpoint_folder_path: folder holding the TensorFlow checkpoint.
        openai_config_file: optional config JSON path; "" selects defaults.
        pytorch_dump_folder_path: directory receiving the converted files.
    """
    # Build the (possibly default-configured) model skeleton.
    config = OpenAIGPTConfig() if openai_config_file == "" else OpenAIGPTConfig(openai_config_file)
    model = OpenAIGPTModel(config)
    # Populate the skeleton with the TensorFlow weights.
    load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
    # Serialize weights and configuration side by side.
    weights_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    config_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print("Save PyTorch model to {}".format(weights_path))
    torch.save(model.state_dict(), weights_path)
    print("Save configuration file to {}".format(config_path))
    with open(config_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--openai_checkpoint_folder_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--openai_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
args.openai_config_file,
args.pytorch_dump_folder_path)
| 3,141 | 42.041096 | 118 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path
logger = logging.getLogger(__name__)
# Shortcut model name -> download URL for Google's pre-trained BERT archives.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
# File names expected inside a model archive / dump directory.
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """Load the weights of a TensorFlow BERT checkpoint into a PyTorch model.

    Walks every TF variable, maps its slash-separated scope path onto the
    matching attribute path of `model`, and copies the array in place
    (transposing `kernel` matrices to match nn.Linear's weight layout).
    Returns the same `model` object.
    """
    try:
        # Imported lazily so the module works without TensorFlow installed.
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Scope components like "layer_11" split into a name plus an index.
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # Translate TF variable names to the PyTorch attribute names.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Descend into the indexed sub-module (e.g. encoder layer N).
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF stores dense kernels as (in, out); nn.Linear expects (out, in).
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    cdf_term = 1.0 + torch.erf(x / math.sqrt(2.0))
    return x * 0.5 * cdf_term
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# Lookup table resolving `hidden_act` config strings to activation callables.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.

    Can be built from an explicit vocabulary size (with keyword
    hyper-parameters), from a JSON config file path, from a dict
    (`from_dict`) or from a JSON file (`from_json_file`).
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
                or the path to a JSON config file.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        # A string argument is treated as a path to a JSON config file.
        # (`unicode` exists only on Python 2; the check short-circuits on Python 3.)
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # An int argument is the vocabulary size; everything else is keyword-supplied.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
    # Prefer apex's fused CUDA LayerNorm when available; otherwise fall back
    # to the pure-PyTorch implementation below with identical semantics.
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps
        def forward(self, x):
            # Normalize over the last dimension, then apply the learned affine.
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout."""
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased: it keeps the TensorFlow variable
        # name so TF checkpoints can be loaded.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        seq_len = input_ids.size(1)
        # One position id per token, broadcast over the batch dimension.
        pos_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
        pos_ids = pos_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(pos_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (no output projection)."""
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # One projection per role; creation order preserved for RNG parity.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Reshape (batch, seq, hidden) into (batch, heads, seq, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask, output_attention_probs=False):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Raw affinities between every query and key, scaled by sqrt(head size).
        scores = torch.matmul(q, k.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        # attention_mask is additive (precomputed once in BertModel.forward).
        scores = scores + attention_mask
        probs = nn.Softmax(dim=-1)(scores)
        # This drops entire tokens to attend to, which might seem unusual,
        # but is taken from the original Transformer paper.
        probs = self.dropout(probs)
        ctx = torch.matmul(probs, v)
        ctx = ctx.permute(0, 2, 1, 3).contiguous()
        merged_shape = ctx.size()[:-2] + (self.all_head_size,)
        ctx = ctx.view(*merged_shape)
        if output_attention_probs:
            return ctx, probs
        return ctx
class BertSelfOutput(nn.Module):
    """Post-attention projection: Linear + dropout, then residual add + LayerNorm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # `input_tensor` is the sub-layer input, re-added as a residual connection.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Full attention sub-layer: self-attention followed by output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, output_attention_probs=False):
        self_output = self.self(
            input_tensor, attention_mask, output_attention_probs=output_attention_probs)
        attention_probs = None
        if output_attention_probs:
            # Self-attention returned (context, probs) in this mode.
            self_output, attention_probs = self_output
        attention_output = self.output(self_output, input_tensor)
        if output_attention_probs:
            return attention_output, attention_probs
        return attention_output
class BertIntermediate(nn.Module):
    """Feed-forward expansion: Linear(hidden -> intermediate) + configured activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act is either an activation name (an ACT2FN key) or a
        # callable; the `unicode` check is only evaluated under Python 2 thanks
        # to short-circuiting.
        act_is_name = isinstance(config.hidden_act, str) or (
            sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode))
        self.intermediate_act_fn = ACT2FN[config.hidden_act] if act_is_name else config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction: Linear(intermediate -> hidden) + dropout,
    then residual add + LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        # Residual connection around the whole feed-forward sub-layer.
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One transformer block: attention sub-layer + feed-forward sub-layer."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, output_attention_probs=False):
        attention_output = self.attention(
            hidden_states, attention_mask, output_attention_probs=output_attention_probs)
        attention_probs = None
        if output_attention_probs:
            attention_output, attention_probs = attention_output
        # Feed-forward sub-layer, with the attention output as its residual input.
        layer_output = self.output(self.intermediate(attention_output), attention_output)
        if output_attention_probs:
            return layer_output, attention_probs
        return layer_output
class BertEncoder(nn.Module):
    """Stack of identical BertLayer blocks (deep-copied from one template)."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        template = BertLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(template) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask,
                output_all_encoded_layers=True, output_attention_probs=False):
        all_encoder_layers = []
        all_attention_probs = []
        for layer_module in self.layer:
            hidden_states = layer_module(
                hidden_states, attention_mask,
                output_attention_probs=output_attention_probs)
            if output_attention_probs:
                hidden_states, attention_probs = hidden_states
                all_attention_probs.append(attention_probs)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            # Keep only the final layer's hidden states.
            all_encoder_layers.append(hidden_states)
        if output_attention_probs:
            return all_encoder_layers, all_attention_probs
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pools the sequence by transforming the first ([CLS]) token with Linear + Tanh."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply the transformed hidden state of token 0.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be an activation name (ACT2FN key) or a
        # callable; the `unicode` branch only runs under Python 2.
        act_is_name = isinstance(config.hidden_act, str) or (
            sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode))
        self.transform_act_fn = ACT2FN[config.hidden_act] if act_is_name else config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Masked-LM decoder: transform, then project onto the vocabulary.

    The decoder weight is tied to the input embedding matrix; only a separate
    per-token output bias is learned here.
    """

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        vocab_size, hidden = bert_model_embedding_weights.size(0), bert_model_embedding_weights.size(1)
        self.decoder = nn.Linear(hidden, vocab_size, bias=False)
        # Weight tying with the word embeddings.
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence head)."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head only: a binary classifier on the pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Both pre-training heads: masked-LM scores and next-sentence scores."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # LM scores come from the token-level sequence output, NSP scores from
        # the pooled [CLS] representation.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertPreTrainedModel(nn.Module):
    """An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        # Reject anything that is not a BertConfig early, with a hint for the
        # common mistake of passing a model name instead of a config object.
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_bert_weights(self, module):
        """ Initialize the weights of one submodule (intended for `self.apply`).
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
                        from_tf=False, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        # Resolve well-known model names to their download URLs; anything else
        # is treated as a local path or URL.
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            # Note: errors are reported via the logger and `None` is returned
            # (best-effort behavior kept for backward compatibility).
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            # NOTE(review): extractall on an untrusted archive can write outside
            # tempdir (path traversal); only load archives from trusted sources
            # (or use tarfile's `filter="data"` on Python 3.12+).
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            # NOTE(review): torch.load unpickles the checkpoint; only load
            # checkpoints from trusted sources.
            state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict
        # Older checkpoints use TF-style LayerNorm parameter names; rename
        # 'gamma' -> 'weight' and 'beta' -> 'bias' before loading.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load each submodule's slice of the state_dict,
            # accumulating missing/unexpected keys and error messages.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        # Checkpoints saved from task models prefix everything with 'bert.';
        # strip that prefix when loading into a bare BertModel.
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model
class BertModel(BertPreTrainedModel):
    """The bare BERT transformer ("Bidirectional Embedding Representations from
    a Transformer"): embeddings -> encoder stack -> pooler.

    forward(input_ids, token_type_ids=None, attention_mask=None,
            output_all_encoded_layers=True) returns (encoded_layers, pooled_output):
        - `encoded_layers`: when `output_all_encoded_layers=True`, the list of
          every layer's hidden states, each [batch, seq, hidden]; otherwise a
          single [batch, seq, hidden] tensor for the last layer only.
        - `pooled_output`: [batch, hidden] transform of the first ([CLS])
          token, as used by the next-sentence classifier.

    `attention_mask` holds 1 for real tokens and 0 for padding; a missing mask
    defaults to all ones, missing `token_type_ids` to all zeros.

    Example:
        input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
        input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
        token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
        model = modeling.BertModel(config=config)
        all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    """

    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Broadcast the [batch, seq] mask to [batch, 1, 1, seq] so it can be
        # added to every head's raw attention scores, then turn it into an
        # additive bias: 0.0 where we attend, -10000.0 where we mask (the
        # softmax then effectively removes masked positions).
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        pooled_output = self.pooler(encoded_layers[-1])
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
    """BERT with both pre-training heads: masked language modeling and
    next-sentence classification.

    forward inputs: `input_ids` [batch, seq]; optional `token_type_ids`,
    `attention_mask` (see BertModel); optional `masked_lm_labels`
    [batch, seq] with -1 marking ignored positions; optional
    `next_sentence_label` [batch] in {0, 1} (0 = continuation, 1 = random).

    Returns the summed cross-entropy loss of both tasks when both label
    tensors are given; otherwise the tuple
    (prediction_scores [batch, seq, vocab], seq_relationship_score [batch, 2]).
    """

    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # The LM head's decoder weight is tied to the word embedding matrix.
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                masked_lm_labels=None, next_sentence_label=None):
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        if masked_lm_labels is None or next_sentence_label is None:
            return prediction_scores, seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(
            prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(
            seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        return masked_lm_loss + next_sentence_loss
class BertForMaskedLM(BertPreTrainedModel):
    """BERT with only the masked language modeling head.

    forward inputs: `input_ids` [batch, seq]; optional `token_type_ids`,
    `attention_mask` (see BertModel); optional `masked_lm_labels`
    [batch, seq] with -1 marking ignored positions.

    Returns the masked-LM cross-entropy loss when `masked_lm_labels` is
    given, otherwise the [batch, seq, vocab_size] prediction logits.
    """

    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)
        self.bert = BertModel(config)
        # Decoder weight tied to the word embedding matrix.
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        sequence_output, _ = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        prediction_scores = self.cls(sequence_output)
        if masked_lm_labels is None:
            return prediction_scores
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(
            prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT with only the next-sentence classification head.

    forward inputs: `input_ids` [batch, seq]; optional `token_type_ids`,
    `attention_mask` (see BertModel); optional `next_sentence_label` [batch]
    in {0, 1} (0 = continuation, 1 = random sentence).

    Returns the next-sentence cross-entropy loss when `next_sentence_label`
    is given, otherwise the [batch, 2] classification logits.
    """

    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
        _, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
        if next_sentence_label is None:
            return seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with a linear classification head on the pooled ([CLS]) output.

    Constructor args: `config` (BertConfig) and `num_labels` (number of
    classes).

    forward inputs: `input_ids` [batch, seq]; optional `token_type_ids`,
    `attention_mask` (see BertModel); optional `labels` [batch] with class
    indices in [0, num_labels).

    Returns the cross-entropy loss when `labels` is given, otherwise the
    [batch, num_labels] classification logits.
    """

    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        _, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(pooled_output))
        if labels is None:
            return logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT for multiple-choice tasks: a shared Linear(hidden, 1) scorer over
    each choice's pooled output.

    Constructor args: `config` (BertConfig) and `num_choices`.

    forward inputs have shape [batch, num_choices, seq] for `input_ids`,
    `token_type_ids` and `attention_mask`; choices are flattened and run
    through BERT as one batch, scored, and the scores reshaped back to
    [batch, num_choices]. Optional `labels` is a [batch] tensor of choice
    indices.

    Returns the cross-entropy loss when `labels` is given, otherwise the
    [batch, num_choices] logits.
    """

    def __init__(self, config, num_choices):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Merge the batch and choice dimensions so BERT sees [batch*choices, seq].
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(
            flat_input_ids, flat_token_type_ids, flat_attention_mask,
            output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(pooled_output))
        reshaped_logits = logits.view(-1, self.num_choices)
        if labels is None:
            return reshaped_logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(reshaped_logits, labels)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT with a per-token linear classifier on the last hidden layer.

    Params:
        config: a BertConfig instance sizing all submodules.
        num_labels: number of output classes per token (default usage: 2).

    Inputs:
        input_ids: [batch_size, seq_len] word-piece token indices.
        token_type_ids: optional [batch_size, seq_len] segment ids (0 = sentence A,
            1 = sentence B).
        attention_mask: optional [batch_size, seq_len] mask with 1 for real tokens
            and 0 for padding.
        labels: optional [batch_size, seq_len] gold class indices in
            [0, num_labels).

    Outputs:
        If `labels` is given: the CrossEntropy loss over the (optionally masked)
        token positions. Otherwise: [batch_size, seq_len, num_labels] logits.
    """
    def __init__(self, config, num_labels):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        hidden, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(hidden))
        if labels is None:
            return logits
        loss_fn = CrossEntropyLoss()
        flat_logits = logits.view(-1, self.num_labels)
        flat_labels = labels.view(-1)
        if attention_mask is None:
            return loss_fn(flat_logits, flat_labels)
        # Only positions where the mask is 1 contribute to the loss.
        keep = attention_mask.view(-1) == 1
        return loss_fn(flat_logits[keep], flat_labels[keep])
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits
    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and position outside of the sequence are not taken
            into account for computing the loss.
        `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and position outside of the sequence are not taken
            into account for computing the loss.
    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Projects each token's hidden state to two scores: (start, end).
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.qa_outputs(sequence_output)
        # Split the 2-channel output into per-token start and end scores.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # NOTE(review): clamp_ mutates the caller's label tensors in place.
            # Positions clamped onto seq_len hit ignore_index and contribute no loss.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits
| 60,198 | 48.18219 | 139 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json"}
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.

    Reads every variable from the TF checkpoint at `gpt2_checkpoint_path`,
    walks the matching attribute path in `model`, and copies the value in.
    Returns the model with weights assigned. Requires TensorFlow at runtime.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        # squeeze() drops size-1 dims so shapes match the PyTorch parameters.
        arrays.append(array.squeeze())
    # Walk each TF variable name down the module tree and copy the value in.
    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            # e.g. "h0" -> ['h', '0', '']: attribute name plus layer index.
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            # Map TF suffixes onto PyTorch parameter names.
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Index into a ModuleList (e.g. the transformer block stack).
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as used by GPT-2)."""
    inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1.0 + torch.tanh(inner))
class GPT2Config(object):
    """Configuration class to store the configuration of a `GPT2Model`.

    Can be built either from keyword arguments (passing the vocabulary size
    as an int) or from a JSON config file (passing its path as a string).
    """
    def __init__(
        self,
        vocab_size_or_config_json_file=50257,
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
    ):
        """Constructs GPT2Config.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        # The py2 `unicode` check is dead code under py3 (short-circuited).
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # String argument: treat it as a path to a JSON config file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
        else:
            # Bug fix: the original implicit string concatenation was missing a
            # space, producing "...(int)or the path...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `GPT2Config` from a Python dictionary of parameters."""
        config = GPT2Config(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `GPT2Config` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class Conv1D(nn.Module):
    """Linear layer with a transposed (nx -> nf) weight layout, matching the
    original TF GPT-2 checkpoint format. Operates on the last input dim."""
    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)
        self.weight = Parameter(weight)
        self.bias = Parameter(torch.zeros(nf))
    def forward(self, x):
        # Flatten leading dims, apply bias + x @ weight, then restore them.
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        out = torch.addmm(self.bias, flat, self.weight)
        return out.view(*out_shape)
class Attention(nn.Module):
    """Multi-head causal self-attention with an incremental (key, value) cache
    exposed via `layer_past` / `present`."""
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask, broadcastable as (1, 1, n_ctx, n_ctx).
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        # Single projection producing query, key and value stacked on the last dim.
        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
    def _attn(self, q, k, v):
        # k is already transposed (see split_heads), so this is q @ k^T.
        w = torch.matmul(q, k)
        if self.scale:
            # Scaled dot-product attention: divide by sqrt(head dim).
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Slice the causal mask to the current query (nd) and key (ns) lengths.
        b = self.bias[:, :, ns-nd:ns, :ns]
        # Masked positions are pushed to ~-1e10 so softmax gives them ~0 weight.
        w = w * b - 1e10 * (1 - b)
        w = nn.Softmax(dim=-1)(w)
        return torch.matmul(w, v)
    def merge_heads(self, x):
        # (batch, head, seq, head_feat) -> (batch, seq, head * head_feat)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
        if k:
            # Keys are pre-transposed so _attn can matmul without a transpose.
            return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
    def forward(self, x, layer_past=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Cached keys were stored un-transposed; restore the transposed
            # layout before concatenating with the new keys.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
        a = self._attn(query, key, value)
        a = self.merge_heads(a)
        a = self.c_proj(a)
        return a, present
class MLP(nn.Module):
    """Position-wise feed-forward block: Conv1D -> gelu -> Conv1D."""
    def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = gelu
    def forward(self, x):
        hidden = self.act(self.c_fc(x))
        return self.c_proj(hidden)
class Block(nn.Module):
    """One pre-LayerNorm transformer block: attention and MLP sublayers,
    each wrapped in a residual connection."""
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        embed_dim = config.n_embd
        self.ln_1 = LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.attn = Attention(embed_dim, n_ctx, config, scale)
        self.ln_2 = LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * embed_dim, config)
    def forward(self, x, layer_past=None):
        attn_out, present = self.attn(self.ln_1(x), layer_past=layer_past)
        x = x + attn_out
        x = x + self.mlp(self.ln_2(x))
        return x, present
class GPT2LMHead(nn.Module):
    """Language-modeling head whose projection weight is tied to the token
    embedding matrix passed in at construction time."""
    def __init__(self, model_embeddings_weights, config):
        super(GPT2LMHead, self).__init__()
        self.n_embd = config.n_embd
        self.set_embeddings_weights(model_embeddings_weights)
    def set_embeddings_weights(self, model_embeddings_weights):
        """(Re)tie the decoder to the given embedding weight (vocab, hidden)."""
        vocab_size, hidden_size = model_embeddings_weights.shape
        self.decoder = nn.Linear(hidden_size, vocab_size, bias=False)
        self.decoder.weight = model_embeddings_weights # Tied weights
    def forward(self, hidden_state):
        # Project hidden states onto the vocabulary.
        return self.decoder(hidden_state)
class GPT2MultipleChoiceHead(nn.Module):
    """Classifier head that scores one designated hidden state per choice.

    forward(hidden_states, mc_token_ids) gathers, for each (example, choice),
    the hidden state at position mc_token_ids[b, c] (usually the last token)
    and maps it to a single logit.
    """
    def __init__(self, config):
        super(GPT2MultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        self.linear = nn.Linear(config.n_embd, 1)
        nn.init.normal_(self.linear.weight, std=0.02)
        # Bug fix: the original called nn.init.normal_(self.linear.bias, 0),
        # which passes 0 as the *mean* (already the default) and leaves the
        # std at 1.0 — the clear intent was a zero-initialized bias.
        nn.init.zeros_(self.linear.bias)
    def forward(self, hidden_states, mc_token_ids):
        """
        Args:
            hidden_states: (bsz, num_choices, seq_length, hidden_size)
            mc_token_ids:  (bsz, num_choices) index of the classification token
        Returns:
            (bsz, num_choices) multiple-choice logits.
        """
        # Expand ids to (bsz, num_choices, 1, hidden_size) for gather on dim 2.
        gather_index = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
        # (bsz, num_choices, 1, hidden_size) -> (bsz, num_choices, hidden_size)
        choice_h = hidden_states.gather(2, gather_index).squeeze(2)
        # (bsz, num_choices)
        return self.linear(choice_h).squeeze(-1)
class GPT2PreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__()
        if not isinstance(config, GPT2Config):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        self.config = config
    def set_tied(self):
        # Hook for subclasses that tie input/output embeddings; no-op here.
        pass
    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
    ):
        """
        Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `gpt2_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . a TensorFlow checkpoint with trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            # Treat the argument as a local directory (or URL prefix).
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
                    archive_file, config_file
                )
            )
            # NOTE(review): returns None instead of raising on download failure;
            # callers must check for None.
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = GPT2Config.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
        if from_tf:
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
            return load_tf_weights_in_gpt2(model, resolved_archive_file)
        # Map old TF-style parameter suffixes (.g/.b/.w) onto PyTorch names.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if key.endswith(".g"):
                new_key = key[:-2] + ".weight"
            elif key.endswith(".b"):
                new_key = key[:-2] + ".bias"
            elif key.endswith(".w"):
                new_key = key[:-2] + ".weight"
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=""):
            # Recursively load each submodule, accumulating diagnostics.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
            )
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")
        start_model = model
        # If the checkpoint was saved from the bare transformer, load it into
        # model.transformer rather than the head-wrapping model.
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
            start_model = model.transformer
        load(start_model, prefix="")
        if len(missing_keys) > 0:
            logger.info(
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
            )
        if len(unexpected_keys) > 0:
            logger.info(
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
            )
        if len(error_msgs) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
            )
        # Make sure we are still sharing the output and input embeddings after loading weights
        model.set_tied()
        return model
class GPT2Model(GPT2PreTrainedModel):
    """OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
    Params:
        config: a GPT2Config class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            were d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[.
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
    Outputs:
        `hidden_states`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] were d_1 ... d_n are the dimension of input_ids)
    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    config = modeling_gpt2.GPT2Config()
    model = modeling_gpt2.GPT2Model(config)
    hidden_states = model(input_ids)
    ```
    """
    def __init__(self, config):
        super(GPT2Model, self).__init__(config)
        # wte: token embeddings; wpe: learned position embeddings.
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        block = Block(config.n_ctx, config, scale=True)
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.apply(self.init_weights)
    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
        # `past` carries cached (key, value) pairs from a previous call;
        # past_length offsets the generated position ids accordingly.
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = past[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        input_shape = input_ids.size()
        # Flatten any extra leading dims to (batch, seq) for the embedding lookup.
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))
        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            # Token-type embeddings reuse the word-embedding table.
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        presents = []
        for block, layer_past in zip(self.h, past):
            hidden_states, present = block(hidden_states, layer_past)
            presents.append(present)
        hidden_states = self.ln_f(hidden_states)
        # Restore the caller's original leading dims.
        output_shape = input_shape + (hidden_states.size(-1),)
        return hidden_states.view(*output_shape), presents
class GPT2LMHeadModel(GPT2PreTrainedModel):
    """GPT-2 transformer with a tied language-modeling head.

    Params:
        config: GPT2Config instance sizing all submodules.

    Inputs (forward):
        input_ids: BPE token ids of shape [..., sequence_length].
        position_ids / token_type_ids: optional tensors with the same shape.
        lm_labels: optional [..., sequence_length] targets; positions set to
            -1 are ignored in the loss.
        past: optional cached (key, value) pairs from a previous call.

    Outputs:
        The LM CrossEntropy loss when `lm_labels` is given, otherwise a tuple
        (lm_logits, presents) where lm_logits has shape
        [..., sequence_length, vocab_size].
    """
    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
        self.apply(self.init_weights)
    def set_tied(self):
        """Re-tie the LM head to the (possibly reloaded) token embedding."""
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
        hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is None:
            return lm_logits, presents
        loss_fn = CrossEntropyLoss(ignore_index=-1)
        return loss_fn(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    """GPT-2 with both a tied language-modeling head and a multiple-choice head.

    Params:
        config: GPT2Config instance sizing all submodules.

    Inputs (forward):
        input_ids: [batch_size, num_choices, sequence_length] BPE token ids.
        mc_token_ids: [batch_size, num_choices] index of the token whose hidden
            state feeds the multiple-choice classifier.
        lm_labels: optional LM targets (same shape as input_ids); -1 is ignored.
        mc_labels: optional [batch_size] correct-choice indices.
        token_type_ids / position_ids / past: as in GPT2Model.

    Outputs:
        A list containing the LM and/or multiple-choice losses when the
        corresponding labels are given; otherwise the tuple
        (lm_logits, mc_logits, presents).
    """
    def __init__(self, config):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
        self.multiple_choice_head = GPT2MultipleChoiceHead(config)
        self.apply(self.init_weights)
    def set_tied(self):
        """Re-tie the LM head to the (possibly reloaded) token embedding."""
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
        hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            lm_loss_fn = CrossEntropyLoss(ignore_index=-1)
            losses.append(lm_loss_fn(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1)))
        if mc_labels is not None:
            mc_loss_fn = CrossEntropyLoss()
            losses.append(mc_loss_fn(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        return lm_logits, mc_logits, presents
| 29,887 | 42.632117 | 146 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
    """Load the original OpenAI GPT TF checkpoint (stored as NumPy arrays)
    into a PyTorch ``model``.

    Args:
        model: an ``OpenAIGPTModel`` (or subclass) whose parameter shapes match
            the checkpoint.
        openai_checkpoint_folder_path: folder containing
            ``parameters_names.json``, ``params_shapes.json`` and the ten
            ``params_{0..9}.npy`` shards.

    Returns:
        The same ``model``, with its weights initialised from the checkpoint.

    Raises:
        AssertionError: if a checkpoint array's shape does not match the
            corresponding model parameter (the mismatching shapes are appended
            to the exception's args).
    """
    import re
    import numpy as np
    print("Loading weights...")
    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
    # The checkpoint is sharded into 10 flat .npy files; re-assemble the single
    # flat buffer, then cut it at the cumulative offsets and restore shapes.
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]
    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        # Attach the offending shapes so the failure is diagnosable.
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise
    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
    # Drop the entries already consumed above (token/position embeddings).
    names.pop(0)
    init_params.pop(0)
    init_params.pop(0)
    for name, array in zip(names, init_params):
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        # Walk the module tree following the TF scope path.
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                scope_names = re.split(r'(\d+)', m_name)
            else:
                scope_names = [m_name]
            # TF stores gains/biases/weights under single-letter names.
            if scope_names[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif scope_names[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif scope_names[0] == 'w':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # FIX: the original repeated this identical shape check twice in a row;
        # once is sufficient.
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation)."""
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return x * torch.sigmoid(x)
# Map activation names (config.afn) to *functions* operating on tensors.
# FIX: the original mapped "relu" to the nn.ReLU class; MLP.forward calls
# self.act(tensor), which would have constructed a module instead of applying
# the activation. Use the functional form so all three entries behave alike.
ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}
class OpenAIGPTConfig(object):
    """Holds the hyper-parameters of an `OpenAIGPTModel`.

    A config can be built either from explicit keyword arguments or from a
    JSON file (pass its path as the first argument).
    """
    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_special=0,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
    ):
        """Constructs OpenAIGPTConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `OpenAIGPTModel`, or the path of a configuration json file.
            n_special: Number of special tokens learned during fine-tuning
                ('[SEP]', '[CLF]', ...).
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads per attention layer.
            afn: Name of the non-linear activation function; "gelu", "relu"
                and "swish" are supported.
            resid_pdrop: Dropout probability for fully connected layers.
            embd_pdrop: Dropout ratio for the embeddings.
            attn_pdrop: Dropout ratio for the attention probabilities.
            layer_norm_epsilon: epsilon used by the layer-norm layers.
            initializer_range: stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        # A string (or, on Python 2, unicode) first argument means "load from
        # this JSON file"; an int means "vocab size plus keyword arguments".
        from_json = isinstance(vocab_size_or_config_json_file, str) or (
            sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode))
        if from_json:
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                self.__dict__.update(json.loads(reader.read()))
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_special = n_special
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )
    @property
    def total_tokens_embeddings(self):
        # Word embeddings plus the fine-tuned special-token embeddings.
        return self.vocab_size + self.n_special
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
        config.__dict__.update(json_object)
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `OpenAIGPTConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            return cls.from_dict(json.loads(reader.read()))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class Conv1D(nn.Module):
    """1x1 "convolution" over the last dimension, i.e. an affine map
    ``y = x @ weight + bias`` with ``weight`` of shape (nx, nf).

    Only ``rf == 1`` is implemented (the fast path used at inference); the
    general receptive field was only needed to train the original LM.
    """
    def __init__(self, nf, rf, nx):
        super(Conv1D, self).__init__()
        self.rf = rf
        self.nf = nf
        if rf != 1:
            raise NotImplementedError
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)
        self.weight = Parameter(weight)
        self.bias = Parameter(torch.zeros(nf))
    def forward(self, x):
        if self.rf != 1:
            raise NotImplementedError
        # Flatten all leading dims, apply the affine map, restore the shape.
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        return torch.addmm(self.bias, flat, self.weight).view(*out_shape)
class Attention(nn.Module):
    """Multi-head causal self-attention computed from a packed q/k/v projection."""
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # n_state == n_embd; kept to mirror the TF implementation
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask, broadcastable over (batch, head).
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, 1, nx)
        self.c_proj = Conv1D(n_state, 1, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
    def _attn(self, q, k, v):
        scores = torch.matmul(q, k)
        if self.scale:
            scores = scores / math.sqrt(v.size(-1))
        # Crop the causal mask to the current sequence length and apply it as a
        # large negative bias on masked positions (TF-style mask_attn_weights).
        mask = self.bias[:, :, : scores.size(-2), : scores.size(-1)]
        scores = scores * mask + -1e9 * (1 - mask)
        probs = self.attn_dropout(nn.Softmax(dim=-1)(scores))
        return torch.matmul(probs, v)
    def merge_heads(self, x):
        # (batch, head, seq, head_dim) -> (batch, seq, head * head_dim)
        x = x.permute(0, 2, 1, 3).contiguous()
        return x.view(*(x.size()[:-2] + (x.size(-2) * x.size(-1),)))
    def split_heads(self, x, k=False):
        # (batch, seq, dim) -> (batch, head, seq, head_dim); keys are laid out
        # transposed so _attn can matmul q @ k directly.
        x = x.view(*(x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)))
        return x.permute(0, 2, 3, 1) if k else x.permute(0, 2, 1, 3)
    def forward(self, x):
        qkv = self.c_attn(x)
        query, key, value = qkv.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        out = self.merge_heads(self._attn(query, key, value))
        return self.resid_dropout(self.c_proj(out))
class MLP(nn.Module):
    """Position-wise feed-forward block: project up, activate, project back."""
    def __init__(self, n_state, config):  # n_state is typically 4 * n_embd
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)
    def forward(self, x):
        hidden = self.act(self.c_fc(x))
        return self.dropout(self.c_proj(hidden))
class Block(nn.Module):
    """One transformer block: attention then MLP, each followed by a residual
    add and a post-LayerNorm (the original GPT ordering)."""
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
    def forward(self, x):
        normed = self.ln_1(x + self.attn(x))
        return self.ln_2(normed + self.mlp(normed))
class OpenAIGPTLMHead(nn.Module):
    """Projects hidden states onto the vocabulary using the (tied) input
    embedding matrix — the decoder shares storage with `tokens_embed`."""
    def __init__(self, model_embeddings_weights, config):
        super(OpenAIGPTLMHead, self).__init__()
        self.n_embd = config.n_embd
        self.set_embeddings_weights(model_embeddings_weights)
    def set_embeddings_weights(self, model_embeddings_weights):
        vocab, embd = model_embeddings_weights.shape
        self.decoder = nn.Linear(embd, vocab, bias=False)
        # Weight tying: re-point the decoder at the embedding Parameter itself.
        self.decoder.weight = model_embeddings_weights
    def forward(self, hidden_state):
        # Truncated Language modeling logits (we remove the last token)
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
        return self.decoder(hidden_state)
class OpenAIGPTMultipleChoiceHead(nn.Module):
    """Scores each answer choice from the hidden state of its classifier token."""
    def __init__(self, config):
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        # Dropout2d reproduces the noise_shape parameter of the TF
        # implementation. NOTE(review): forward never applies it — confirm
        # whether that is intentional.
        self.dropout = nn.Dropout2d(config.resid_pdrop)
        self.linear = nn.Linear(config.n_embd, 1)
        nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.normal_(self.linear.bias, 0)
    def forward(self, hidden_states, mc_token_ids):
        # hidden_states: (bsz, num_choices, seq_length, hidden_size)
        # mc_token_ids:  (bsz, num_choices)
        index = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
        # Pick the hidden state at each choice's classifier token:
        # (bsz, num_choices, hidden_size)
        picked = hidden_states.gather(2, index).squeeze(2)
        # Score each choice: (bsz, num_choices)
        return self.linear(picked).squeeze(-1)
class OpenAIGPTPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.

        Subclasses get `init_weights` (for use with `Module.apply`) and the
        `from_pretrained` classmethod, which downloads/caches a checkpoint and
        loads it into a freshly constructed instance.
    """
    def __init__(self, config, *inputs, **kwargs):
        # *inputs/**kwargs are accepted (and ignored here) so from_pretrained
        # can forward extra constructor arguments to subclasses.
        super(OpenAIGPTPreTrainedModel, self).__init__()
        if not isinstance(config, OpenAIGPTConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        self.config = config
    def init_weights(self, module):
        """ Initialize the weights of one submodule (apply with `self.apply`).
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def set_num_special_tokens(self, num_special_tokens):
        # No-op here; overridden by subclasses that own embedding matrices.
        pass
    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
    ):
        """
        Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
            num_special_tokens: number of special tokens to resize the embeddings to
                (defaults to the config's n_special).
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use
                instead of the pre-trained weights.
            *inputs, **kwargs: additional inputs for the specific subclass constructor.
        Returns:
            the loaded model, or None if the files could not be found/downloaded.
        """
        # Resolve the archive/config location: a known shortcut name, or a
        # local/remote directory path.
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
                    archive_file, config_file
                )
            )
            # NOTE(review): returns None instead of raising — callers must check.
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = OpenAIGPTConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
        if from_tf:
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
            return load_tf_weights_in_openai_gpt(model, resolved_archive_file)
        # Rename legacy checkpoint keys (.g/.b/.w) to PyTorch's .weight/.bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if key.endswith(".g"):
                new_key = key[:-2] + ".weight"
            elif key.endswith(".b"):
                new_key = key[:-2] + ".bias"
            elif key.endswith(".w"):
                new_key = key[:-2] + ".weight"
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=""):
            # Recursively load parameters, collecting problems instead of raising.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
            )
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")
        start_model = model
        # If the checkpoint was saved from the bare transformer, load it into
        # the `transformer` submodule of the head-wrapped model.
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
            start_model = model.transformer
        load(start_model, prefix="")
        if len(missing_keys) > 0:
            logger.info(
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
            )
        if len(unexpected_keys) > 0:
            logger.info(
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
            )
        if len(error_msgs) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
            )
        # Add additional embeddings for special tokens if needed
        # This step also make sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
        return model
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").

    A single embedding matrix (`tokens_embed`) stores both the word embeddings
    and the optional special-token embeddings ([SEP], [CLS], ...). Special
    tokens are NOT pre-trained and must be learned during fine-tuning; their
    number is set with `set_num_special_tokens(num_special_tokens)`. Rows
    [0, vocab_size) are word embeddings, rows [vocab_size, vocab_size + n_special)
    are the special-token embeddings, and
    config.total_tokens_embeddings == vocab_size + n_special.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            (or more generally [d_1, ..., d_n, sequence_length]) with word BPE
            token indices in [0, total_tokens_embeddings).
        `position_ids`: an optional torch.LongTensor with the same shape as
            input_ids, with position indices in [0, config.n_positions).
        `token_type_ids`: an optional torch.LongTensor with the same shape as
            input_ids; adds a third (segment) embedding — looked up in the same
            `tokens_embed` matrix — to each input token.
    Outputs:
        `hidden_states`: the encoded hidden states at the top of the model, a
            torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size]).

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    config = modeling_openai.OpenAIGPTConfig()
    model = modeling_openai.OpenAIGPTModel(config)
    hidden_states = model(input_ids)
    ```
    """
    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        num_tokens = config.vocab_size + config.n_special
        self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        # NOTE(review): self.drop (embd_pdrop) is never applied in forward —
        # confirm whether embedding dropout was intended.
        self.drop = nn.Dropout(config.embd_pdrop)
        # All blocks share one architecture; deepcopy gives each layer its own
        # (identically initialized) parameters.
        block = Block(config.n_ctx, config, scale=True)
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
        self.apply(self.init_weights)
        # nn.init.normal_(self.embed.weight, std=0.02)
    def set_num_special_tokens(self, num_special_tokens):
        " Update input embeddings with new embedding matrice if needed "
        if self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # # Build new embeddings and initialize
        old_embed = self.tokens_embed
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        # Initialize all new embeddings (in particular the special tokens)
        self.init_weights(self.tokens_embed)
        # Copy word and positional embeddings from the previous weights
        self.tokens_embed.weight.data[: self.config.vocab_size, :] = old_embed.weight.data[: self.config.vocab_size, :]
        # NOTE(review): this copies the LAST n_positions rows of the old token
        # matrix into the tail of the new one — it looks like a leftover from
        # when positions and tokens shared a single matrix; confirm before
        # relying on how special-token rows end up initialized.
        self.tokens_embed.weight.data[-self.config.n_positions :, :] = old_embed.weight.data[-self.config.n_positions :, :]
    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        if position_ids is None:
            # This was used when we had a single embedding matrice from position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        # Flatten any extra leading dims to (batch, seq) for the lookups.
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))
        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        # Add the position information to the input embeddings
        # h = e.sum(dim=2)
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        for block in self.h:
            hidden_states = block(hidden_states)
        # Restore the original leading dimensions.
        output_shape = input_shape + (hidden_states.size(-1),)
        return hidden_states.view(*output_shape)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT with a language-modeling head tied to the input embeddings
    ("Improving Language Understanding by Generative Pre-Training").

    The transformer's token-embedding matrix holds both word embeddings and the
    fine-tuned special-token embeddings; `set_num_special_tokens` resizes it and
    re-ties the LM head to the new matrix, so logits always span
    config.total_tokens_embeddings = vocab_size + n_special entries.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            (or more generally [d_1, ..., d_n, sequence_length]) with BPE token
            indices in [0, total_tokens_embeddings).
        `position_ids`: an optional torch.LongTensor with the same shape as
            input_ids, with position indices in [0, config.n_positions).
        `token_type_ids`: an optional torch.LongTensor with the same shape as
            input_ids; adds a third (segment) embedding to each input token.
        `lm_labels`: optional language modeling labels, a torch.LongTensor of
            shape [batch_size, sequence_length] with indices in
            [-1, 0, ..., vocab_size]; labels set to -1 are ignored (masked).
    Outputs:
        the cross-entropy language-modeling loss if `lm_labels` is given,
        otherwise `lm_logits` of size [..., total_tokens_embeddings].

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    config = modeling_openai.OpenAIGPTConfig()
    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """
    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)
    def set_num_special_tokens(self, num_special_tokens):
        """Resize the token embeddings, then re-tie the LM head to them."""
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is None:
            return lm_logits
        # Flatten to (tokens, vocab) vs (tokens,); -1 labels are masked out.
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT with a language-modeling head and a multiple-choice head
    ("Improving Language Understanding by Generative Pre-Training").

    The transformer's token-embedding matrix holds both word embeddings and the
    fine-tuned special-token embeddings; `set_num_special_tokens` resizes it and
    re-ties the LM head, so LM logits span
    config.total_tokens_embeddings = vocab_size + n_special entries.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with BPE token indices in [0, total_tokens_embeddings).
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index
            of the token whose hidden state feeds the multiple-choice classifier
            (usually the last token of each sequence).
        `lm_labels`: optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices in [-1, 0, ..., total_tokens_embeddings]; labels set to -1 are
            ignored (masked).
        `mc_labels`: optional multiple-choice labels, a torch.LongTensor of shape
            [batch_size] with indices in [0, num_choices).
        `token_type_ids`, `position_ids`: optional torch.LongTensors with the same shape
            as input_ids (see OpenAIGPTModel).
    Outputs:
        if `lm_labels` and/or `mc_labels` are given: a list with the corresponding losses;
        else: the tuple (`lm_logits`, `mc_logits`) of sizes
        [batch_size, num_choices, sequence_length, total_tokens_embeddings] and
        [batch_size, num_choices].

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choice, seq length)
    mc_token_ids = torch.LongTensor([[2], [1]])  # (bsz, number of choice)
    config = modeling_openai.OpenAIGPTConfig()
    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
    ```
    """
    # FIX: the docstring example previously instantiated OpenAIGPTLMHeadModel,
    # which does not return the (lm_logits, mc_logits) pair shown.
    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)
    def set_num_special_tokens(self, num_special_tokens):
        """Resize the token embeddings, then re-tie the LM head to them."""
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            # LM labels set to -1 (padding / context) are masked out.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        return lm_logits, mc_logits
| 37,647 | 45.421702 | 152 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
from io import open
import torch
import external.pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
from external.pytorch_pretrained_bert import (CONFIG_NAME,
WEIGHTS_NAME,
TransfoXLConfig,
TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from external.pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
VOCAB_NAME)
if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# The original corpora were pickled under the legacy module names
# 'data_utils' and 'vocabulary'; aliasing the classes and registering this
# module under those names lets pickle resolve them again on load.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
                                             transfo_xl_config_file,
                                             pytorch_dump_folder_path,
                                             transfo_xl_dataset_file):
    """Convert a pre-processed Transformer-XL corpus and/or a TF checkpoint
    into PyTorch artifacts saved under `pytorch_dump_folder_path`.

    Either input may be the empty string, in which case the corresponding
    conversion step is skipped.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
        print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        # NOTE(review): `pop` mutates the loaded corpus in place — the vocab
        # is saved separately above, so it is dropped from the dataset dump.
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print("Save dataset to {}".format(pytorch_dataset_dump_path))
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            # No config supplied: fall back to the library defaults (wt103).
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig(transfo_xl_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # Command-line entry point: only the dump folder is required, so the
    # script can convert a checkpoint, a dataset, or both in one run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.")
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.")
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help="An optional config json file corresponding to the pre-trained BERT model. \n"
             "This specifies the model architecture.")
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.")
    cli_args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        cli_args.tf_checkpoint_path,
        cli_args.transfo_xl_config_file,
        cli_args.pytorch_dump_folder_path,
        cli_args.transfo_xl_dataset_file)
| 5,642 | 47.230769 | 121 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
    from pathlib import Path
    # Default download/cache directory; overridable through the
    # PYTORCH_PRETRAINED_BERT_CACHE environment variable.
    PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                                   Path.home() / '.pytorch_pretrained_bert'))
except AttributeError:
    # presumably targets pythons lacking Path.home(); falls back to a plain
    # string path — note a py2 ImportError would not be caught here.
    PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                              os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
# Module-level logger named after this module.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified (truthy), append its hash to the url's,
    delimited by a period.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = '{}.{}'.format(name, sha256(etag.encode('utf-8')).hexdigest())
    return name
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))
    # The url/etag live in a sidecar json written by get_from_cache().
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))
    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3:
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: serve it from the cache, downloading if necessary.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Local file that exists: hand it back unchanged.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Anything else is neither a supported URL nor a local path.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    bucket_name, s3_path = parsed.netloc, parsed.path
    if not bucket_name or not s3_path:
        raise ValueError("bad s3 path {}".format(url))
    # urlparse keeps a single leading '/' on the key; drop it.
    if s3_path.startswith("/"):
        s3_path = s3_path[1:]
    return bucket_name, s3_path
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # A 404 from S3 means the object is missing; surface it as the
            # same EnvironmentError a missing local file would raise.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))
    return wrapper
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    resource = boto3.resource("s3")
    return resource.Object(bucket_name, s3_path).e_tag
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
    """Stream `url` into `temp_file`, showing a byte-level progress bar."""
    response = requests.get(url, stream=True)
    content_length = response.headers.get('Content-Length')
    progress = tqdm(unit="B", total=int(content_length) if content_length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if not chunk:
            continue  # filter out keep-alive new chunks
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")
    # The cache key hashes the url plus etag, so a changed remote file gets a
    # distinct cache entry instead of silently reusing a stale one.
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            # Write the url/etag sidecar so filename_to_url() can recover them.
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename):
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as source:
        # rstrip() also removes the trailing newline on each line.
        return {line.rstrip() for line in source}
def get_file_extension(path, dot=True, lower=True):
    """Return `path`'s extension, optionally without the dot and lowercased."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 8,280 | 32.124 | 112 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch
from external.pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a BertForPreTraining model from `bert_config_file`, load the TF
    checkpoint at `tf_checkpoint_path` into it, and save the state dict to
    `pytorch_dump_path`.
    """
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    pytorch_model = BertForPreTraining(config)
    # Copy the TF variables into the freshly initialised PyTorch modules.
    load_tf_weights_in_bert(pytorch_model, tf_checkpoint_path)
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(pytorch_model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point for the TF -> PyTorch BERT conversion.
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument(
        "--tf_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the TensorFlow checkpoint path.")
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained BERT model. \n"
             "This specifies the model architecture.")
    parser.add_argument(
        "--pytorch_dump_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.")
    cli_args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        cli_args.tf_checkpoint_path,
        cli_args.bert_config_file,
        cli_args.pytorch_dump_path)
| 2,538 | 39.301587 | 109 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path
logger = logging.getLogger(__name__)
# URLs of the released pre-trained Transformer-XL weights and configs,
# keyed by model shortcut name.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
# Canonical file names used when serializing a model to a folder.
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
    """ A map of modules from TF to PyTorch.
    This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
    Returns a dict mapping TF variable names to the PyTorch parameters (or
    lists of parameters, for the relative-position biases) they load into.
    """
    tf_to_pt_map = {}
    if hasattr(model, 'transformer'):
        # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
        tf_to_pt_map.update({
            "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
            "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
        for i, (out_l, proj_l, tie_proj) in enumerate(zip(
                                model.crit.out_layers,
                                model.crit.out_projs,
                                config.tie_projs)):
            layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
            if config.tie_weight:
                # Tied input/output embeddings: only the per-cutoff bias is
                # loaded here; the weights come from the embedding mapping below.
                tf_to_pt_map.update({
                    layer_str + 'b': out_l.bias})
            else:
                raise NotImplementedError
                # I don't think this is implemented in the TF code
                # NOTE(review): unreachable after the raise above; kept as
                # upstream documentation of what an untied load would map.
                tf_to_pt_map.update({
                    layer_str + 'lookup_table': out_l.weight,
                    layer_str + 'b': out_l.bias})
            if not tie_proj:
                tf_to_pt_map.update({
                    layer_str + 'proj': proj_l
                })
        # Now load the rest of the transformer
        model = model.transformer
    # Embeddings
    for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
        layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
        tf_to_pt_map.update({
            layer_str + 'lookup_table': embed_l.weight,
            layer_str + 'proj_W': proj_l
            })
    # Transformer blocks
    for i, b in enumerate(model.layers):
        layer_str = "transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
            layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
            layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
            layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
            layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
            layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
            layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
        })
    # Relative positioning biases
    if config.untie_r:
        # One (r_r, r_w) bias pair per layer; the TF variable stacks them, so
        # the map value is a list split during loading.
        r_r_list = []
        r_w_list = []
        for b in model.layers:
            r_r_list.append(b.dec_attn.r_r_bias)
            r_w_list.append(b.dec_attn.r_w_bias)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
    tf_to_pt_map.update({
        'transformer/r_r_bias': r_r_list,
        'transformer/r_w_bias': r_w_list})
    return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    Copies every variable named by build_tf_to_pytorch_map() from the TF
    checkpoint at `tf_path` into `model`'s parameters, then returns `model`.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array
    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if 'kernel' in name or 'proj' in name:
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights: one stacked TF variable maps
            # onto a list of per-layer PyTorch bias parameters.
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                print("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Drop the consumed variable (and its optimizer slots) so the final
        # report lists only the weights that were never copied.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)
    print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
class TransfoXLConfig(object):
    """Configuration class to store the configuration of a `TransfoXLModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file=267735,
                 cutoffs=(20000, 40000, 200000),
                 d_model=1024,
                 d_embed=1024,
                 n_head=16,
                 d_head=64,
                 d_inner=4096,
                 div_val=4,
                 pre_lnorm=False,
                 n_layer=18,
                 tgt_len=128,
                 ext_len=0,
                 mem_len=1600,
                 clamp_len=1000,
                 same_length=True,
                 proj_share_all_but_first=True,
                 attn_type=0,
                 sample_softmax=-1,
                 adaptive=True,
                 tie_weight=True,
                 dropout=0.1,
                 dropatt=0.0,
                 untie_r=True,
                 init="normal",
                 init_range=0.01,
                 proj_init_std=0.01,
                 init_std=0.02):
        """Constructs TransfoXLConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `TransfoXLModel` or a configuration json file.
            cutoffs: cutoffs for the adaptive softmax
            d_model: Dimensionality of the model's hidden states.
            d_embed: Dimensionality of the embeddings
            d_head: Dimensionality of the model's heads.
            div_val: dividend value for adaptive input and softmax
            pre_lnorm: apply LayerNorm to the input instead of the output
            d_inner: Inner dimension in FF
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            tgt_len: number of tokens to predict
            ext_len: length of the extended context
            mem_len: length of the retained previous heads
            same_length: use the same attn length for all tokens
            proj_share_all_but_first: True to share all but first projs, False not to share.
            attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
            clamp_len: use the same pos embeddings after clamp_len
            sample_softmax: number of samples in sampled softmax
            adaptive: use adaptive softmax
            tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            dropatt: The dropout ratio for the attention probabilities.
            untie_r: untie relative position biases
            init: parameter initializer to use
            init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: parameters initialized by N(0, init_std)
            init_std: parameters initialized by N(0, init_std)
        Raises:
            ValueError: if the first argument is neither an int nor a str path.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # Path to a json config: load every stored attribute verbatim.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            # Copy into a fresh list per instance; the default is an immutable
            # tuple so no mutable default argument is shared between calls.
            self.cutoffs = []
            self.cutoffs.extend(cutoffs)
            self.tie_weight = tie_weight
            # The first cluster's projection is never tied.
            if proj_share_all_but_first:
                self.tie_projs = [False] + [True] * len(self.cutoffs)
            else:
                self.tie_projs = [False] + [False] * len(self.cutoffs)
            self.d_model = d_model
            self.d_embed = d_embed
            self.d_head = d_head
            self.d_inner = d_inner
            self.div_val = div_val
            self.pre_lnorm = pre_lnorm
            self.n_layer = n_layer
            self.n_head = n_head
            self.tgt_len = tgt_len
            self.ext_len = ext_len
            self.mem_len = mem_len
            self.same_length = same_length
            self.attn_type = attn_type
            self.clamp_len = clamp_len
            self.sample_softmax = sample_softmax
            self.adaptive = adaptive
            self.dropout = dropout
            self.dropatt = dropatt
            self.untie_r = untie_r
            self.init = init
            self.init_range = init_range
            self.proj_init_std = proj_init_std
            self.init_std = init_std
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
        config = TransfoXLConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `TransfoXLConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding.

    Produces a [seq_len, 1, demb] tensor (or [seq_len, bsz, demb] when `bsz`
    is given) whose first half along the last axis is sin and second half cos
    of the frequency-scaled positions.
    """
    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Geometric frequency schedule: 1 / 10000^(2i/demb).
        frequencies = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', frequencies)
    def forward(self, pos_seq, bsz=None):
        # Outer product: [seq_len, demb // 2] angles.
        angles = torch.ger(pos_seq, self.inv_freq)
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)[:, None, :]
        return emb.expand(-1, bsz, -1) if bsz is not None else emb
class PositionwiseFF(nn.Module):
    """Two-layer position-wise feed-forward block with a residual connection.

    `pre_lnorm` selects whether LayerNorm is applied to the input (pre-norm)
    or to the residual sum (post-norm).
    """
    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # Name kept as `CoreNet` so TF-checkpoint loading can find the layers.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = LayerNorm(d_model)
        self.pre_lnorm = pre_lnorm
    def forward(self, inp):
        if self.pre_lnorm:
            # Pre-norm: normalize first, then add the residual.
            return inp + self.CoreNet(self.layer_norm(inp))
        # Post-norm: add the residual, then normalize the sum.
        return self.layer_norm(inp + self.CoreNet(inp))
class MultiHeadAttn(nn.Module):
    """Multi-head attention with optional cached memory.
    When `mems` is given it is concatenated before the input along the time
    axis, so keys/values span memory + current segment while queries come
    only from the current segment.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 pre_lnorm=False, r_r_bias=None, r_w_bias=None):
        super(MultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Separate projections: queries from the current segment only,
        # keys/values from memory + current segment.
        self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
        self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = LayerNorm(d_model)
        # 1/sqrt(d_head) scaling applied to raw attention scores.
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
        if r_r_bias is None or r_w_bias is None: # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias
    def forward(self, h, attn_mask=None, mems=None):
        ##### multihead attention
        # [hlen x bsz x n_head x d_head]
        if mems is not None:
            c = torch.cat([mems, h], 0)
        else:
            c = h
        if self.pre_lnorm:
            ##### layer normalization
            c = self.layer_norm(c)
        head_q = self.q_net(h)
        head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
        head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
        head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
        head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
        # [qlen x klen x bsz x n_head]
        attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
        attn_score.mul_(self.scale)
        if attn_mask is not None and attn_mask.any().item():
            # 2-D masks are shared across the batch; 3-D masks are per-example.
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
        # [qlen x klen x bsz x n_head]
        # Softmax over the key axis (dim=1 in this layout).
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = h + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(h + attn_out)
        return output
class RelMultiHeadAttn(nn.Module):
    """Base class for relative-position multi-head attention variants.
    Holds the shared qkv/output projections and the shift helpers used to
    realign position-indexed scores; `forward` is left to subclasses.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
                 r_r_bias=None, r_w_bias=None):
        super(RelMultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # One fused projection producing queries, keys and values.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = LayerNorm(d_model)
        # 1/sqrt(d_head) scaling applied to raw attention scores.
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
        if r_r_bias is None or r_w_bias is None: # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias
    def _parallelogram_mask(self, h, w, left=False):
        # Build a byte mask whose set region forms a parallelogram; used by
        # _shift to select the valid relative positions.
        mask = torch.ones((h, w)).byte()
        m = min(h, w)
        mask[:m,:m] = torch.triu(mask[:m,:m])
        mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
        if left:
            return mask
        else:
            return mask.flip(0)
    def _shift(self, x, qlen, klen, mask, left=False):
        # Pad along the key axis, then keep only the masked parallelogram so
        # every query row is aligned to its own relative-position window.
        if qlen > 1:
            zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
                                   device=x.device, dtype=x.dtype)
        else:
            zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
        if left:
            mask = mask.flip(1)
            x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
        else:
            x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
        x = x_padded.masked_select(mask[:,:,None,None]) \
                    .view(qlen, klen, x.size(2), x.size(3))
        return x
    def _rel_shift(self, x, zero_triu=False):
        # Relative-shift trick: pad one zero column, reshape so rows slide by
        # one, and drop the first row — realigning scores indexed by relative
        # distance without an explicit gather.
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)
        x = x_padded[1:].view_as(x)
        if zero_triu:
            # Optionally zero out the upper triangle of the shifted scores.
            ones = torch.ones((x.size(0), x.size(1)))
            x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
        return x
    def forward(self, w, r, attn_mask=None, mems=None):
        # Subclasses implement the actual attention computation.
        raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Transformer-XL attention: relative positional encodings `r` with the
    learnable global biases r_w_bias (content) and r_r_bias (position).
    """
    def __init__(self, *args, **kwargs):
        super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
        # Projects the relative positional embeddings into per-head keys.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
    def forward(self, w, r, attn_mask=None, mems=None):
        # w: input [qlen x bsz x d_model]; r: relative pos emb [rlen x 1 x d_model]
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Keys/values span memory + current segment; queries only the
            # current segment, hence the [-qlen:] slice below.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)                # qlen x n_head x d_head
        #### compute attention score
        # AC: content-based term; BD: position-based term (Transformer-XL paper).
        rw_head_q = w_head_q + self.r_w_bias                                    # qlen x bsz x n_head x d_head
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        rr_head_q = w_head_q + self.r_r_bias
        BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k))              # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            # Masked in float32 with a large negative instead of -inf.
            if attn_mask.dim() == 2:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[None,:,:,None], -1e30).type_as(attn_score)
            elif attn_mask.dim() == 3:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[:,:,:,None], -1e30).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        # Softmax over the key axis (dim=1 in this layout).
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Multi-head attention with fully learnable relative position terms.

    Unlike the sinusoidal variant, the relative position embedding `r_emb`
    and the position bias `r_bias` are learned parameters passed in per layer
    (see TransfoXLModel with attn_type == 1).
    """
    def __init__(self, *args, **kwargs):
        super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
    def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
        """Compute attention over `w` (and optional cached `mems`).

        Args (shapes per the comments below; qlen = current tokens,
        klen = mems + current tokens):
            w: [qlen, bsz, d_model] input sequence.
            r_emb: [klen, n_head, d_head], used for term B.
            r_w_bias: [n_head, d_head], used for term C.
            r_bias: [klen, n_head], used for term D.
            attn_mask: optional byte mask, 2-D ([qlen, klen]) or
                3-D ([qlen, klen, bsz]); nonzero positions are masked out.
            mems: optional cached hidden states prepended along dim 0.
        Returns:
            [qlen, bsz, d_model] output with residual (+ LayerNorm when not
            pre_lnorm).
        """
        # r_emb: [klen, n_head, d_head], used for term B
        # r_w_bias: [n_head, d_head], used for term C
        # r_bias: [klen, n_head], used for term D
        qlen, bsz = w.size(0), w.size(1)
        if mems is not None:
            # Project Q/K/V over the concatenation of memory and new input,
            # then keep only the last qlen rows of Q: queries come from the
            # new tokens, keys/values cover the full (mem + new) context.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
        # The learned position tables may be shorter than klen (long memory):
        # pad on the left by repeating the first (most distant) entry.
        if klen > r_emb.size(0):
            r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
            r_emb = torch.cat([r_emb_pad, r_emb], 0)
            r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
            r_bias = torch.cat([r_bias_pad, r_bias], 0)
        else:
            r_emb = r_emb[-klen:]
            r_bias = r_bias[-klen:]
        #### compute attention score
        rw_head_q = w_head_q + r_w_bias[None]                                   # qlen x bsz x n_head x d_head
        # Terms follow the Transformer-XL paper decomposition (A+C and B+D).
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))                  # qlen x klen x bsz x n_head
        D_ = r_bias[None, :, None]                                              # 1 x klen x 1 x n_head
        # _rel_shift realigns the relative-position axis to absolute positions.
        BD = self._rel_shift(B_ + D_)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            # In-place fill with -inf so softmax assigns zero probability.
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class DecoderLayer(nn.Module):
    """Decoder block for the absolute-position attention variants:
    multi-head self-attention followed by a position-wise feed-forward
    sub-layer.
    """
    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(DecoderLayer, self).__init__()
        # Remaining keyword arguments (dropatt, pre_lnorm, ...) are forwarded
        # to the attention sub-layer unchanged.
        self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))
    def forward(self, dec_inp, dec_attn_mask=None, mems=None):
        """Attend over `dec_inp` (and optional cached `mems`), then apply the
        feed-forward block; shape of the result matches `dec_inp`."""
        attn_out = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class RelLearnableDecoderLayer(nn.Module):
    """Decoder block using the fully learnable relative attention
    (RelLearnableMultiHeadAttn) followed by a position-wise feed-forward
    sub-layer.
    """
    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelLearnableDecoderLayer, self).__init__()
        # Extra keyword arguments are handed through to the attention layer.
        self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head,
                                                  dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))
    def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
        """Run relative attention with the learned position tables, then the
        feed-forward block."""
        attn_out = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
                                 attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class RelPartialLearnableDecoderLayer(nn.Module):
    """Decoder block using the partially learnable relative attention
    (RelPartialLearnableMultiHeadAttn, sinusoidal `r`) followed by a
    position-wise feed-forward sub-layer.
    """
    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        # Extra keyword arguments are handed through to the attention layer.
        self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
                                                         d_head, dropout,
                                                         **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))
    def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
        """Run relative attention with positional input `r`, then the
        feed-forward block."""
        attn_out = self.dec_attn(dec_inp, r,
                                 attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embedding: frequent tokens get full-size embeddings,
    rarer tokens (beyond each cutoff) get progressively smaller ones
    (d_embed // div_val**i), each projected up to d_proj.

    With div_val == 1 this degenerates to a single standard embedding table,
    optionally followed by a projection when d_proj != d_embed.
    """
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sample_softmax=False):
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        # Append n_token so cutoff_ends brackets every vocabulary index.
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        # Output embeddings are rescaled by sqrt(d_proj) in forward().
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            # NOTE(review): sample_softmax is documented as bool but compared
            # with `> 0` here — presumably also accepts an int count; confirm
            # against callers.
            self.emb_layers.append(
                nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
            )
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
        else:
            # One embedding table + projection per frequency band.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
    def forward(self, inp):
        """Embed integer token ids `inp` (any shape) into [..., d_proj]."""
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            # Flatten so each frequency band can be gathered with 1-D indices.
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
                dtype=param.dtype, device=param.device)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Tokens whose id falls inside this band.
                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                indices_i = mask_i.nonzero().squeeze()
                if indices_i.numel() == 0:
                    continue
                # NOTE(review): squeeze() yields a 0-dim tensor when exactly
                # one token matches — verify index_select/index_copy_ accept
                # that on the torch versions targeted here.
                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])
                emb_flat.index_copy_(0, indices_i, emb_i)
            embed_shape = inp.size() + (self.d_proj,)
            embed = emb_flat.view(embed_shape)
        # In-place rescale by sqrt(d_proj).
        embed.mul_(self.emb_scale)
        return embed
class TransfoXLPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        # Subclasses must be constructed from a TransfoXLConfig; fail fast
        # otherwise so from_pretrained errors are clear.
        super(TransfoXLPreTrainedModel, self).__init__()
        if not isinstance(config, TransfoXLConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_weight(self, weight):
        """Initialize one weight tensor per config.init ('uniform'/'normal')."""
        if self.config.init == 'uniform':
            nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
        elif self.config.init == 'normal':
            nn.init.normal_(weight, 0.0, self.config.init_std)
    def init_bias(self, bias):
        """Zero-initialize a bias tensor."""
        nn.init.constant_(bias, 0.0)
    def init_weights(self, m):
        """ Initialize the weights of one sub-module.

        Intended for use with ``self.apply(self.init_weights)``; dispatches on
        the class name of ``m`` (Linear, AdaptiveEmbedding, Embedding,
        ProjectedAdaptiveLogSoftmax, LayerNorm, TransformerLM).
        """
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            if hasattr(m, 'weight') and m.weight is not None:
                self.init_weight(m.weight)
            if hasattr(m, 'bias') and m.bias is not None:
                self.init_bias(m.bias)
        elif classname.find('AdaptiveEmbedding') != -1:
            # Projections get their own std (proj_init_std), separate from
            # the embedding tables handled by the 'Embedding' branch.
            if hasattr(m, 'emb_projs'):
                for i in range(len(m.emb_projs)):
                    if m.emb_projs[i] is not None:
                        nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find('Embedding') != -1:
            if hasattr(m, 'weight'):
                self.init_weight(m.weight)
        elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
            if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
                self.init_weight(m.cluster_weight)
            if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
                self.init_bias(m.cluster_bias)
            if hasattr(m, 'out_projs'):
                for i in range(len(m.out_projs)):
                    if m.out_projs[i] is not None:
                        nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find('LayerNorm') != -1:
            # LayerNorm weight centered at 1.0 (multiplicative identity).
            if hasattr(m, 'weight'):
                nn.init.normal_(m.weight, 1.0, self.config.init_std)
            if hasattr(m, 'bias') and m.bias is not None:
                self.init_bias(m.bias)
        elif classname.find('TransformerLM') != -1:
            # Relative-position parameters owned by the top-level model.
            if hasattr(m, 'r_emb'):
                self.init_weight(m.r_emb)
            if hasattr(m, 'r_w_bias'):
                self.init_weight(m.r_w_bias)
            if hasattr(m, 'r_r_bias'):
                self.init_weight(m.r_r_bias)
            if hasattr(m, 'r_bias'):
                self.init_bias(m.r_bias)
    def set_num_special_tokens(self, num_special_tokens):
        # No special tokens in Transformer-XL; kept for API compatibility
        # with the other models in this package.
        pass
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
                        from_tf=False, *inputs, **kwargs):
        """
        Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Returns the loaded model, or None when the archive/config files cannot
        be resolved (the error is logged, not raised).

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `transfo-xl`
                - a path or url to a pretrained model archive containing:
                    . `transfo_xl_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    archive_file, config_file))
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = TransfoXLConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu' if not torch.cuda.is_available() else None)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load each submodule with its own key prefix, the
            # same way nn.Module.load_state_dict does internally.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        # Allow loading a bare-TransfoXLModel checkpoint into a head model
        # (keys prefixed with 'transformer.') and vice versa.
        if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
            start_prefix = 'transformer.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                               model.__class__.__name__, "\n\t".join(error_msgs)))
        # Make sure we are still sharing the input and output embeddings
        if hasattr(model, 'tie_weights'):
            model.tie_weights()
        return model
class TransfoXLModel(TransfoXLPreTrainedModel):
    """Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").

    Transformer XL use a relative positioning (with sinusiodal patterns) and adaptive softmax inputs which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted to decreasing frequency.

    Params:
        config: a TransfoXLConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the token indices selected in the range [0, self.config.n_token[
        `mems`: optional memomry of hidden states from previous forward passes
            as a list (num layers) of hidden states at the entry of each layer
            each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
    Outputs:
        A tuple of (last_hidden_state, new_mems)
        `last_hidden_state`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])

    config = TransfoXLConfig()

    model = TransfoXLModel(config)
    last_hidden_state, new_mems = model(input_ids)

    # Another time on input_ids_next using the memory:
    last_hidden_state, new_mems = model(input_ids_next, new_mems)
    ```
    """
    def __init__(self, config):
        super(TransfoXLModel, self).__init__(config)
        self.n_token = config.n_token
        self.d_embed = config.d_embed
        self.d_model = config.d_model
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
                                          div_val=config.div_val)
        self.drop = nn.Dropout(config.dropout)
        self.n_layer = config.n_layer
        self.tgt_len = config.tgt_len
        self.mem_len = config.mem_len
        self.ext_len = config.ext_len
        self.max_klen = config.tgt_len + config.ext_len + config.mem_len
        self.attn_type = config.attn_type
        # With untie_r=False the relative-position biases are shared across
        # layers and owned here; otherwise each layer creates its own.
        if not config.untie_r:
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.layers = nn.ModuleList()
        if config.attn_type == 0: # the default attention
            for i in range(config.n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias)
                )
        elif config.attn_type == 1: # learnable embeddings
            for i in range(config.n_layer):
                self.layers.append(
                    RelLearnableDecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias)
                )
        elif config.attn_type in [2, 3]: # absolute embeddings
            for i in range(config.n_layer):
                self.layers.append(
                    DecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias)
                )
        self.same_length = config.same_length
        self.clamp_len = config.clamp_len
        # Positional machinery depends on the attention variant.
        if self.attn_type == 0: # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
        elif self.attn_type == 1: # learnable
            self.r_emb = nn.Parameter(torch.Tensor(
                    self.n_layer, self.max_klen, self.n_head, self.d_head))
            self.r_bias = nn.Parameter(torch.Tensor(
                    self.n_layer, self.max_klen, self.n_head))
        elif self.attn_type == 2: # absolute standard
            self.pos_emb = PositionalEmbedding(self.d_model)
        elif self.attn_type == 3: # absolute deeper SA
            self.r_emb = nn.Parameter(torch.Tensor(
                    self.n_layer, self.max_klen, self.n_head, self.d_head))
        self.apply(self.init_weights)
    def backward_compatible(self):
        """Set attributes expected by older checkpoints/callers."""
        self.sample_softmax = -1
    def reset_length(self, tgt_len, ext_len, mem_len):
        """Change target/extended-context/memory lengths after construction."""
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
    def init_mems(self, data):
        """Create zero-filled memory tensors (one per layer), or None when
        mem_len == 0. `data` is only used for its batch dimension."""
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer):
                empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
                                    dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None
    def _update_mems(self, hids, mems, mlen, qlen):
        """Cache the newest hidden states into the layer memories.

        FIX: the signature used to be ``(hids, mems, qlen, mlen)`` while the
        only call site (in ``_forward``) passes ``(hids, mems, mlen, qlen)``,
        silently swapping the two lengths inside this method whenever
        ``ext_len > 0``. The parameter order now matches the call site and
        the index arithmetic below (same change as the upstream fix).
        """
        # does not deal with None
        if mems is None: return None
        # mems is not None
        assert len(hids) == len(mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + max(0, qlen - 0 - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                cat = torch.cat([mems[i], hids[i]], dim=0)
                # detach: memories must not keep the autograd graph alive.
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems
    def _forward(self, dec_inp, mems=None):
        """Core forward in [len, bsz] layout; returns (core_out, new_mems)."""
        qlen, bsz = dec_inp.size()
        word_emb = self.word_emb(dec_inp)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        # Build the causal attention mask (nonzero = masked). same_length
        # additionally limits how far back each token may attend.
        if self.same_length:
            all_ones = word_emb.new_ones(qlen, klen)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1+mlen)
                    + torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
        else:
            dec_attn_mask = torch.triu(
                word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
        hids = []
        if self.attn_type == 0: # default
            # Positions count down from klen-1 to 0 (relative distances).
            pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
        elif self.attn_type == 1: # learnable
            core_out = self.drop(word_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                if self.clamp_len > 0:
                    r_emb = self.r_emb[i][-self.clamp_len :]
                    r_bias = self.r_bias[i][-self.clamp_len :]
                else:
                    r_emb, r_bias = self.r_emb[i], self.r_bias[i]
                mems_i = None if mems is None else mems[i]
                core_out = layer(core_out, r_emb, self.r_w_bias[i],
                        r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
        elif self.attn_type == 2: # absolute
            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb + pos_emb[-qlen:])
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                # First layer: add absolute positions to the memory slice too.
                if mems_i is not None and i == 0:
                    mems_i += pos_emb[:mlen]
                core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
                                 mems=mems_i)
        elif self.attn_type == 3:
            core_out = self.drop(word_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                if mems_i is not None and mlen > 0:
                    cur_emb = self.r_emb[i][:-qlen]
                    cur_size = cur_emb.size(0)
                    # Pad on the left with the most distant entry if the
                    # learned table is shorter than the memory.
                    if cur_size < mlen:
                        cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
                        cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
                    else:
                        cur_emb = cur_emb[-mlen:]
                    mems_i += cur_emb.view(mlen, 1, -1)
                core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
                core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
                                 mems=mems_i)
        core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, mlen, qlen)
        return core_out, new_mems
    def forward(self, input_ids, mems=None):
        """ Params:
            input_ids :: [bsz, len]
            mems :: optional mems from previous forwar passes (or init_mems)
                list (num layers) of mem states at the entry of each layer
                    shape :: [self.config.mem_len, bsz, self.config.d_model]
                Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
        Returns:
            tuple (last_hidden, new_mems) where:
                new_mems: list (num layers) of mem states at the entry of each layer
                    shape :: [self.config.mem_len, bsz, self.config.d_model]
                last_hidden: output of the last layer:
                    shape :: [bsz, len, self.config.d_model]
        """
        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        input_ids = input_ids.transpose(0, 1).contiguous()
        if mems is None:
            mems = self.init_mems(input_ids)
        last_hidden, new_mems = self._forward(input_ids, mems=mems)
        # We transpose back here to shape [bsz, len, hidden_dim]
        last_hidden = last_hidden.transpose(0, 1).contiguous()
        return (last_hidden, new_mems)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
    """Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").

    This model add an (adaptive) softmax head on top of the TransfoXLModel

    Transformer XL use a relative positioning (with sinusiodal patterns) and adaptive softmax inputs which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted to decreasing frequency.

    Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.

    Params:
        config: a TransfoXLConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the token indices selected in the range [0, self.config.n_token[
        `target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with the target token indices selected in the range [0, self.config.n_token[
        `mems`: an optional memory of hidden states from previous forward passes
            as a list (num layers) of hidden states at the entry of each layer
            each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
    Outputs:
        A tuple of (last_hidden_state, new_mems)
        `softmax_output`: output of the (adaptive) softmax:
            if target is None:
                log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
            else:
                Negative log likelihood of shape [batch_size, sequence_length]
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])

    config = TransfoXLConfig()

    model = TransfoXLModel(config)
    last_hidden_state, new_mems = model(input_ids)

    # Another time on input_ids_next using the memory:
    last_hidden_state, new_mems = model(input_ids_next, mems=new_mems)
    ```
    """
    def __init__(self, config):
        super(TransfoXLLMHeadModel, self).__init__(config)
        self.transformer = TransfoXLModel(config)
        self.sample_softmax = config.sample_softmax
        # use sampled softmax
        if config.sample_softmax > 0:
            self.out_layer = nn.Linear(config.d_model, config.n_token)
            self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
        # use adaptive softmax (including standard softmax)
        else:
            self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
                                                    config.cutoffs, div_val=config.div_val)
        self.apply(self.init_weights)
        self.tie_weights()
    def tie_weights(self):
        """ Run this to be sure output and input (adaptive) softmax weights are tied """
        # sampled softmax
        if self.sample_softmax > 0:
            if self.config.tie_weight:
                self.out_layer.weight = self.transformer.word_emb.weight
        # adaptive softmax (including standard softmax)
        else:
            if self.config.tie_weight:
                for i in range(len(self.crit.out_layers)):
                    self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
            if self.config.tie_projs:
                # Projections can only be shared when the shapes line up:
                # div_val == 1 shares the single input projection, otherwise
                # each band shares its own.
                for i, tie_proj in enumerate(self.config.tie_projs):
                    if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                    elif tie_proj and self.config.div_val != 1:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
    def reset_length(self, tgt_len, ext_len, mem_len):
        # Delegates to the underlying TransfoXLModel.
        self.transformer.reset_length(tgt_len, ext_len, mem_len)
    def init_mems(self, data):
        # Delegates to the underlying TransfoXLModel.
        return self.transformer.init_mems(data)
    def forward(self, input_ids, target=None, mems=None):
        """ Params:
            input_ids :: [bsz, len]
            target :: [bsz, len]
        Returns:
            tuple(softmax_output, new_mems) where:
                new_mems: list (num layers) of hidden states at the entry of each layer
                    shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
                softmax_output: output of the (adaptive) softmax:
                    if target is None:
                        log probabilities of tokens, shape :: [bsz, len, n_tokens]
                    else:
                        Negative log likelihood of shape :: [bsz, len]
        """
        bsz = input_ids.size(0)
        tgt_len = input_ids.size(1)
        last_hidden, new_mems = self.transformer(input_ids, mems)
        # Only the last tgt_len positions feed the softmax head.
        pred_hid = last_hidden[:, -tgt_len:]
        if self.sample_softmax > 0 and self.training:
            # Sampled softmax requires tied weights (uses word_emb directly).
            assert self.config.tie_weight
            logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
            softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
        else:
            softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
            if target is None:
                # Full log-prob distribution per position.
                softmax_output = softmax_output.view(bsz, tgt_len, -1)
            else:
                # One NLL value per position.
                softmax_output = softmax_output.view(bsz, tgt_len)
        # We transpose back
        return (softmax_output, new_mems)
| 58,702 | 41.476845 | 131 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import sys
from collections import Counter, OrderedDict
from io import open
import unicodedata
import torch
import numpy as np
from .file_utils import cached_path
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
# Shortcut-name -> URL for downloadable pre-built vocabularies.
PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
}
# File name used when loading a vocabulary from a local directory.
VOCAB_NAME = 'vocab.bin'
# Shortcut-name -> URL for downloadable pre-tokenized corpora.
PRETRAINED_CORPUS_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
# File name used when loading a corpus from a local directory.
CORPUS_NAME = 'corpus.bin'
class TransfoXLTokenizer(object):
"""
Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl
"""
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a TransfoXLTokenizer from a shortcut name or a local path.

        The resolved vocab file is a torch-serialized dict whose entries are
        copied straight into the new tokenizer's __dict__ (overwriting the
        defaults set by __init__). Returns None when the file cannot be
        resolved (the error is logged, not raised).
        """
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        # Instantiate tokenizer.
        tokenizer = cls(*inputs, **kwargs)
        vocab_dict = torch.load(resolved_vocab_file)
        # Restore the saved tokenizer state attribute-by-attribute.
        for key, value in vocab_dict.items():
            tokenizer.__dict__[key] = value
        return tokenizer
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False,
delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
self.never_split = never_split
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
raise ValueError('No <unkown> token in vocabulary')
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
# assert '<eos>' not in sym
if hasattr(self, 'unk_idx'):
return self.sym2idx.get(sym, self.unk_idx)
# Backward compatibility with pre-trained models
elif '<unk>' in self.sym2idx:
return self.sym2idx['<unk>']
elif '<UNK>' in self.sym2idx:
return self.sym2idx['<UNK>']
else:
raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
def convert_ids_to_tokens(self, indices):
"""Converts a sequence of indices in symbols using the vocab."""
return [self.get_sym(idx) for idx in indices]
def convert_tokens_to_ids(self, symbols):
"""Converts a sequence of symbols into ids using the vocab."""
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.convert_tokens_to_ids(symbols))
def decode(self, indices, exclude=None):
"""Converts a sequence of indices in a string."""
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
    def __len__(self):
        # Vocabulary size: number of symbols added so far.
        return len(self.idx2sym)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def whitespace_tokenize(self, text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
if self.delimiter == '':
tokens = text
else:
tokens = text.split(self.delimiter)
return tokens
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = self._clean_text(line)
line = line.strip()
symbols = self.whitespace_tokenize(line)
split_symbols = []
for symbol in symbols:
if self.lower_case and symbol not in self.never_split:
symbol = symbol.lower()
symbol = self._run_strip_accents(symbol)
split_symbols.extend(self._run_split_on_punc(symbol))
if add_double_eos: # lm1b
return ['<S>'] + split_symbols + ['<S>']
elif add_eos:
return split_symbols + ['<eos>']
else:
return split_symbols
class LMOrderedIterator(object):
    """Iterator over one strictly ordered token stream, yielding contiguous
    (data, target) BPTT chunks of shape [bsz x seq_len]; ``target`` is the
    stream shifted one token ahead."""

    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
        """
        data -- LongTensor -- the LongTensor is strictly ordered
        """
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len
        self.device = device

        # Work out how cleanly we can divide the dataset into bsz parts,
        # trim the remainder, and lay the stream out column-wise so each
        # column is one contiguous sub-stream.
        self.n_step = data.size(0) // bsz
        trimmed = data.narrow(0, 0, self.n_step * bsz)
        self.data = trimmed.view(bsz, -1).t().contiguous().to(device)

        # Number of mini-batches
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt

    def get_batch(self, i, bptt=None):
        """Return (data, target, seq_len) for the chunk starting at row i,
        optionally extending the context ``ext_len`` rows backwards."""
        if bptt is None:
            bptt = self.bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)

        begin = max(0, i - self.ext_len)
        end = i + seq_len

        chunk = self.data[begin:end]
        shifted = self.data[i + 1:i + 1 + seq_len]

        data_out = chunk.transpose(0, 1).contiguous().to(self.device)
        target_out = shifted.transpose(0, 1).contiguous().to(self.device)

        return data_out, target_out, seq_len

    def get_fixlen_iter(self, start=0):
        """Yield fixed-length batches covering the whole stream."""
        for i in range(start, self.data.size(0) - 1, self.bptt):
            yield self.get_batch(i)

    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
        """Yield batches whose length is randomly jittered around bptt."""
        max_len = self.bptt + max_deviation * std
        i = start
        while True:
            # Occasionally halve the target length, then add Gaussian jitter.
            bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
            bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
            data, target, seq_len = self.get_batch(i, bptt)
            i += seq_len
            yield data, target, seq_len
            if i >= self.data.size(0) - 2:
                break

    def __iter__(self):
        return self.get_fixlen_iter()
class LMShuffledIterator(object):
    # Batch iterator over an unordered collection of sentences. Sentences
    # are packed greedily into ``bsz`` parallel streams and emitted as
    # fixed-size [bsz x bptt] (data, target) chunks; iteration stops as soon
    # as any stream runs out of sentences (remaining data is dropped).
    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
        """
        data -- list[LongTensor] -- there is no order among the LongTensors
        """
        self.data = data
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0
        self.device = device
        self.shuffle = shuffle
    def get_sent_stream(self):
        """Yield the sentences one by one, optionally in shuffled order."""
        # index iterator
        epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
            else np.array(range(len(self.data)))
        # sentence iterator
        for idx in epoch_indices:
            yield self.data[idx]
    def stream_iterator(self, sent_stream):
        """Pack sentences from ``sent_stream`` into bsz parallel streams and
        yield (data, target, bptt) chunks of shape [bsz x bptt]; positions
        with no data are filled with -1. Returns (ends iteration) once the
        sentence stream is exhausted for any slot."""
        # streams for each data in the batch
        streams = [None] * self.bsz
        data = torch.LongTensor(self.bptt, self.bsz)
        target = torch.LongTensor(self.bptt, self.bsz)
        n_retain = 0
        while True:
            # data   : [n_retain+bptt x bsz]
            # target : [bptt x bsz]
            data[n_retain:].fill_(-1)
            target.fill_(-1)
            valid_batch = True
            for i in range(self.bsz):
                n_filled = 0
                try:
                    while n_filled < self.bptt:
                        # Fetch the next sentence once the current one has
                        # no more than its final (target-only) token left.
                        if streams[i] is None or len(streams[i]) <= 1:
                            streams[i] = next(sent_stream)
                        # number of new tokens to fill in
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
                        # first n_retain tokens are retained from last batch
                        data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
                            streams[i][:n_new]
                        target[n_filled:n_filled+n_new, i] = \
                            streams[i][1:n_new+1]
                        streams[i] = streams[i][n_new:]
                        n_filled += n_new
                except StopIteration:
                    valid_batch = False
                    break
            if not valid_batch:
                return
            data_out = data.transpose(0, 1).contiguous().to(self.device)
            target_out = target.transpose(0, 1).contiguous().to(self.device)
            yield data_out, target_out, self.bptt
            # Carry ``ext_len`` rows of extra context into the next chunk.
            n_retain = min(data.size(0), self.ext_len)
            if n_retain > 0:
                data[:n_retain] = data[-n_retain:]
            data.resize_(n_retain + self.bptt, data.size(1))
    def __iter__(self):
        # sent_stream is an iterator
        sent_stream = self.get_sent_stream()
        for batch in self.stream_iterator(sent_stream):
            yield batch
class LMMultiFileIterator(LMShuffledIterator):
    """LMShuffledIterator variant that streams sentences from several files,
    encoding each file lazily through ``vocab`` as iteration reaches it."""

    def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
                 shuffle=False):
        # Note: deliberately does not call the parent __init__ — there is no
        # pre-encoded ``data`` list; sentences come from files on demand.
        self.paths = paths
        self.vocab = vocab

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self, path):
        """Return an iterator over the (optionally shuffled) encoded
        sentences of one file."""
        sents = self.vocab.encode_file(path, add_double_eos=True)
        if self.shuffle:
            np.random.shuffle(sents)
        return iter(sents)

    def __iter__(self):
        if self.shuffle:
            np.random.shuffle(self.paths)

        for path in self.paths:
            # sent_stream is an iterator over this file's sentences
            for batch in self.stream_iterator(self.get_sent_stream(path)):
                yield batch
class TransfoXLCorpus(object):
    # Bundles a TransfoXLTokenizer with the encoded train/valid/test splits
    # of the standard Transformer-XL benchmark datasets.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a pre-processed corpus.
        """
        vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
            corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
        except EnvironmentError:
            # Download/lookup failed: log and return None rather than raise.
            logger.error(
                "Corpus '{}' was not found in corpus list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    corpus_file))
            return None
        if resolved_corpus_file == corpus_file:
            logger.info("loading corpus file {}".format(corpus_file))
        else:
            logger.info("loading corpus file {} from cache at {}".format(
                corpus_file, resolved_corpus_file))
        # Instantiate tokenizer.
        corpus = cls(*inputs, **kwargs)
        corpus_dict = torch.load(resolved_corpus_file)
        # Restore the serialized attributes onto the fresh instance.
        for key, value in corpus_dict.items():
            corpus.__dict__[key] = value
        corpus.vocab = vocab
        # Re-wrap the splits as LongTensors (they may be saved as lists).
        if corpus.train is not None:
            corpus.train = torch.tensor(corpus.train, dtype=torch.long)
        if corpus.valid is not None:
            corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
        if corpus.test is not None:
            corpus.test = torch.tensor(corpus.test, dtype=torch.long)
        return corpus
    def __init__(self, *args, **kwargs):
        # All constructor arguments are forwarded to the tokenizer.
        self.vocab = TransfoXLTokenizer(*args, **kwargs)
        self.dataset = None
        self.train = None
        self.valid = None
        self.test = None
    def build_corpus(self, path, dataset):
        """Count symbols, build the vocabulary and encode the splits for
        ``dataset`` located under ``path``."""
        self.dataset = dataset
        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103':
            # wt103 only counts the training split.
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called
        self.vocab.build_vocab()
        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            # Character-level datasets: no end-of-sentence token.
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        elif self.dataset == 'lm1b':
            # lm1b training data stays as a list of file paths; files are
            # encoded lazily by LMMultiFileIterator.
            self.train = train_paths
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
    def get_iterator(self, split, *args, **kwargs):
        """Return the batch iterator matching ``split`` and ``self.dataset``.

        NOTE(review): an unsupported split/dataset combination leaves
        ``data_iter`` unbound and raises UnboundLocalError — confirm callers
        only pass the supported values.
        """
        if split == 'train':
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)
        return data_iter
def get_lm_corpus(datadir, dataset):
    """Load a cached TransfoXLCorpus from ``datadir`` or build and cache one.

    A torch-serialized cache ('cache.pt') is preferred, then a pickle cache
    ('cache.pkl'); otherwise the corpus is built from the raw dataset files
    and saved to 'cache.pt'.
    """
    fn = os.path.join(datadir, 'cache.pt')
    fn_pickle = os.path.join(datadir, 'cache.pkl')
    if os.path.exists(fn):
        print('Loading cached dataset...')
        # Bug fix: the torch cache lives at ``fn`` ('cache.pt'); the original
        # loaded ``fn_pickle`` here.
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        # Bug fix: the original re-tested ``os.path.exists(fn)`` here, which
        # duplicated the first condition and made this branch unreachable.
        print('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
    else:
        print('Producing dataset {}...'.format(dataset))
        kwargs = {}
        if dataset in ['wt103', 'wt2']:
            kwargs['special'] = ['<eos>']
            kwargs['lower_case'] = False
        elif dataset == 'ptb':
            kwargs['special'] = ['<eos>']
            kwargs['lower_case'] = True
        elif dataset == 'lm1b':
            kwargs['special'] = []
            kwargs['lower_case'] = False
            kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
        elif dataset in ['enwik8', 'text8']:
            # Character-level datasets need no special tokenizer options.
            pass

        corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
        torch.save(corpus, fn)

    return corpus
def _is_whitespace(char):
    """Return True if ``char`` is a whitespace character.

    \\t, \\n and \\r are technically control characters, but they are
    treated as whitespace here since that is how they are generally used;
    everything in Unicode category 'Zs' also counts.
    """
    if char in (" ", "\t", "\n", "\r"):
        return True
    return unicodedata.category(char) == "Zs"
def _is_control(char):
    """Return True if ``char`` is a control character.

    Tab, newline and carriage return are counted as whitespace (not
    control), mirroring ``_is_whitespace``; everything else in a Unicode
    'C*' category is control.
    """
    if char in ("\t", "\n", "\r"):
        return False
    return unicodedata.category(char).startswith("C")
def _is_punctuation(char):
    """Return True if ``char`` should be treated as punctuation.

    All non-letter/number ASCII is treated as punctuation: characters such
    as "^", "$" and "`" are not in the Unicode Punctuation category but are
    treated as punctuation anyway, for consistency. Anything in a Unicode
    'P*' category also counts.
    """
    cp = ord(char)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    return unicodedata.category(char).startswith("P")
| 24,851 | 35.927192 | 109 | py |
BertGen | BertGen-master/external/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
keep_order=False):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers.append(nn.Linear(d_embed, n_token))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_emb_i))
)
self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
proj_hid = F.linear(hidden, proj.t().contiguous())
logit = F.linear(proj_hid, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def forward(self, hidden, target=None, keep_order=False):
'''
Params:
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
Return:
if target is None:
out :: [len*bsz] Negative log likelihood
else:
out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
We could replace this implementation by the native PyTorch one
if their's had an option to set bias on all clusters in the native one.
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
'''
if target is not None:
target = target.view(-1)
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
if target is not None:
output = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
output = F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if target is None:
out = hidden.new_empty((head_logit.size(0), self.n_token))
else:
out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if target is not None:
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = hidden.index_select(0, indices_i)
else:
hidden_i = hidden
if i == 0:
if target is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
if target is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
out[:, l_idx:r_idx] = logprob_i
if target is not None:
if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
out.index_copy_(0, indices_i, -logprob_i)
else:
out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def log_prob(self, hidden):
r""" Computes log probabilities for all :math:`n\_classes`
From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
Args:
hidden (Tensor): a minibatch of examples
Returns:
log-probabilities of for each class :math:`c`
in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, in\_features)`
- Output: :math:`(N, n\_classes)`
"""
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
return F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
out = hidden.new_empty((head_logit.size(0), self.n_token))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob[:, -i] + tail_logprob_i
out[:, start_idx, stop_idx] = logprob_i
return out
class LogUniformSampler(object):
    # Draws negative samples from a log-uniform (Zipf-like) distribution and
    # supplies the log expected counts needed to correct sampled-softmax
    # logits (see ``sample_logits`` below).
    def __init__(self, range_max, n_sample):
        """
        Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
            `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`

        expected count can be approximated by 1 - (1 - p)^n
        and we use a numerically stable version -expm1(num_tries * log1p(-p))

        Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
        """
        with torch.no_grad():
            self.range_max = range_max
            # dist[k] = P(class k) under the log-uniform distribution.
            log_indices = torch.arange(1., range_max+2., 1.).log_()
            self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
            # print('P', self.dist.numpy().tolist()[-30:])

            # log of the (approximate) expected count of each class among
            # 2*n_sample multinomial draws; subtracted from logits later.
            self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()

        self.n_sample = n_sample
    def sample(self, labels):
        """
            labels: [b1, b2]
        Return
            true_log_probs: [b1, b2]
            samp_log_probs: [n_sample]
            neg_samples: [n_sample]
        """

        # neg_samples = torch.empty(0).long()
        n_sample = self.n_sample
        n_tries = 2 * n_sample

        with torch.no_grad():
            # ``unique`` means the realized number of negatives can be
            # smaller than n_tries (it varies run to run).
            neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
            device = labels.device
            neg_samples = neg_samples.to(device)
            true_log_probs = self.log_q[labels].to(device)
            samp_log_probs = self.log_q[neg_samples].to(device)
            return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
    """Sampled-softmax logits for the gold labels plus negative samples.

        embedding: an nn.Embedding layer
        bias: [n_vocab]
        labels: [b1, b2]
        inputs: [b1, b2, n_emb]
        sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample] — column 0 is the gold label's logit.
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    n_sample = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)

    # Embed the gold labels and the negative samples in a single lookup.
    all_ids = torch.cat([labels.view(-1), neg_samples])
    all_w = embedding(all_ids)
    true_w = all_w[:-n_sample].view(b1, b2, -1)
    sample_w = all_w[-n_sample:].view(n_sample, -1)

    all_b = bias[all_ids]
    true_b = all_b[:-n_sample].view(b1, b2)
    sample_b = all_b[-n_sample:]

    # Negatives that collide with the gold label must be masked out.
    hit = (labels[:, :, None] == neg_samples).detach()

    # Subtracting the log expected counts makes the sampled softmax an
    # (approximately) unbiased estimator of the full softmax.
    pos_logits = torch.einsum('ijk,ijk->ij',
                              [true_w, inputs]) + true_b - true_log_probs
    neg_logits = torch.einsum('lk,ijk->ijl',
                              [sample_w, inputs]) + sample_b - samp_log_probs
    neg_logits.masked_fill_(hit, -1e30)
    return torch.cat([pos_logits[:, :, None], neg_logits], -1)
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
    # Smoke test for LogUniformSampler + sample_logits.
    S, B = 3, 4
    n_vocab = 10000
    n_sample = 5
    H = 32

    labels = torch.LongTensor(S, B).random_(0, n_vocab)

    sampler = LogUniformSampler(n_vocab, n_sample)

    embedding = nn.Embedding(n_vocab, H)
    bias = torch.zeros(n_vocab)
    inputs = torch.Tensor(S, B, H).normal_()

    # Bug fix: sample_logits takes exactly five arguments and returns only
    # the logits tensor; the original passed a stray ``n_sample`` argument
    # and unpacked a non-existent second return value (``out_labels``),
    # so this demo crashed with a TypeError.
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
| 16,113 | 38.985112 | 132 | py |
BertGen | BertGen-master/common/lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """MultiStepLR with an initial warmup phase.

    During the first ``warmup_iters`` epochs the base LR is scaled by a
    factor that is either constant (``warmup_factor``) or linearly ramped
    from ``warmup_factor`` up to 1; afterwards the LR decays by ``gamma``
    at each milestone, exactly like ``MultiStepLR``.
    """

    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not list(milestones) == sorted(milestones):
            # Bug fix: the original passed the format string and the value
            # as two separate ValueError arguments, so the message was
            # never actually formatted.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )

        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted"
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Per-group LR: base_lr * warmup * gamma^(milestones passed)."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Linear interpolation: warmup_factor at epoch 0, 1 at
                # warmup_iters.
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
| 1,810 | 33.169811 | 80 | py |
BertGen | BertGen-master/common/fast_rcnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from common.backbone.resnet.resnet import *
from common.backbone.resnet.resnet import Bottleneck, BasicBlock
from common.backbone.resnet.resnet import model_urls
from common.lib.roi_pooling.roi_pool import ROIPool
from common.lib.roi_pooling.roi_align import ROIAlign
from common.utils.flatten import Flattener
from common.utils.pad_sequence import pad_sequence
from common.utils.bbox import coordinate_embeddings
class FastRCNN(nn.Module):
def __init__(self, config, average_pool=True, final_dim=768, enable_cnn_reg_loss=False):
"""
:param config:
:param average_pool: whether or not to average pool the representations
:param final_dim:
:param is_train:
"""
super(FastRCNN, self).__init__()
self.average_pool = average_pool
self.enable_cnn_reg_loss = enable_cnn_reg_loss
self.final_dim = final_dim
self.image_feat_precomputed = config.NETWORK.IMAGE_FEAT_PRECOMPUTED
if self.image_feat_precomputed:
if config.NETWORK.IMAGE_SEMANTIC:
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
else:
self.object_embed = None
else:
self.stride_in_1x1 = config.NETWORK.IMAGE_STRIDE_IN_1x1
self.c5_dilated = config.NETWORK.IMAGE_C5_DILATED
self.num_layers = config.NETWORK.IMAGE_NUM_LAYERS
self.pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.IMAGE_PRETRAINED,
config.NETWORK.IMAGE_PRETRAINED_EPOCH) if config.NETWORK.IMAGE_PRETRAINED != '' else None
self.output_conv5 = config.NETWORK.OUTPUT_CONV5
if self.num_layers == 18:
self.backbone = resnet18(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4])
block = BasicBlock
elif self.num_layers == 34:
self.backbone = resnet34(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4])
block = BasicBlock
elif self.num_layers == 50:
self.backbone = resnet50(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
elif self.num_layers == 101:
self.backbone = resnet101(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
elif self.num_layers == 152:
self.backbone = resnet152(pretrained=True, pretrained_model_path=self.pretrained_model_path,
expose_stages=[4], stride_in_1x1=self.stride_in_1x1)
block = Bottleneck
else:
raise NotImplemented
output_size = (14, 14)
self.roi_align = ROIAlign(output_size=output_size, spatial_scale=1.0 / 16)
if config.NETWORK.IMAGE_SEMANTIC:
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
else:
self.object_embed = None
self.mask_upsample = None
self.roi_head_feature_extractor = self.backbone._make_layer(block=block, planes=512, blocks=3,
stride=2 if not self.c5_dilated else 1,
dilation=1 if not self.c5_dilated else 2,
stride_in_1x1=self.stride_in_1x1)
if average_pool:
self.head = torch.nn.Sequential(
self.roi_head_feature_extractor,
nn.AvgPool2d(7 if not self.c5_dilated else 14, stride=1),
Flattener()
)
else:
self.head = self.roi_head_feature_extractor
if config.NETWORK.IMAGE_FROZEN_BN:
for module in self.roi_head_feature_extractor.modules():
if isinstance(module, nn.BatchNorm2d):
for param in module.parameters():
param.requires_grad = False
frozen_stages = config.NETWORK.IMAGE_FROZEN_BACKBONE_STAGES
if 5 in frozen_stages:
for p in self.roi_head_feature_extractor.parameters():
p.requires_grad = False
frozen_stages = [stage for stage in frozen_stages if stage != 5]
self.backbone.frozen_parameters(frozen_stages=frozen_stages,
frozen_bn=config.NETWORK.IMAGE_FROZEN_BN)
if self.enable_cnn_reg_loss:
self.regularizing_predictor = torch.nn.Linear(2048, 81)
self.obj_downsample = torch.nn.Sequential(
torch.nn.Dropout(p=0.1),
torch.nn.Linear(2 * 2048 + (128 if config.NETWORK.IMAGE_SEMANTIC else 0), final_dim),
torch.nn.ReLU(inplace=True),
)
def init_weight(self):
if not self.image_feat_precomputed:
if self.pretrained_model_path is None:
pretrained_model = model_zoo.load_url(model_urls['resnet{}'.format(self.num_layers)])
else:
pretrained_model = torch.load(self.pretrained_model_path, map_location=lambda storage, loc: storage)
roi_head_feat_dict = {k[len('layer4.'):]: v for k, v in pretrained_model.items() if k.startswith('layer4.')}
self.roi_head_feature_extractor.load_state_dict(roi_head_feat_dict)
if self.output_conv5:
self.conv5.load_state_dict(roi_head_feat_dict)
def bn_eval(self):
if not self.image_feat_precomputed:
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.eval()
    def forward(self, images, boxes, box_mask, im_info, classes=None, segms=None, mvrc_ops=None, mask_visual_embed=None):
        """
        :param images: [batch_size, 3, im_height, im_width]
        :param boxes: [batch_size, max_num_objects, 4] Padded boxes
        :param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
        :return: object reps [batch_size, max_num_objects, dim]
        """
        # (batch_idx, box_idx) pairs of the valid (non-padded) boxes.
        box_inds = box_mask.nonzero()
        obj_labels = classes[box_inds[:, 0], box_inds[:, 1]].type(torch.long) if classes is not None else None
        assert box_inds.shape[0] > 0
        if self.image_feat_precomputed:
            # Precomputed mode: features are stored alongside box coords,
            # columns [:4] = coordinates, [4:] = feature vector.
            post_roialign = boxes[box_inds[:, 0], box_inds[:, 1]][:, 4:]
            boxes = boxes[:, :, :4]
        else:
            img_feats = self.backbone(images)
            # ROIAlign expects rois as rows of (batch_index, x1, y1, x2, y2).
            rois = torch.cat((
                box_inds[:, 0, None].type(boxes.dtype),
                boxes[box_inds[:, 0], box_inds[:, 1]],
            ), 1)
            roi_align_res = self.roi_align(img_feats['body4'], rois).type(images.dtype)
            if segms is not None:
                # Weight the aligned features by the per-box segmentation map
                # before the pooling layers of the head run.
                pool_layers = self.head[1:]
                post_roialign = self.roi_head_feature_extractor(roi_align_res)
                post_roialign = post_roialign * segms[box_inds[:, 0], None, box_inds[:, 1]].to(dtype=post_roialign.dtype)
                for _layer in pool_layers:
                    post_roialign = _layer(post_roialign)
            else:
                post_roialign = self.head(roi_align_res)
            # Add some regularization, encouraging the model to keep giving decent enough predictions
            if self.enable_cnn_reg_loss:
                obj_logits = self.regularizing_predictor(post_roialign)
                cnn_regularization = F.cross_entropy(obj_logits, obj_labels)[None]
        # Optionally append a class embedding for each box.
        feats_to_downsample = post_roialign if (self.object_embed is None or obj_labels is None) else \
            torch.cat((post_roialign, self.object_embed(obj_labels)), -1)
        if mvrc_ops is not None and mask_visual_embed is not None:
            # mvrc_ops == 1 marks boxes whose visual feature is replaced by the
            # learned [MASK] visual embedding (masked region modeling).
            _to_masked = (mvrc_ops == 1)[box_inds[:, 0], box_inds[:, 1]]
            feats_to_downsample[_to_masked] = mask_visual_embed
        # Sinusoidal-style embeddings of box coordinates + image size.
        coord_embed = coordinate_embeddings(
            torch.cat((boxes[box_inds[:, 0], box_inds[:, 1]], im_info[box_inds[:, 0], :2]), 1),
            256
        )
        feats_to_downsample = torch.cat((coord_embed.view((coord_embed.shape[0], -1)), feats_to_downsample), -1)
        final_feats = self.obj_downsample(feats_to_downsample)
        # Reshape into a padded sequence - this is expensive and annoying but easier to implement and debug...
        obj_reps = pad_sequence(final_feats, box_mask.sum(1).tolist())
        post_roialign = pad_sequence(post_roialign, box_mask.sum(1).tolist())
        # DataParallel compatibility
        # Re-pad to the full max_num_objects width so shapes match across GPUs.
        obj_reps_padded = obj_reps.new_zeros((obj_reps.shape[0], boxes.shape[1], obj_reps.shape[2]))
        obj_reps_padded[:, :obj_reps.shape[1]] = obj_reps
        obj_reps = obj_reps_padded
        post_roialign_padded = post_roialign.new_zeros((post_roialign.shape[0], boxes.shape[1], post_roialign.shape[2]))
        post_roialign_padded[:, :post_roialign.shape[1]] = post_roialign
        post_roialign = post_roialign_padded
        # Output
        output_dict = {
            'obj_reps_raw': post_roialign,
            'obj_reps': obj_reps,
        }
        if (not self.image_feat_precomputed) and self.enable_cnn_reg_loss:
            output_dict.update({'obj_logits': obj_logits,
                                'obj_labels': obj_labels,
                                'cnn_regularization_loss': cnn_regularization})
        if (not self.image_feat_precomputed) and self.output_conv5:
            image_feature = self.img_head(img_feats['body4'])
            output_dict['image_feature'] = image_feature
        return output_dict
| 10,223 | 49.117647 | 155 | py |
BertGen | BertGen-master/common/visual_linguistic_bert.py | import torch
import torch.nn as nn
from external.pytorch_pretrained_bert.modeling import BertLayerNorm, BertEncoder, BertPooler, ACT2FN, BertOnlyMLMHead
# todo: add this to config
NUM_SPECIAL_WORDS = 1000
class BaseModel(nn.Module):
    """Common base for the VL-BERT model family.

    Stores the config on the instance and provides the shared weight
    initializer used via ``self.apply(self.init_weights)``; subclasses must
    override :meth:`forward`.
    """
    def __init__(self, config, **kwargs):
        self.config = config
        super(BaseModel, self).__init__()
    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def forward(self, *args, **kwargs):
        # BUGFIX: the original `raise NotImplemented` raises a TypeError in
        # Python 3 (NotImplemented is a constant, not an exception class);
        # NotImplementedError is the intended abstract-method marker.
        raise NotImplementedError
class VisualLinguisticBert(BaseModel):
    """BERT encoder over a single "seamless" sequence built from text tokens
    and visual object regions (VL-BERT style).

    Each text token and each object region carries a visual-linguistic
    embedding (word/linguistic embedding + projected visual feature); per
    example the valid text tokens, valid object tokens and a trailing [END]
    embedding are packed contiguously and fed through a ``BertEncoder``.
    """
    def __init__(self, config, language_pretrained_model_path=None):
        super(VisualLinguisticBert, self).__init__(config)
        self.config = config
        # embeddings
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.end_embedding = nn.Embedding(1, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.embedding_LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.embedding_dropout = nn.Dropout(config.hidden_dropout_prob)
        # for compatibility of roberta
        self.position_padding_idx = config.position_padding_idx
        # visual transform
        # 1x1 projections only needed when visual feature size != hidden size.
        self.visual_1x1_text = None
        self.visual_1x1_object = None
        if config.visual_size != config.hidden_size:
            self.visual_1x1_text = nn.Linear(config.visual_size, config.hidden_size)
            self.visual_1x1_object = nn.Linear(config.visual_size, config.hidden_size)
        # Visual embeddings are either layer-normalized or scaled by a learned
        # scalar, depending on config.visual_ln.
        if config.visual_ln:
            self.visual_ln_text = BertLayerNorm(config.hidden_size, eps=1e-12)
            self.visual_ln_object = BertLayerNorm(config.hidden_size, eps=1e-12)
        else:
            visual_scale_text = nn.Parameter(torch.as_tensor(self.config.visual_scale_text_init, dtype=torch.float),
                                             requires_grad=True)
            self.register_parameter('visual_scale_text', visual_scale_text)
            visual_scale_object = nn.Parameter(torch.as_tensor(self.config.visual_scale_object_init, dtype=torch.float),
                                               requires_grad=True)
            self.register_parameter('visual_scale_object', visual_scale_object)
        self.encoder = BertEncoder(config)
        if self.config.with_pooler:
            self.pooler = BertPooler(config)
        # init weights
        self.apply(self.init_weights)
        if config.visual_ln:
            self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)
            self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)
        # load language pretrained model
        if language_pretrained_model_path is not None:
            self.load_language_pretrained_model(language_pretrained_model_path)
        if config.word_embedding_frozen:
            # Freeze the pretrained vocabulary, but keep the first
            # NUM_SPECIAL_WORDS ids trainable through a separate table.
            for p in self.word_embeddings.parameters():
                p.requires_grad = False
            self.special_word_embeddings = nn.Embedding(NUM_SPECIAL_WORDS, config.hidden_size)
            self.special_word_embeddings.weight.data.copy_(self.word_embeddings.weight.data[:NUM_SPECIAL_WORDS])
    def word_embeddings_wrapper(self, input_ids):
        # When the main word embeddings are frozen, route ids below
        # NUM_SPECIAL_WORDS through the trainable special-word table instead.
        if self.config.word_embedding_frozen:
            word_embeddings = self.word_embeddings(input_ids)
            word_embeddings[input_ids < NUM_SPECIAL_WORDS] \
                = self.special_word_embeddings(input_ids[input_ids < NUM_SPECIAL_WORDS])
            return word_embeddings
        else:
            return self.word_embeddings(input_ids)
    def forward(self,
                text_input_ids,
                text_token_type_ids,
                text_visual_embeddings,
                text_mask,
                object_vl_embeddings,
                object_mask,
                output_all_encoded_layers=True,
                output_text_and_object_separately=False,
                output_attention_probs=False):
        """Encode the packed text+object sequence.

        Depending on the ``output_*`` flags, returns encoded layers (all or
        last), optionally split back into text / object parts, the pooled
        output (or ``None`` when ``config.with_pooler`` is off), and
        optionally the attention probabilities.
        """
        # get seamless concatenate embeddings and mask
        embedding_output, attention_mask, text_mask_new, object_mask_new = self.embedding(text_input_ids,
                                                                                          text_token_type_ids,
                                                                                          text_visual_embeddings,
                                                                                          text_mask,
                                                                                          object_vl_embeddings,
                                                                                          object_mask)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # extended_attention_mask = 1.0 - extended_attention_mask
        # extended_attention_mask[extended_attention_mask != 0] = float('-inf')
        if output_attention_probs:
            encoded_layers, attention_probs = self.encoder(embedding_output,
                                                           extended_attention_mask,
                                                           output_all_encoded_layers=output_all_encoded_layers,
                                                           output_attention_probs=output_attention_probs)
        else:
            encoded_layers = self.encoder(embedding_output,
                                          extended_attention_mask,
                                          output_all_encoded_layers=output_all_encoded_layers,
                                          output_attention_probs=output_attention_probs)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output) if self.config.with_pooler else None
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if output_text_and_object_separately:
            # Scatter the packed sequence back into per-segment tensors using
            # the position masks returned by self.embedding().
            if not output_all_encoded_layers:
                encoded_layers = [encoded_layers]
            encoded_layers_text = []
            encoded_layers_object = []
            for encoded_layer in encoded_layers:
                max_text_len = text_input_ids.shape[1]
                max_object_len = object_vl_embeddings.shape[1]
                encoded_layer_text = encoded_layer[:, :max_text_len]
                encoded_layer_object = encoded_layer.new_zeros(
                    (encoded_layer.shape[0], max_object_len, encoded_layer.shape[2]))
                encoded_layer_object[object_mask] = encoded_layer[object_mask_new]
                encoded_layers_text.append(encoded_layer_text)
                encoded_layers_object.append(encoded_layer_object)
            if not output_all_encoded_layers:
                encoded_layers_text = encoded_layers_text[0]
                encoded_layers_object = encoded_layers_object[0]
            if output_attention_probs:
                return encoded_layers_text, encoded_layers_object, pooled_output, attention_probs
            else:
                return encoded_layers_text, encoded_layers_object, pooled_output
        else:
            if output_attention_probs:
                return encoded_layers, pooled_output, attention_probs
            else:
                return encoded_layers, pooled_output
    def embedding(self,
                  text_input_ids,
                  text_token_type_ids,
                  text_visual_embeddings,
                  text_mask,
                  object_vl_embeddings,
                  object_mask):
        """Build the packed input embeddings and attention mask.

        Returns ``(embeddings, mask, text_pos_mask, object_pos_mask)`` where
        the last two boolean grids locate text / object tokens inside the
        packed sequence.
        """
        text_linguistic_embedding = self.word_embeddings_wrapper(text_input_ids)
        if self.visual_1x1_text is not None:
            text_visual_embeddings = self.visual_1x1_text(text_visual_embeddings)
        if self.config.visual_ln:
            text_visual_embeddings = self.visual_ln_text(text_visual_embeddings)
        else:
            text_visual_embeddings *= self.visual_scale_text
        text_vl_embeddings = text_linguistic_embedding + text_visual_embeddings
        # Object embeddings arrive concatenated: [:visual_size] visual part,
        # [visual_size:] linguistic part.
        object_visual_embeddings = object_vl_embeddings[:, :, :self.config.visual_size]
        if self.visual_1x1_object is not None:
            object_visual_embeddings = self.visual_1x1_object(object_visual_embeddings)
        if self.config.visual_ln:
            object_visual_embeddings = self.visual_ln_object(object_visual_embeddings)
        else:
            object_visual_embeddings *= self.visual_scale_object
        object_linguistic_embeddings = object_vl_embeddings[:, :, self.config.visual_size:]
        object_vl_embeddings = object_linguistic_embeddings + object_visual_embeddings
        bs = text_vl_embeddings.size(0)
        vl_embed_size = text_vl_embeddings.size(-1)
        # +1 leaves room for the [END] embedding appended after the objects.
        max_length = (text_mask.sum(1) + object_mask.sum(1)).max() + 1
        grid_ind, grid_pos = torch.meshgrid(torch.arange(bs, dtype=torch.long, device=text_vl_embeddings.device),
                                            torch.arange(max_length, dtype=torch.long, device=text_vl_embeddings.device))
        text_end = text_mask.sum(1, keepdim=True)
        object_end = text_end + object_mask.sum(1, keepdim=True)
        # seamlessly concatenate visual linguistic embeddings of text and object
        # FM note: everything that's masked is left as zeros, everything that is not masked
        # is replaced by the actual embeddings. Max length is the maximum plus 1
        # of each batch of un-masked inputs (implies longest example in batch does fit..?)
        # grid pos: simply 0,1,2,3,4... position id for each example in the batch
        _zero_id = torch.zeros((bs, ), dtype=torch.long, device=text_vl_embeddings.device)
        vl_embeddings = text_vl_embeddings.new_zeros((bs, max_length, vl_embed_size))
        vl_embeddings[grid_pos < text_end] = text_vl_embeddings[text_mask]
        vl_embeddings[(grid_pos >= text_end) & (grid_pos < object_end)] = object_vl_embeddings[object_mask]
        vl_embeddings[grid_pos == object_end] = self.end_embedding(_zero_id)
        # token type embeddings/ segment embeddings
        token_type_ids = text_token_type_ids.new_zeros((bs, max_length))
        token_type_ids[grid_pos < text_end] = text_token_type_ids[text_mask]
        # NOTE: '<=' (not '<') so the [END] token also gets segment id 2.
        token_type_ids[(grid_pos >= text_end) & (grid_pos <= object_end)] = 2
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        # position embeddings
        position_ids = grid_pos + self.position_padding_idx + 1
        if self.config.obj_pos_id_relative:
            # All object tokens share the position right after the last text
            # token; [END] takes the position after that.
            position_ids[(grid_pos >= text_end) & (grid_pos < object_end)] \
                = text_end.expand((bs, max_length))[(grid_pos >= text_end) & (grid_pos < object_end)] \
                + self.position_padding_idx + 1
            position_ids[grid_pos == object_end] = (text_end + 1).squeeze(1) + self.position_padding_idx + 1
        else:
            # Deliberately disabled branch; the two statements below are unreachable.
            assert False, "Don't use position id 510/511 for objects and [END]!!!"
            position_ids[(grid_pos >= text_end) & (grid_pos < object_end)] = self.config.max_position_embeddings - 2
            position_ids[grid_pos == object_end] = self.config.max_position_embeddings - 1
        position_embeddings = self.position_embeddings(position_ids)
        # Attention mask covers text, objects and the [END] token.
        mask = text_mask.new_zeros((bs, max_length))
        mask[grid_pos <= object_end] = 1
        embeddings = vl_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.embedding_LayerNorm(embeddings)
        embeddings = self.embedding_dropout(embeddings)
        return embeddings, mask, grid_pos < text_end, (grid_pos >= text_end) & (grid_pos < object_end)
    def load_language_pretrained_model(self, language_pretrained_model_path):
        """Load a BERT/RoBERTa checkpoint, remapping 'bert.'/'roberta.' prefixed
        keys (and legacy 'gamma'/'beta' LayerNorm names) onto this module.

        Keys that do not map to a known submodule are collected and printed as
        a warning rather than raising.
        """
        pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)
        encoder_pretrained_state_dict = {}
        pooler_pretrained_state_dict = {}
        embedding_ln_pretrained_state_dict = {}
        unexpected_keys = []
        for k, v in pretrained_state_dict.items():
            if k.startswith('bert.'):
                k = k[len('bert.'):]
            elif k.startswith('roberta.'):
                k = k[len('roberta.'):]
            else:
                unexpected_keys.append(k)
                continue
            if 'gamma' in k:
                k = k.replace('gamma', 'weight')
            if 'beta' in k:
                k = k.replace('beta', 'bias')
            if k.startswith('encoder.'):
                k_ = k[len('encoder.'):]
                if k_ in self.encoder.state_dict():
                    encoder_pretrained_state_dict[k_] = v
                else:
                    unexpected_keys.append(k)
            elif k.startswith('embeddings.'):
                k_ = k[len('embeddings.'):]
                if k_ == 'word_embeddings.weight':
                    self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,
                                                            device=self.word_embeddings.weight.data.device)
                elif k_ == 'position_embeddings.weight':
                    self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,
                                                                device=self.position_embeddings.weight.data.device)
                elif k_ == 'token_type_embeddings.weight':
                    self.token_type_embeddings.weight.data[:v.size(0)] = v.to(
                        dtype=self.token_type_embeddings.weight.data.dtype,
                        device=self.token_type_embeddings.weight.data.device)
                    if v.size(0) == 1:
                        # Todo: roberta token type embedding
                        # RoBERTa has a single token type; replicate it into
                        # slots 1 and 2 used by this model.
                        self.token_type_embeddings.weight.data[1] = v[0].clone().to(
                            dtype=self.token_type_embeddings.weight.data.dtype,
                            device=self.token_type_embeddings.weight.data.device)
                        self.token_type_embeddings.weight.data[2] = v[0].clone().to(
                            dtype=self.token_type_embeddings.weight.data.dtype,
                            device=self.token_type_embeddings.weight.data.device)
                elif k_.startswith('LayerNorm.'):
                    k__ = k_[len('LayerNorm.'):]
                    if k__ in self.embedding_LayerNorm.state_dict():
                        embedding_ln_pretrained_state_dict[k__] = v
                    else:
                        unexpected_keys.append(k)
                else:
                    unexpected_keys.append(k)
            elif self.config.with_pooler and k.startswith('pooler.'):
                k_ = k[len('pooler.'):]
                if k_ in self.pooler.state_dict():
                    pooler_pretrained_state_dict[k_] = v
                else:
                    unexpected_keys.append(k)
            else:
                unexpected_keys.append(k)
        if len(unexpected_keys) > 0:
            print("Warnings: Unexpected keys: {}.".format(unexpected_keys))
        self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)
        self.encoder.load_state_dict(encoder_pretrained_state_dict)
        if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:
            self.pooler.load_state_dict(pooler_pretrained_state_dict)
class VisualLinguisticBertForPretraining(VisualLinguisticBert):
    """VL-BERT with the pretraining heads: sentence-image relationship (rel),
    masked language modeling (mlm) and masked visual region classification
    (mvrc); each head can be switched off via the constructor flags.
    """
    def __init__(self, config, language_pretrained_model_path=None,
                 with_rel_head=True, with_mlm_head=True, with_mvrc_head=True):
        # NOTE: the parent is constructed with path=None on purpose; the
        # checkpoint is loaded below, after the heads exist, via this class's
        # own load_language_pretrained_model.
        super(VisualLinguisticBertForPretraining, self).__init__(config, language_pretrained_model_path=None)
        self.with_rel_head = with_rel_head
        self.with_mlm_head = with_mlm_head
        self.with_mvrc_head = with_mvrc_head
        if with_rel_head:
            # NOTE(review): 'relationsip' is a typo used consistently in this
            # file -- presumably kept for checkpoint compatibility; do not rename.
            self.relationsip_head = VisualLinguisticBertRelationshipPredictionHead(config)
        if with_mlm_head:
            self.mlm_head = BertOnlyMLMHead(config, self.word_embeddings.weight)
        if with_mvrc_head:
            self.mvrc_head = VisualLinguisticBertMVRCHead(config)
        # init weights
        self.apply(self.init_weights)
        if config.visual_ln:
            self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)
            self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)
        # load language pretrained model
        if language_pretrained_model_path is not None:
            self.load_language_pretrained_model(language_pretrained_model_path)
        if config.word_embedding_frozen:
            for p in self.word_embeddings.parameters():
                p.requires_grad = False
        if config.pos_embedding_frozen:
            for p in self.position_embeddings.parameters():
                p.requires_grad = False
    def forward(self,
                text_input_ids,
                text_token_type_ids,
                text_visual_embeddings,
                text_mask,
                object_vl_embeddings,
                object_mask,
                output_all_encoded_layers=True,
                output_text_and_object_separately=False):
        """Run the encoder and the enabled heads.

        Returns (relationship_logits, mlm_logits, mvrc_logits); each entry is
        None when the corresponding head is disabled.
        """
        text_out, object_out, pooled_rep = super(VisualLinguisticBertForPretraining, self).forward(
            text_input_ids,
            text_token_type_ids,
            text_visual_embeddings,
            text_mask,
            object_vl_embeddings,
            object_mask,
            output_all_encoded_layers=False,
            output_text_and_object_separately=True
        )
        if self.with_rel_head:
            relationship_logits = self.relationsip_head(pooled_rep)
        else:
            relationship_logits = None
        if self.with_mlm_head:
            mlm_logits = self.mlm_head(text_out)
        else:
            mlm_logits = None
        if self.with_mvrc_head:
            mvrc_logits = self.mvrc_head(object_out)
        else:
            mvrc_logits = None
        return relationship_logits, mlm_logits, mvrc_logits
    def load_language_pretrained_model(self, language_pretrained_model_path):
        """Like the parent's loader, but additionally maps the pretraining-head
        keys ('cls.seq_relationship.', 'cls.predictions.' / 'lm_head.') onto
        the corresponding heads when they are enabled.
        """
        pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)
        encoder_pretrained_state_dict = {}
        pooler_pretrained_state_dict = {}
        embedding_ln_pretrained_state_dict = {}
        relationship_head_pretrained_state_dict = {}
        mlm_head_pretrained_state_dict = {}
        unexpected_keys = []
        for _k, v in pretrained_state_dict.items():
            if _k.startswith('bert.') or _k.startswith('roberta.'):
                k = _k[len('bert.'):] if _k.startswith('bert.') else _k[len('roberta.'):]
                if 'gamma' in k:
                    k = k.replace('gamma', 'weight')
                if 'beta' in k:
                    k = k.replace('beta', 'bias')
                if k.startswith('encoder.'):
                    k_ = k[len('encoder.'):]
                    if k_ in self.encoder.state_dict():
                        encoder_pretrained_state_dict[k_] = v
                    else:
                        unexpected_keys.append(_k)
                elif k.startswith('embeddings.'):
                    k_ = k[len('embeddings.'):]
                    if k_ == 'word_embeddings.weight':
                        self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,
                                                                device=self.word_embeddings.weight.data.device)
                    elif k_ == 'position_embeddings.weight':
                        self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,
                                                                    device=self.position_embeddings.weight.data.device)
                    elif k_ == 'token_type_embeddings.weight':
                        self.token_type_embeddings.weight.data[:v.size(0)] = v.to(
                            dtype=self.token_type_embeddings.weight.data.dtype,
                            device=self.token_type_embeddings.weight.data.device)
                        if v.size(0) == 1:
                            # Todo: roberta token type embedding
                            self.token_type_embeddings.weight.data[1] = v[0].to(
                                dtype=self.token_type_embeddings.weight.data.dtype,
                                device=self.token_type_embeddings.weight.data.device)
                    elif k_.startswith('LayerNorm.'):
                        k__ = k_[len('LayerNorm.'):]
                        if k__ in self.embedding_LayerNorm.state_dict():
                            embedding_ln_pretrained_state_dict[k__] = v
                        else:
                            unexpected_keys.append(_k)
                    else:
                        unexpected_keys.append(_k)
                elif self.config.with_pooler and k.startswith('pooler.'):
                    k_ = k[len('pooler.'):]
                    if k_ in self.pooler.state_dict():
                        pooler_pretrained_state_dict[k_] = v
                    else:
                        unexpected_keys.append(_k)
            elif _k.startswith('cls.seq_relationship.') and self.with_rel_head:
                k_ = _k[len('cls.seq_relationship.'):]
                if 'gamma' in k_:
                    k_ = k_.replace('gamma', 'weight')
                if 'beta' in k_:
                    k_ = k_.replace('beta', 'bias')
                if k_ in self.relationsip_head.caption_image_relationship.state_dict():
                    relationship_head_pretrained_state_dict[k_] = v
                else:
                    unexpected_keys.append(_k)
            elif (_k.startswith('cls.predictions.') or _k.startswith('lm_head.')) and self.with_mlm_head:
                k_ = _k[len('cls.predictions.'):] if _k.startswith('cls.predictions.') else _k[len('lm_head.'):]
                if _k.startswith('lm_head.'):
                    # RoBERTa lm_head layout -> BERT 'transform.' layout.
                    if 'dense' in k_ or 'layer_norm' in k_:
                        k_ = 'transform.' + k_
                    if 'layer_norm' in k_:
                        k_ = k_.replace('layer_norm', 'LayerNorm')
                if 'gamma' in k_:
                    k_ = k_.replace('gamma', 'weight')
                if 'beta' in k_:
                    k_ = k_.replace('beta', 'bias')
                if k_ in self.mlm_head.predictions.state_dict():
                    mlm_head_pretrained_state_dict[k_] = v
                else:
                    unexpected_keys.append(_k)
            else:
                unexpected_keys.append(_k)
        if len(unexpected_keys) > 0:
            print("Warnings: Unexpected keys: {}.".format(unexpected_keys))
        self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)
        self.encoder.load_state_dict(encoder_pretrained_state_dict)
        if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:
            self.pooler.load_state_dict(pooler_pretrained_state_dict)
        # Only load the relationship head when the output dimension matches
        # (this variant may use a 1-dim head vs the checkpoint's 2-dim one).
        if self.with_rel_head and len(relationship_head_pretrained_state_dict) > 0 and \
                relationship_head_pretrained_state_dict['weight'].shape[0] == self.relationsip_head.caption_image_relationship.weight.shape[0]:
            self.relationsip_head.caption_image_relationship.load_state_dict(relationship_head_pretrained_state_dict)
        if self.with_mlm_head:
            self.mlm_head.predictions.load_state_dict(mlm_head_pretrained_state_dict)
        # TODO: load MVRC head
class VisualLinguisticBertMVRCHeadTransform(BaseModel):
    """Dense + activation transform applied before the MVRC classifier."""
    def __init__(self, config):
        super(VisualLinguisticBertMVRCHeadTransform, self).__init__(config)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]
        self.apply(self.init_weights)
    def forward(self, hidden_states):
        """Project the hidden states and apply the configured activation."""
        return self.act(self.dense(hidden_states))
class VisualLinguisticBertMVRCHead(BaseModel):
    """Masked visual region classification head: transform then class logits."""
    def __init__(self, config):
        super(VisualLinguisticBertMVRCHead, self).__init__(config)
        self.transform = VisualLinguisticBertMVRCHeadTransform(config)
        self.region_cls_pred = nn.Linear(config.hidden_size, config.visual_region_classes)
        self.apply(self.init_weights)
    def forward(self, hidden_states):
        """Return per-region class logits for the given hidden states."""
        return self.region_cls_pred(self.transform(hidden_states))
class VisualLinguisticBertRelationshipPredictionHead(BaseModel):
    """Sentence-image relationship head over the pooled representation."""
    def __init__(self, config):
        super(VisualLinguisticBertRelationshipPredictionHead, self).__init__(config)
        # FM edit - change to single output
        self.caption_image_relationship = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_weights)
    def forward(self, pooled_rep):
        """Return the single relationship logit for the pooled representation."""
        return self.caption_image_relationship(pooled_rep)
# FM added: Class created to get access to sentence embeddings
# for retrieval tasks
# (code is based on existing VL-BERT with changes where necessary)
class VisualLinguisticBertForDistance(VisualLinguisticBert):
    """Variant of the pretraining model that also exposes the pooled sentence
    representation and the per-token text outputs, for retrieval/distance
    tasks (see the FM comment above this class).
    """
    def __init__(self, config, language_pretrained_model_path=None,
                 with_rel_head=True, with_mlm_head=True, with_mvrc_head=True):
        # NOTE: parent built with path=None; the checkpoint is loaded below,
        # after the heads exist.
        super(VisualLinguisticBertForDistance, self).__init__(config, language_pretrained_model_path=None)
        self.with_rel_head = with_rel_head
        self.with_mlm_head = with_mlm_head
        self.with_mvrc_head = with_mvrc_head
        if with_rel_head:
            # NOTE(review): 'relationsip' typo kept consistently in this file.
            self.relationsip_head = VisualLinguisticBertRelationshipPredictionHead(config)
        if with_mlm_head:
            self.mlm_head = BertOnlyMLMHead(config, self.word_embeddings.weight)
        if with_mvrc_head:
            self.mvrc_head = VisualLinguisticBertMVRCHead(config)
        # init weights
        self.apply(self.init_weights)
        if config.visual_ln:
            self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)
            self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)
        # load language pretrained model
        if language_pretrained_model_path is not None:
            self.load_language_pretrained_model(language_pretrained_model_path)
        if config.word_embedding_frozen:
            for p in self.word_embeddings.parameters():
                p.requires_grad = False
        if config.pos_embedding_frozen:
            for p in self.position_embeddings.parameters():
                p.requires_grad = False
    def forward(self,
                text_input_ids,
                text_token_type_ids,
                text_visual_embeddings,
                text_mask,
                object_vl_embeddings,
                object_mask,
                output_all_encoded_layers=True,
                output_text_and_object_separately=False):
        """Run the encoder and heads; additionally return pooled_rep and the
        per-token text outputs (text_out) for retrieval-style use.
        """
        text_out, object_out, pooled_rep = super(VisualLinguisticBertForDistance, self).forward(
            text_input_ids,
            text_token_type_ids,
            text_visual_embeddings,
            text_mask,
            object_vl_embeddings,
            object_mask,
            output_all_encoded_layers=False,
            output_text_and_object_separately=True
        )
        if self.with_rel_head:
            relationship_logits = self.relationsip_head(pooled_rep)
        else:
            relationship_logits = None
        if self.with_mlm_head:
            mlm_logits = self.mlm_head(text_out)
        else:
            mlm_logits = None
        if self.with_mvrc_head:
            mvrc_logits = self.mvrc_head(object_out)
        else:
            mvrc_logits = None
        return relationship_logits, mlm_logits, mvrc_logits, pooled_rep, text_out
    def load_language_pretrained_model(self, language_pretrained_model_path):
        """Same remapping as the pretraining variant: encoder/embedding/pooler
        keys plus the 'cls.seq_relationship.' and 'cls.predictions.'/'lm_head.'
        head keys when those heads are enabled.
        """
        pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)
        encoder_pretrained_state_dict = {}
        pooler_pretrained_state_dict = {}
        embedding_ln_pretrained_state_dict = {}
        relationship_head_pretrained_state_dict = {}
        mlm_head_pretrained_state_dict = {}
        unexpected_keys = []
        for _k, v in pretrained_state_dict.items():
            if _k.startswith('bert.') or _k.startswith('roberta.'):
                k = _k[len('bert.'):] if _k.startswith('bert.') else _k[len('roberta.'):]
                if 'gamma' in k:
                    k = k.replace('gamma', 'weight')
                if 'beta' in k:
                    k = k.replace('beta', 'bias')
                if k.startswith('encoder.'):
                    k_ = k[len('encoder.'):]
                    if k_ in self.encoder.state_dict():
                        encoder_pretrained_state_dict[k_] = v
                    else:
                        unexpected_keys.append(_k)
                elif k.startswith('embeddings.'):
                    k_ = k[len('embeddings.'):]
                    if k_ == 'word_embeddings.weight':
                        self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,
                                                                device=self.word_embeddings.weight.data.device)
                    elif k_ == 'position_embeddings.weight':
                        self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,
                                                                    device=self.position_embeddings.weight.data.device)
                    elif k_ == 'token_type_embeddings.weight':
                        self.token_type_embeddings.weight.data[:v.size(0)] = v.to(
                            dtype=self.token_type_embeddings.weight.data.dtype,
                            device=self.token_type_embeddings.weight.data.device)
                        if v.size(0) == 1:
                            # Todo: roberta token type embedding
                            self.token_type_embeddings.weight.data[1] = v[0].to(
                                dtype=self.token_type_embeddings.weight.data.dtype,
                                device=self.token_type_embeddings.weight.data.device)
                    elif k_.startswith('LayerNorm.'):
                        k__ = k_[len('LayerNorm.'):]
                        if k__ in self.embedding_LayerNorm.state_dict():
                            embedding_ln_pretrained_state_dict[k__] = v
                        else:
                            unexpected_keys.append(_k)
                    else:
                        unexpected_keys.append(_k)
                elif self.config.with_pooler and k.startswith('pooler.'):
                    k_ = k[len('pooler.'):]
                    if k_ in self.pooler.state_dict():
                        pooler_pretrained_state_dict[k_] = v
                    else:
                        unexpected_keys.append(_k)
            elif _k.startswith('cls.seq_relationship.') and self.with_rel_head:
                k_ = _k[len('cls.seq_relationship.'):]
                if 'gamma' in k_:
                    k_ = k_.replace('gamma', 'weight')
                if 'beta' in k_:
                    k_ = k_.replace('beta', 'bias')
                if k_ in self.relationsip_head.caption_image_relationship.state_dict():
                    relationship_head_pretrained_state_dict[k_] = v
                else:
                    unexpected_keys.append(_k)
            elif (_k.startswith('cls.predictions.') or _k.startswith('lm_head.')) and self.with_mlm_head:
                k_ = _k[len('cls.predictions.'):] if _k.startswith('cls.predictions.') else _k[len('lm_head.'):]
                if _k.startswith('lm_head.'):
                    # RoBERTa lm_head layout -> BERT 'transform.' layout.
                    if 'dense' in k_ or 'layer_norm' in k_:
                        k_ = 'transform.' + k_
                    if 'layer_norm' in k_:
                        k_ = k_.replace('layer_norm', 'LayerNorm')
                if 'gamma' in k_:
                    k_ = k_.replace('gamma', 'weight')
                if 'beta' in k_:
                    k_ = k_.replace('beta', 'bias')
                if k_ in self.mlm_head.predictions.state_dict():
                    mlm_head_pretrained_state_dict[k_] = v
                else:
                    unexpected_keys.append(_k)
            else:
                unexpected_keys.append(_k)
        if len(unexpected_keys) > 0:
            print("Warnings: Unexpected keys: {}.".format(unexpected_keys))
        self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)
        self.encoder.load_state_dict(encoder_pretrained_state_dict)
        if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:
            self.pooler.load_state_dict(pooler_pretrained_state_dict)
        # Only load the relationship head when the output dimension matches
        # (this variant uses a 1-dim head; BERT checkpoints ship a 2-dim one).
        if self.with_rel_head and len(relationship_head_pretrained_state_dict) > 0 and \
                relationship_head_pretrained_state_dict['weight'].shape[0] == self.relationsip_head.caption_image_relationship.weight.shape[0]:
            self.relationsip_head.caption_image_relationship.load_state_dict(relationship_head_pretrained_state_dict)
        if self.with_mlm_head:
            self.mlm_head.predictions.load_state_dict(mlm_head_pretrained_state_dict)
        # TODO: load MVRC head
# FM added: Visual Linguistic Encoder that returns output hidden representations
# for all output tokens
# (code is based on VL-BERT implementation with modification where required)
class VisualLinguisticBertEncoder(VisualLinguisticBert):
    def __init__(self, config, language_pretrained_model_path=None,
                 with_rel_head=True, with_mlm_head=True, with_mvrc_head=True):
        """Build the encoder variant; heads are optional via the flags.

        NOTE: the parent is constructed with path=None on purpose -- the
        checkpoint is loaded after the heads exist.
        """
        super(VisualLinguisticBertEncoder, self).__init__(config, language_pretrained_model_path=None)
        self.with_rel_head = with_rel_head
        self.with_mlm_head = with_mlm_head
        self.with_mvrc_head = with_mvrc_head
        if with_rel_head:
            # NOTE(review): 'relationsip' typo kept consistently in this file.
            self.relationsip_head = VisualLinguisticBertRelationshipPredictionHead(config)
        if with_mlm_head:
            self.mlm_head = BertOnlyMLMHead(config, self.word_embeddings.weight)
        if with_mvrc_head:
            self.mvrc_head = VisualLinguisticBertMVRCHead(config)
        # init weights
        self.apply(self.init_weights)
        if config.visual_ln:
            self.visual_ln_text.weight.data.fill_(self.config.visual_scale_text_init)
            self.visual_ln_object.weight.data.fill_(self.config.visual_scale_object_init)
        # load language pretrained model
        if language_pretrained_model_path is not None:
            self.load_language_pretrained_model(language_pretrained_model_path)
        if config.word_embedding_frozen:
            for p in self.word_embeddings.parameters():
                p.requires_grad = False
        if config.pos_embedding_frozen:
            for p in self.position_embeddings.parameters():
                p.requires_grad = False
    def forward(self,
                text_input_ids,
                text_token_type_ids,
                text_visual_embeddings,
                text_mask,
                object_vl_embeddings,
                object_mask,
                output_all_encoded_layers=True,
                output_text_and_object_separately=False):
        """Encode and return hidden representations for ALL output tokens
        (unlike the pretraining variant, the parent is called with
        output_text_and_object_separately=False).
        """
        encoder_output_embeddings, pooled_rep = super(VisualLinguisticBertEncoder, self).forward(
            text_input_ids,
            text_token_type_ids,
            text_visual_embeddings,
            text_mask,
            object_vl_embeddings,
            object_mask,
            output_all_encoded_layers=False,
            output_text_and_object_separately=False
        )
        if self.with_rel_head:
            relationship_logits = self.relationsip_head(pooled_rep)
        else:
            relationship_logits = None
        if self.with_mlm_head:
            # NOTE(review): `text_out` is never defined in this method (the
            # parent call above does not split text/object outputs), so this
            # line raises NameError when with_mlm_head is True -- presumably
            # this class is only used with with_mlm_head=False. TODO confirm.
            mlm_logits = self.mlm_head(text_out)
        else:
            mlm_logits = None
        if self.with_mvrc_head:
            # NOTE(review): same issue -- `object_out` is undefined here;
            # NameError when with_mvrc_head is True.
            mvrc_logits = self.mvrc_head(object_out)
        else:
            mvrc_logits = None
        return relationship_logits, mlm_logits, mvrc_logits, encoder_output_embeddings
def load_language_pretrained_model(self, language_pretrained_model_path):
pretrained_state_dict = torch.load(language_pretrained_model_path, map_location=lambda storage, loc: storage)
encoder_pretrained_state_dict = {}
pooler_pretrained_state_dict = {}
embedding_ln_pretrained_state_dict = {}
relationship_head_pretrained_state_dict = {}
mlm_head_pretrained_state_dict = {}
unexpected_keys = []
for _k, v in pretrained_state_dict.items():
if _k.startswith('bert.') or _k.startswith('roberta.'):
k = _k[len('bert.'):] if _k.startswith('bert.') else _k[len('roberta.'):]
if 'gamma' in k:
k = k.replace('gamma', 'weight')
if 'beta' in k:
k = k.replace('beta', 'bias')
if k.startswith('encoder.'):
k_ = k[len('encoder.'):]
if k_ in self.encoder.state_dict():
encoder_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
elif k.startswith('embeddings.'):
k_ = k[len('embeddings.'):]
if k_ == 'word_embeddings.weight':
self.word_embeddings.weight.data = v.to(dtype=self.word_embeddings.weight.data.dtype,
device=self.word_embeddings.weight.data.device)
elif k_ == 'position_embeddings.weight':
self.position_embeddings.weight.data = v.to(dtype=self.position_embeddings.weight.data.dtype,
device=self.position_embeddings.weight.data.device)
elif k_ == 'token_type_embeddings.weight':
self.token_type_embeddings.weight.data[:v.size(0)] = v.to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
if v.size(0) == 1:
# Todo: roberta token type embedding
self.token_type_embeddings.weight.data[1] = v[0].to(
dtype=self.token_type_embeddings.weight.data.dtype,
device=self.token_type_embeddings.weight.data.device)
elif k_.startswith('LayerNorm.'):
k__ = k_[len('LayerNorm.'):]
if k__ in self.embedding_LayerNorm.state_dict():
embedding_ln_pretrained_state_dict[k__] = v
else:
unexpected_keys.append(_k)
else:
unexpected_keys.append(_k)
elif self.config.with_pooler and k.startswith('pooler.'):
k_ = k[len('pooler.'):]
if k_ in self.pooler.state_dict():
pooler_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
elif _k.startswith('cls.seq_relationship.') and self.with_rel_head:
k_ = _k[len('cls.seq_relationship.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
if k_ in self.relationsip_head.caption_image_relationship.state_dict():
relationship_head_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
elif (_k.startswith('cls.predictions.') or _k.startswith('lm_head.')) and self.with_mlm_head:
k_ = _k[len('cls.predictions.'):] if _k.startswith('cls.predictions.') else _k[len('lm_head.'):]
if _k.startswith('lm_head.'):
if 'dense' in k_ or 'layer_norm' in k_:
k_ = 'transform.' + k_
if 'layer_norm' in k_:
k_ = k_.replace('layer_norm', 'LayerNorm')
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
if k_ in self.mlm_head.predictions.state_dict():
mlm_head_pretrained_state_dict[k_] = v
else:
unexpected_keys.append(_k)
else:
unexpected_keys.append(_k)
if len(unexpected_keys) > 0:
print("Warnings: Unexpected keys: {}.".format(unexpected_keys))
self.embedding_LayerNorm.load_state_dict(embedding_ln_pretrained_state_dict)
self.encoder.load_state_dict(encoder_pretrained_state_dict)
if self.config.with_pooler and len(pooler_pretrained_state_dict) > 0:
self.pooler.load_state_dict(pooler_pretrained_state_dict)
if self.with_rel_head and len(relationship_head_pretrained_state_dict) > 0 and \
relationship_head_pretrained_state_dict['weight'].shape[0] == self.relationsip_head.caption_image_relationship.weight.shape[0]:
self.relationsip_head.caption_image_relationship.load_state_dict(relationship_head_pretrained_state_dict)
if self.with_mlm_head:
self.mlm_head.predictions.load_state_dict(mlm_head_pretrained_state_dict)
# TODO: load MVRC head
| 43,500 | 49.700466 | 139 | py |
BertGen | BertGen-master/common/module.py | from collections import namedtuple
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
class Module(nn.Module):
    """Base wrapper around ``nn.Module`` that dispatches ``forward`` to
    separate train/inference code paths.

    Subclasses implement ``train_forward`` and ``inference_forward`` (and may
    override the ``*_preprocess`` hooks, which default to pass-through).
    ``self.training`` (managed by ``nn.Module``) selects the path.
    """
    def __init__(self, config):
        super(Module, self).__init__()
        # task/model configuration object, kept around for subclasses
        self.config = config
    def init_weight(self):
        raise NotImplementedError()
    def fix_params(self):
        raise NotImplementedError()
    def forward(self, *inputs, **kwargs):
        # run the mode-specific preprocess hook, then the mode-specific forward
        inputs, kwargs = self.preprocess(*inputs, **kwargs)
        if self.training:
            return self.train_forward(*inputs, **kwargs)
        else:
            return self.inference_forward(*inputs, **kwargs)
    def train_forward(self, *inputs, **kwargs):
        """
        def train_forward(self, data, label, **kwargs):
            # this is a toy example for 1 output, 2 loss function
            output = None
            loss1 = torch.tensor(0.0)
            loss2 = torch.tensor(0.0)
            outputs = {'output': output,
                       'loss1': loss1,
                       'loss2': loss2}
            loss = loss1 + loss2
            return outputs, loss
        """
        # BUG FIX: the original ``raise NotImplemented`` raises a TypeError
        # (NotImplemented is a sentinel value, not an exception class); use
        # NotImplementedError like the other abstract stubs above.
        raise NotImplementedError()
    def inference_forward(self, *inputs, **kwargs):
        """
        def inference_forward(self, data, **kwargs):
            output = None
            outputs = {'output': output}
            return outputs
        """
        # BUG FIX: same as train_forward -- NotImplemented is not raisable.
        raise NotImplementedError()
    def preprocess(self, *inputs, **kwargs):
        # dispatch to the mode-specific preprocess hook
        if self.training:
            return self.train_preprocess(*inputs, **kwargs)
        else:
            return self.inference_preprocess(*inputs, **kwargs)
    def train_preprocess(self, *inputs, **kwargs):
        # default: pass inputs through unchanged
        return inputs, kwargs
    def inference_preprocess(self, *inputs, **kwargs):
        # default: pass inputs through unchanged
        return inputs, kwargs
| 1,786 | 26.921875 | 65 | py |
BertGen | BertGen-master/common/trainer.py | import os
import time
from collections import namedtuple
import torch
try:
from apex import amp
from apex.amp import _amp_state
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
# Immutable record passed to every batch_end_callback: training progress
# (epoch, nbatch, rank, add_step), per-phase wall-clock timings, the running
# eval metric object, and the caller's locals() for ad-hoc inspection.
# (Note: the tuple's typename is 'BatchEndParams' while the binding is
# BatchEndParam -- the typename only shows up in reprs.)
BatchEndParam = namedtuple('BatchEndParams',
                           ['epoch',
                            'nbatch',
                            'rank',
                            'add_step',
                            'data_in_time',
                            'data_transfer_time',
                            'forward_time',
                            'backward_time',
                            'optimizer_time',
                            'metric_time',
                            'eval_metric',
                            'locals'])
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def to_cuda(batch):
    """Move every tensor in *batch* (including tensors nested one level deep
    inside lists) to the current CUDA device with non-blocking copies.

    Args:
        batch (sequence): mix of tensors, lists, and other objects; non-tensor
            entries are passed through unchanged.

    Returns:
        list: the batch with all top-level and one-level-nested tensors on GPU.
    """
    batch = list(batch)
    for i in range(len(batch)):
        if isinstance(batch[i], torch.Tensor):
            batch[i] = batch[i].cuda(non_blocking=True)
        elif isinstance(batch[i], list):
            for j, o in enumerate(batch[i]):
                # BUG FIX: the original tested ``batch[i]`` (the list itself)
                # instead of the element ``o``, so nested tensors were never
                # actually transferred to the GPU.
                if isinstance(o, torch.Tensor):
                    batch[i][j] = o.cuda(non_blocking=True)
    return batch
def train(net,
          optimizer,
          lr_scheduler,
          train_loader,
          train_sampler,
          metrics,
          begin_epoch,
          end_epoch,
          logger,
          rank=None,
          batch_end_callbacks=None,
          epoch_end_callbacks=None,
          writer=None,
          validation_monitor=None,
          fp16=False,
          clip_grad_norm=-1,
          gradient_accumulate_steps=1):
    """Run the training loop over epochs ``[begin_epoch, end_epoch)``.

    Per batch: transfer to GPU, forward (net returns ``(outputs, loss)``),
    backward (via apex ``amp`` when ``fp16``), and -- every
    ``gradient_accumulate_steps`` global steps -- LR-scheduler step, optional
    gradient clipping, optimizer step and gradient reset. Timings, losses and
    metrics are reported to ``writer`` (tensorboard-style) and to the
    ``batch_end_callbacks``; ``validation_monitor`` and
    ``epoch_end_callbacks`` run at each epoch end.

    NOTE(review): ``fp16=True`` requires apex to have imported successfully at
    module load; the ReduceLROnPlateau branch assumes ``validation_monitor``
    is not None -- confirm with callers.
    """
    assert isinstance(gradient_accumulate_steps, int) and gradient_accumulate_steps >= 1
    # torch.autograd.set_detect_anomaly(True)
    for epoch in range(begin_epoch, end_epoch):
        print('PROGRESS: %.2f%%' % (100.0 * epoch / end_epoch))
        # set epoch as random seed of sampler while distributed training
        if train_sampler is not None and hasattr(train_sampler, 'set_epoch'):
            train_sampler.set_epoch(epoch)
        # reset metrics
        metrics.reset()
        # set net to train mode
        net.train()
        # clear the parameter gradients
        # optimizer.zero_grad()
        # init end time
        end_time = time.time()
        # plateau scheduler steps on the monitored validation metric, once per epoch
        if isinstance(lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            name, value = validation_monitor.metrics.get()
            val = value[name.index(validation_monitor.host_metric_name)]
            lr_scheduler.step(val, epoch)
        # training
        for nbatch, batch in enumerate(train_loader):
            global_steps = len(train_loader) * epoch + nbatch
            # exported so other components (e.g. schedules) can read the step
            os.environ['global_steps'] = str(global_steps)
            # record time
            data_in_time = time.time() - end_time
            # transfer data to GPU
            data_transfer_time = time.time()
            batch = to_cuda(batch)
            data_transfer_time = time.time() - data_transfer_time
            # forward
            forward_time = time.time()
            outputs, loss = net(*batch)
            loss = loss.mean()
            # scale the loss so accumulated gradients average correctly
            if gradient_accumulate_steps > 1:
                loss = loss / gradient_accumulate_steps
            forward_time = time.time() - forward_time
            # backward
            backward_time = time.time()
            if fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            backward_time = time.time() - backward_time
            optimizer_time = time.time()
            # only step/clip/zero once every gradient_accumulate_steps steps
            if (global_steps + 1) % gradient_accumulate_steps == 0:
                # step LR scheduler
                if lr_scheduler is not None and not isinstance(lr_scheduler,
                                                               torch.optim.lr_scheduler.ReduceLROnPlateau):
                    lr_scheduler.step()
                # clip gradient
                if clip_grad_norm > 0:
                    if fp16:
                        # clip the fp32 master copies maintained by amp
                        total_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
                                                                    clip_grad_norm)
                    else:
                        total_norm = torch.nn.utils.clip_grad_norm_(net.parameters(),
                                                                    clip_grad_norm)
                    if writer is not None:
                        writer.add_scalar(tag='grad-para/Total-Norm',
                                          scalar_value=float(total_norm),
                                          global_step=global_steps)
                optimizer.step()
                # clear the parameter gradients
                optimizer.zero_grad()
            optimizer_time = time.time() - optimizer_time
            # update metric
            metric_time = time.time()
            metrics.update(outputs)
            if writer is not None:
                with torch.no_grad():
                    for group_i, param_group in enumerate(optimizer.param_groups):
                        writer.add_scalar(tag='Initial-LR/Group_{}'.format(group_i),
                                          scalar_value=param_group['initial_lr'],
                                          global_step=global_steps)
                        writer.add_scalar(tag='LR/Group_{}'.format(group_i),
                                          scalar_value=param_group['lr'],
                                          global_step=global_steps)
                    writer.add_scalar(tag='Train-Loss',
                                      scalar_value=float(loss.item()),
                                      global_step=global_steps)
                    name, value = metrics.get()
                    for n, v in zip(name, value):
                        writer.add_scalar(tag='Train-' + n,
                                          scalar_value=v,
                                          global_step=global_steps)
            metric_time = time.time() - metric_time
            # execute batch_end_callbacks
            if batch_end_callbacks is not None:
                batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch, add_step=True, rank=rank,
                                                 data_in_time=data_in_time, data_transfer_time=data_transfer_time,
                                                 forward_time=forward_time, backward_time=backward_time,
                                                 optimizer_time=optimizer_time, metric_time=metric_time,
                                                 eval_metric=metrics, locals=locals())
                _multiple_callbacks(batch_end_callbacks, batch_end_params)
            # update end time
            end_time = time.time()
        # execute epoch_end_callbacks
        if validation_monitor is not None:
            validation_monitor(epoch, net, optimizer, writer)
        if epoch_end_callbacks is not None:
            _multiple_callbacks(epoch_end_callbacks, epoch, net, optimizer, writer, validation_monitor=validation_monitor)
| 7,661 | 37.31 | 122 | py |
BertGen | BertGen-master/common/backbone/resnet/resnet.py | """
Modified from torchvision, but exposes features from different stages
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import warnings
# Public API of this module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152']
# Torchvision model-zoo checkpoint URLs, keyed by architecture name.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
# Number of residual blocks in each of the four stages, per architecture.
model_layers = {
    'resnet18': [2, 2, 2, 2],
    'resnet34': [3, 4, 6, 3],
    'resnet50': [3, 4, 6, 3],
    'resnet101': [3, 4, 23, 3],
    'resnet152': [3, 8, 36, 3],
}
def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):
    """Build a bias-free 3x3 convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        dilation=dilation,
        padding=padding,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual connection (ResNet-18/34 unit)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, **kwargs):
        super(BasicBlock, self).__init__()
        # The first conv carries stride and dilation; padding == dilation
        # keeps the 3x3 receptive-field geometry consistent.
        self.conv1 = conv3x3(inplanes, planes, stride, dilation, padding=dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # shortcut branch (projected when channel/stride change)
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual unit (ResNet-50/101/152)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, stride_in_1x1=False):
        super(Bottleneck, self).__init__()
        # Caffe-style ResNet puts the stride on the 1x1 reduce conv
        # (stride_in_1x1=True); torchvision-style puts it on the 3x3 conv.
        reduce_stride = stride if stride_in_1x1 else 1
        spatial_stride = 1 if stride_in_1x1 else stride
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=reduce_stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # padding == dilation preserves the 3x3 geometry for dilated stages
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=spatial_stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # shortcut branch (projected when channel/stride change)
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """Torchvision-style ResNet that can expose intermediate stage features.

    ``expose_stages`` selects which stage outputs ``forward`` returns
    (stage 1 = stem output after maxpool, stages 2-5 = layer1-layer4,
    stage 6 = fc classification scores). Only as many residual layers are
    built as the deepest requested stage requires (callers pass a truncated
    ``layers`` list).
    """
    def __init__(self, block, layers, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        layers_planes = [64, 128, 256, 512]
        layers_strides = [1, 2, 2, 2]
        layers_dilations = dilations if dilations is not None else [1, 1, 1, 1]
        # a dilated (dilation == 2) stage keeps full resolution: drop its stride
        for i, dilation in enumerate(layers_dilations):
            if dilation == 2:
                layers_strides[i] = 1
        layers_planes = layers_planes[:len(layers)]
        layers_strides = layers_strides[:len(layers)]
        layers_dilations = layers_dilations[:len(layers)]
        for i, (planes, blocks, stride, dilation) in enumerate(zip(layers_planes, layers, layers_strides, layers_dilations)):
            layer = self._make_layer(block, planes, blocks, stride=stride, dilation=dilation, stride_in_1x1=stride_in_1x1)
            self.__setattr__('layer{}'.format(i + 1), layer)
        self.num_layers = i + 1
        # NOTE(review): expose_stages=None raises TypeError on the membership
        # test below; the module-level constructors always supply a list.
        self.has_fc_head = 6 in expose_stages
        self.expose_stages = expose_stages
        if self.has_fc_head:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * block.expansion, num_classes)
            # NOTE(review): mutates the caller-supplied list in place.
            self.expose_stages.remove(6)
        # standard He initialization for convs; BN starts as identity
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, stride_in_1x1=False):
        """Stack ``blocks`` residual units; the first one carries the stride
        and (when shape changes) a 1x1 projection shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation, stride_in_1x1=stride_in_1x1))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return a dict of requested features: 'body<k>' per exposed stage,
        plus 'cls_score' when the fc head is enabled."""
        expose_feats = {}
        feats = {}
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        feats['body1'] = x
        for i in range(self.num_layers):
            x = self.__getattr__("layer{}".format(i + 1))(x)
            feats['body{}'.format(i + 2)] = x
        if self.has_fc_head:
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
            expose_feats['cls_score'] = x
        if self.expose_stages is not None:
            for expose_stage in self.expose_stages:
                feat_name = 'body{}'.format(expose_stage)
                expose_feats[feat_name] = feats[feat_name]
        return expose_feats
    def load_pretrained_state_dict(self, state_dict):
        """Load state dict of pretrained model
        Args:
            state_dict (dict): state dict to load
        Keys absent from the pretrained dict keep their current values and
        are reported via a warning.
        """
        new_state_dict = self.state_dict()
        miss_keys = []
        for k in new_state_dict.keys():
            if k in state_dict.keys():
                new_state_dict[k] = state_dict[k]
            else:
                miss_keys.append(k)
        if len(miss_keys) > 0:
            warnings.warn('miss keys: {}'.format(miss_keys))
        self.load_state_dict(new_state_dict)
    def frozen_parameters(self, frozen_stages=None, frozen_bn=False):
        """Disable gradients for the given stages (1 = stem, 2-5 = layer1-4,
        6 = fc) and optionally for every BatchNorm parameter."""
        if frozen_bn:
            for module in self.modules():
                if isinstance(module, nn.BatchNorm2d):
                    for param in module.parameters():
                        param.requires_grad = False
        if frozen_stages is not None:
            for stage in frozen_stages:
                assert (stage >= 1) and (stage <= 6)
                if stage == 1:
                    for param in self.conv1.parameters():
                        param.requires_grad = False
                    for param in self.bn1.parameters():
                        param.requires_grad = False
                elif stage < 6:
                    # stage k (2..5) corresponds to layer{k-1}
                    for param in self.__getattr__("layer{}".format(stage - 1)).parameters():
                        param.requires_grad = False
                else:
                    for param in self.fc.parameters():
                        param.requires_grad = False
    def bn_eval(self):
        """Put every BatchNorm layer in eval mode (freeze running stats)."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()
def resnet18(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, **kwargs):
    """Build a ResNet-18.

    Args:
        pretrained (bool): load pretrained weights when True.
        pretrained_model_path (str, optional): checkpoint path; when omitted
            the torchvision model zoo is used instead.
        num_classes (int): size of the fc output (needed when stage 6 is exposed).
        expose_stages (list, optional): stages whose outputs to expose,
            e.g. [4, 5]; defaults to [6] (classification scores).
        dilations (list, optional): per-stage dilation factors.
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    # build only as many stages as the deepest exposed stage needs
    model = ResNet(block=BasicBlock,
                   layers=model_layers['resnet18'][:end_stage - 1],
                   num_classes=num_classes,
                   expose_stages=expose_stages,
                   dilations=dilations)
    if pretrained:
        if pretrained_model_path is not None:
            weights = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
        else:
            weights = model_zoo.load_url(model_urls['resnet18'])
        model.load_pretrained_state_dict(weights)
    return model
def resnet34(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, **kwargs):
    """Build a ResNet-34.

    Args:
        pretrained (bool): load pretrained weights when True.
        pretrained_model_path (str, optional): checkpoint path; when omitted
            the torchvision model zoo is used instead.
        num_classes (int): size of the fc output (needed when stage 6 is exposed).
        expose_stages (list, optional): stages whose outputs to expose,
            e.g. [4, 5]; defaults to [6] (classification scores).
        dilations (list, optional): per-stage dilation factors.
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    # build only as many stages as the deepest exposed stage needs
    model = ResNet(block=BasicBlock,
                   layers=model_layers['resnet34'][:end_stage - 1],
                   num_classes=num_classes,
                   expose_stages=expose_stages,
                   dilations=dilations)
    if pretrained:
        if pretrained_model_path is not None:
            weights = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
        else:
            weights = model_zoo.load_url(model_urls['resnet34'])
        model.load_pretrained_state_dict(weights)
    return model
def resnet50(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
    """Build a ResNet-50.

    Args:
        pretrained (bool): load pretrained weights when True.
        pretrained_model_path (str, optional): checkpoint path; when omitted
            the torchvision model zoo is used instead.
        num_classes (int): size of the fc output (needed when stage 6 is exposed).
        expose_stages (list, optional): stages whose outputs to expose,
            e.g. [4, 5]; defaults to [6] (classification scores).
        dilations (list, optional): per-stage dilation factors.
        stride_in_1x1 (bool): put the stage stride on the 1x1 conv (Caffe style).
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    # build only as many stages as the deepest exposed stage needs
    model = ResNet(block=Bottleneck,
                   layers=model_layers['resnet50'][:end_stage - 1],
                   num_classes=num_classes,
                   expose_stages=expose_stages,
                   dilations=dilations,
                   stride_in_1x1=stride_in_1x1)
    if pretrained:
        if pretrained_model_path is not None:
            weights = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
        else:
            weights = model_zoo.load_url(model_urls['resnet50'])
        model.load_pretrained_state_dict(weights)
    return model
def resnet101(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
    """Build a ResNet-101.

    Args:
        pretrained (bool): load pretrained weights when True.
        pretrained_model_path (str, optional): checkpoint path; when omitted
            the torchvision model zoo is used instead.
        num_classes (int): size of the fc output (needed when stage 6 is exposed).
        expose_stages (list, optional): stages whose outputs to expose,
            e.g. [4, 5]; defaults to [6] (classification scores).
        dilations (list, optional): per-stage dilation factors.
        stride_in_1x1 (bool): put the stage stride on the 1x1 conv (Caffe style).
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    # build only as many stages as the deepest exposed stage needs
    model = ResNet(block=Bottleneck,
                   layers=model_layers['resnet101'][:end_stage - 1],
                   num_classes=num_classes,
                   expose_stages=expose_stages,
                   dilations=dilations,
                   stride_in_1x1=stride_in_1x1)
    if pretrained:
        if pretrained_model_path is not None:
            weights = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
        else:
            weights = model_zoo.load_url(model_urls['resnet101'])
        model.load_pretrained_state_dict(weights)
    return model
def resnet152(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
    """Build a ResNet-152.

    Args:
        pretrained (bool): load pretrained weights when True.
        pretrained_model_path (str, optional): checkpoint path; when omitted
            the torchvision model zoo is used instead.
        num_classes (int): size of the fc output (needed when stage 6 is exposed).
        expose_stages (list, optional): stages whose outputs to expose,
            e.g. [4, 5]; defaults to [6] (classification scores).
        dilations (list, optional): per-stage dilation factors.
        stride_in_1x1 (bool): put the stage stride on the 1x1 conv (Caffe style).
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    # build only as many stages as the deepest exposed stage needs
    model = ResNet(block=Bottleneck,
                   layers=model_layers['resnet152'][:end_stage - 1],
                   num_classes=num_classes,
                   expose_stages=expose_stages,
                   dilations=dilations,
                   stride_in_1x1=stride_in_1x1)
    if pretrained:
        if pretrained_model_path is not None:
            weights = torch.load(pretrained_model_path, map_location=lambda storage, loc: storage)
        else:
            weights = model_zoo.load_url(model_urls['resnet152'])
        model.load_pretrained_state_dict(weights)
    return model
| 17,247 | 40.461538 | 135 | py |
BertGen | BertGen-master/common/callbacks/epoch_end_callbacks/checkpoint.py | import torch
class Checkpoint(object):
    """Epoch-end callback that serializes model and optimizer state every
    ``frequent`` epochs, and keeps an extra ``<prefix>-best.model`` copy
    whenever the validation monitor marks the epoch as the best so far.
    """

    def __init__(self, prefix, frequent):
        super(Checkpoint, self).__init__()
        self.prefix = prefix
        self.frequent = frequent

    def __call__(self, epoch_num, net, optimizer, writer, validation_monitor=None):
        # only act every `frequent` epochs
        if (epoch_num + 1) % self.frequent != 0:
            return
        checkpoint = {
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        is_best = False
        if validation_monitor is not None:
            checkpoint['validation_monitor'] = validation_monitor.state_dict()
            is_best = validation_monitor.best_epoch == epoch_num
        torch.save(checkpoint, '{}-{:04d}.model'.format(self.prefix, epoch_num))
        if is_best:
            best_name = '{}-best.model'.format(self.prefix)
            torch.save(checkpoint, best_name)
            print('Save new best model to {}.'.format(best_name))
| 1,119 | 42.076923 | 87 | py |
BertGen | BertGen-master/common/nlp/misc.py | import torch
import random
def get_align_matrix(aligned_ids, sparse=False, device=None, dtype=torch.float32):
    """Build the alignment matrix used to average sub-token features back
    onto their original tokens.

    :param aligned_ids: list, aligned_ids[k] is the original token index of
        the k-th sub-token.
    :param sparse: sparse output is not implemented yet.
    :param device: device of the returned matrix.
    :param dtype: dtype of the returned matrix.
    :return: align_matrix, shape (L, L'); each row sums to 1.

    Example:
        >> get_align_matrix([0, 0, 1, 2, 2, 2])
        tensor([[0.5000, 0.5000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 1.0000, 0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000, 0.3333, 0.3333, 0.3333]])
    """
    num_tokens = max(aligned_ids) + 1
    num_subtokens = len(aligned_ids)
    if sparse:
        raise NotImplementedError
    # one-hot assignment of sub-tokens to tokens, then row-normalize so each
    # token averages over its sub-tokens
    align_matrix = torch.zeros((num_tokens, num_subtokens), dtype=dtype, device=device)
    align_matrix[aligned_ids, torch.arange(num_subtokens)] = 1
    return align_matrix / align_matrix.sum(dim=1, keepdim=True)
def get_all_ngrams(words):
    """Return every contiguous n-gram of *words*.

    Ordered by n-gram length first (1..len(words)), then by start position.

    :param words: list of str
    :return: list of (list of str)
    """
    total = len(words)
    return [list(words[start:start + size])
            for size in range(1, total + 1)
            for start in range(total - size + 1)]
def random_word_with_token_ids(token_ids, tokenizer):
    """Apply BERT-style random masking to *token_ids* (modified in place).

    Each position is selected with probability 15%; of the selected ones,
    80% become ``[MASK]``, 10% become a random vocabulary token, and 10%
    keep the current token. Selected positions get their original id as the
    label; all other positions get -1 (ignored by the LM loss later).

    :param token_ids: list of int token ids.
    :param tokenizer: tokenizer exposing ``vocab`` and ``convert_tokens_to_ids``.
    :return: (token_ids, output_label)
    """
    labels = []
    mask_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]
    for pos, original_id in enumerate(token_ids):
        roll = random.random()
        if roll >= 0.15:
            # untouched token: will be ignored by the loss function
            labels.append(-1)
            continue
        # rescale the roll into [0, 1) within the selected 15%
        roll /= 0.15
        if roll < 0.8:
            token_ids[pos] = mask_id
        elif roll < 0.9:
            token_ids[pos] = random.choice(list(tokenizer.vocab.items()))[1]
        # remaining 10%: keep the current token unchanged
        labels.append(original_id)
    return token_ids, labels
| 2,726 | 30.344828 | 104 | py |
BertGen | BertGen-master/common/nlp/time_distributed.py | """
A wrapper that unrolls the second (time) dimension of a tensor
into the first (batch) dimension, applies some other ``Module``,
and then rolls the time dimension back up.
"""
import torch
class TimeDistributed(torch.nn.Module):
    """
    Given inputs shaped ``(batch_size, time_steps, [rest])`` and a ``Module``
    that expects ``(batch_size, [rest])``, fold the first two dimensions
    together, apply the wrapped module, and unfold the result back to
    ``(batch_size, time_steps, [rest'])``. Works whichever of the first two
    axes is the batch, since only the leading pair is combined and split.
    """
    def __init__(self, module):
        super(TimeDistributed, self).__init__()
        self._module = module

    def forward(self, *inputs, **kwargs):  # pylint: disable=arguments-differ
        folded = []
        for tensor in inputs:
            shape = tensor.size()
            if len(shape) <= 2:
                raise RuntimeError("No dimension to distribute: " + str(shape))
            # fold (batch, time, *rest) -> (batch * time, *rest)
            folded.append(tensor.contiguous().view(-1, *shape[2:]))
        result = self._module(*folded, **kwargs)
        # unfold using the (batch, time) of the last input, matching the
        # original implementation's behaviour
        if isinstance(result, torch.Tensor):
            return result.contiguous().view(shape[0], shape[1], *result.size()[1:])
        if isinstance(result, tuple):
            return tuple(t.contiguous().view(shape[0], shape[1], *t.size()[1:])
                         for t in result)
        raise ValueError("Not support!")
| 2,245 | 42.192308 | 99 | py |
BertGen | BertGen-master/common/nlp/encoder_base.py | from typing import Tuple, Union, Optional, Callable
import torch
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
# We have two types here for the state, because storing the state in something
# which is Iterable (like a tuple, below), is helpful for internal manipulation
# - however, the states are consumed as either Tensors or a Tuple of Tensors, so
# returning them in this format is unhelpful.
# Type aliases for recurrent hidden state: either a single tensor or a pair
# of tensors (e.g. an (h, c) pair); RnnStateStorage is the flat tuple form
# kept between calls by stateful encoders.
RnnState = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]  # pylint: disable=invalid-name
RnnStateStorage = Tuple[torch.Tensor, ...]  # pylint: disable=invalid-name
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
    """Count the valid positions per row of a binary mask.

    Parameters
    ----------
    mask : torch.Tensor, required.
        A 2D binary mask of shape (batch_size, sequence_length).

    Returns
    -------
    A torch.LongTensor of shape (batch_size,) holding the number of non-zero
    entries in each row, i.e. each sequence's length.
    """
    return torch.sum(mask.long(), dim=-1)
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
    """Sort a batch-first tensor by decreasing sequence length.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A batch-first tensor.
    sequence_lengths : torch.LongTensor, required.
        Per-row lengths to sort by.

    Returns
    -------
    sorted_tensor : torch.FloatTensor
        ``tensor`` reordered along the batch dimension by decreasing length.
    sorted_sequence_lengths : torch.LongTensor
        The lengths in decreasing order.
    restoration_indices : torch.LongTensor
        Indices such that ``sorted_tensor.index_select(0, restoration_indices)``
        recovers the original ordering.
    permuation_index : torch.LongTensor
        The permutation used to sort, reusable to sort other tensors the
        same way.
    """
    if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
        raise Exception("Both the tensor and sequence lengths must be torch.Tensors.")
    sorted_lengths, perm = sequence_lengths.sort(0, descending=True)
    reordered = tensor.index_select(0, perm)
    # Invert the permutation: sorting the permutation ascending yields, for
    # each original row, its position within the sorted batch.
    _, inverse = perm.sort(0, descending=False)
    positions = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
    undo = positions.index_select(0, inverse)
    return reordered, sorted_lengths, undo, perm
class _EncoderBase(torch.nn.Module):
    # pylint: disable=abstract-method
    """
    This abstract class serves as a base for the 3 ``Encoder`` abstractions in AllenNLP.
    - :class:`~allennlp.modules.seq2seq_encoders.Seq2SeqEncoders`
    - :class:`~allennlp.modules.seq2vec_encoders.Seq2VecEncoders`
    Additionally, this class provides functionality for sorting sequences by length
    so they can be consumed by Pytorch RNN classes, which require their inputs to be
    sorted by length. Finally, it also provides optional statefulness to all of it's
    subclasses by allowing the caching and retrieving of the hidden states of RNNs.
    """
    def __init__(self, stateful: bool = False) -> None:
        super(_EncoderBase, self).__init__()
        self.stateful = stateful
        # Cached hidden states carried over from previous batches.  Only populated
        # when ``stateful`` is True and ``_update_states`` has been called at least once.
        self._states: Optional[RnnStateStorage] = None
    def sort_and_run_forward(self,
                             module: Callable[[PackedSequence, Optional[RnnState]],
                                              Tuple[Union[PackedSequence, torch.Tensor], RnnState]],
                             inputs: torch.Tensor,
                             mask: torch.Tensor,
                             hidden_state: Optional[RnnState] = None):
        """
        This function exists because Pytorch RNNs require that their inputs be sorted
        before being passed as input. As all of our Seq2xxxEncoders use this functionality,
        it is provided in a base class. This method can be called on any module which
        takes as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
        tuple of tensors or a tensor.
        As all of our Seq2xxxEncoders have different return types, we return `sorted`
        outputs from the module, which is called directly. Additionally, we return the
        indices into the batch dimension required to restore the tensor to it's correct,
        unsorted order and the number of valid batch elements (i.e the number of elements
        in the batch which are not completely masked). This un-sorting and re-padding
        of the module outputs is left to the subclasses because their outputs have different
        types and handling them smoothly here is difficult.
        Parameters
        ----------
        module : ``Callable[[PackedSequence, Optional[RnnState]],
                            Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
            A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
        inputs : ``torch.Tensor``, required.
            A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
            the inputs to the Encoder.
        mask : ``torch.Tensor``, required.
            A tensor of shape ``(batch_size, sequence_length)``, representing masked and
            non-masked elements of the sequence for each element in the batch.
        hidden_state : ``Optional[RnnState]``, (default = None).
            A single tensor of shape (num_layers, batch_size, hidden_size) representing the
            state of an RNN with or a tuple of
            tensors of shapes (num_layers, batch_size, hidden_size) and
            (num_layers, batch_size, memory_size), representing the hidden state and memory
            state of an LSTM-like RNN.
        Returns
        -------
        module_output : ``Union[torch.Tensor, PackedSequence]``.
            A Tensor or PackedSequence representing the output of the Pytorch Module.
            The batch size dimension will be equal to ``num_valid``, as sequences of zero
            length are clipped off before the module is called, as Pytorch cannot handle
            zero length sequences.
        final_states : ``Optional[RnnState]``
            A Tensor representing the hidden state of the Pytorch Module. This can either
            be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
            the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
        restoration_indices : ``torch.LongTensor``
            A tensor of shape ``(batch_size,)``, describing the re-indexing required to transform
            the outputs back to their original batch order.
        """
        # In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
        # requires all sequence lengths to be > 0, so remove sequences of zero length before
        # calling self._module, then fill with zeros.
        # First count how many sequences are empty.
        batch_size = mask.size(0)
        # NOTE(review): counting ones in ``mask[:, 0]`` assumes masks are left-aligned,
        # i.e. every non-empty sequence has its first timestep unmasked — confirm upstream.
        num_valid = torch.sum(mask[:, 0]).int().item()
        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices = \
            sort_batch_by_length(inputs, sequence_lengths)
        # Now create a PackedSequence with only the non-empty, sorted sequences.
        packed_sequence_input = pack_padded_sequence(sorted_inputs[:num_valid, :, :],
                                                     sorted_sequence_lengths[:num_valid].data.tolist(),
                                                     batch_first=True)
        # Prepare the initial states.
        if not self.stateful:
            if hidden_state is None:
                initial_states = hidden_state
            elif isinstance(hidden_state, tuple):
                # LSTM-style (hidden, memory) tuple: sort each state along the batch
                # dimension and clip off the rows belonging to fully-padded sequences.
                initial_states = [state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
                                  for state in hidden_state]
            else:
                initial_states = hidden_state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
        else:
            initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
        # Actually call the module on the sorted PackedSequence.
        module_output, final_states = module(packed_sequence_input, initial_states)
        return module_output, final_states, restoration_indices
    def _get_initial_states(self,
                            batch_size: int,
                            num_valid: int,
                            sorting_indices: torch.LongTensor) -> Optional[RnnState]:
        """
        Returns an initial state for use in an RNN. Additionally, this method handles
        the batch size changing across calls by mutating the state to append initial states
        for new elements in the batch. Finally, it also handles sorting the states
        with respect to the sequence lengths of elements in the batch and removing rows
        which are completely padded. Importantly, this `mutates` the state if the
        current batch size is larger than when it was previously called.
        Parameters
        ----------
        batch_size : ``int``, required.
            The batch size can change size across calls to stateful RNNs, so we need
            to know if we need to expand or shrink the states before returning them.
            Expanded states will be set to zero.
        num_valid : ``int``, required.
            The batch may contain completely padded sequences which get removed before
            the sequence is passed through the encoder. We also need to clip these off
            of the state too.
        sorting_indices ``torch.LongTensor``, required.
            Pytorch RNNs take sequences sorted by length. When we return the states to be
            used for a given call to ``module.forward``, we need the states to match up to
            the sorted sequences, so before returning them, we sort the states using the
            same indices used to sort the sequences.
        Returns
        -------
        This method has a complex return type because it has to deal with the first time it
        is called, when it has no state, and the fact that types of RNN have heterogeneous
        states.
        If it is the first time the module has been called, it returns ``None``, regardless
        of the type of the ``Module``.
        Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
        ``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
        respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
        ``(num_layers, num_valid, state_size)``.
        """
        # We don't know the state sizes the first time calling forward,
        # so we let the module define what it's initial hidden state looks like.
        if self._states is None:
            return None
        # Otherwise, we have some previous states.
        if batch_size > self._states[0].size(1):
            # This batch is larger than the all previous states.
            # If so, resize the states.
            num_states_to_concat = batch_size - self._states[0].size(1)
            resized_states = []
            # state has shape (num_layers, batch_size, hidden_size)
            for state in self._states:
                # This _must_ be inside the loop because some
                # RNNs have states with different last dimension sizes.
                zeros = state.new_zeros(state.size(0),
                                        num_states_to_concat,
                                        state.size(2))
                resized_states.append(torch.cat([state, zeros], 1))
            # Note: the cached states themselves are grown here, not just the
            # returned copy — this mutation is relied upon by ``_update_states``.
            self._states = tuple(resized_states)
            correctly_shaped_states = self._states
        elif batch_size < self._states[0].size(1):
            # This batch is smaller than the previous one.
            correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
        else:
            correctly_shaped_states = self._states
        # At this point, our states are of shape (num_layers, batch_size, hidden_size).
        # However, the encoder uses sorted sequences and additionally removes elements
        # of the batch which are fully padded. We need the states to match up to these
        # sorted and filtered sequences, so we do that in the next two blocks before
        # returning the state/s.
        if len(self._states) == 1:
            # GRUs only have a single state. This `unpacks` it from the
            # tuple and returns the tensor directly.
            correctly_shaped_state = correctly_shaped_states[0]
            sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
            return sorted_state[:, :num_valid, :]
        else:
            # LSTMs have a state tuple of (state, memory).
            sorted_states = [state.index_select(1, sorting_indices)
                             for state in correctly_shaped_states]
            return tuple(state[:, :num_valid, :] for state in sorted_states)
    def _update_states(self,
                       final_states: RnnStateStorage,
                       restoration_indices: torch.LongTensor) -> None:
        """
        After the RNN has run forward, the states need to be updated.
        This method just sets the state to the updated new state, performing
        several pieces of book-keeping along the way - namely, unsorting the
        states and ensuring that the states of completely padded sequences are
        not updated. Finally, it also detaches the state variable from the
        computational graph, such that the graph can be garbage collected after
        each batch iteration.
        Parameters
        ----------
        final_states : ``RnnStateStorage``, required.
            The hidden states returned as output from the RNN.
        restoration_indices : ``torch.LongTensor``, required.
            The indices that invert the sorting used in ``sort_and_run_forward``
            to order the states with respect to the lengths of the sequences in
            the batch.
        """
        # TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
        # which way around is best?
        new_unsorted_states = [state.index_select(1, restoration_indices)
                               for state in final_states]
        if self._states is None:
            # We don't already have states, so just set the
            # ones we receive to be the current state.
            # ``.data`` drops the autograd history so the graph can be freed.
            self._states = tuple(state.data for state in new_unsorted_states)
        else:
            # Now we've sorted the states back so that they correspond to the original
            # indices, we need to figure out what states we need to update, because if we
            # didn't use a state for a particular row, we want to preserve its state.
            # Thankfully, the rows which are all zero in the state correspond exactly
            # to those which aren't used, so we create masks of shape (new_batch_size,),
            # denoting which states were used in the RNN computation.
            current_state_batch_size = self._states[0].size(1)
            new_state_batch_size = final_states[0].size(1)
            # Masks for the unused states of shape (1, new_batch_size, 1)
            used_new_rows_mask = [(state[0, :, :].sum(-1)
                                   != 0.0).float().view(1, new_state_batch_size, 1)
                                  for state in new_unsorted_states]
            new_states = []
            if current_state_batch_size > new_state_batch_size:
                # The new state is smaller than the old one,
                # so just update the indices which we used.
                for old_state, new_state, used_mask in zip(self._states,
                                                           new_unsorted_states,
                                                           used_new_rows_mask):
                    # zero out all rows in the previous state
                    # which _were_ used in the current state.
                    masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
                    # The old state is larger, so update the relevant parts of it.
                    old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
                    new_states.append(old_state.detach())
            else:
                # The states are the same size, so we just have to
                # deal with the possibility that some rows weren't used.
                new_states = []
                for old_state, new_state, used_mask in zip(self._states,
                                                           new_unsorted_states,
                                                           used_new_rows_mask):
                    # zero out all rows which _were_ used in the current state.
                    masked_old_state = old_state * (1 - used_mask)
                    # The old state is larger, so update the relevant parts of it.
                    new_state += masked_old_state
                    new_states.append(new_state.detach())
            # It looks like there should be another case handled here - when
            # the current_state_batch_size < new_state_batch_size. However,
            # this never happens, because the states themeselves are mutated
            # by appending zeros when calling _get_inital_states, meaning that
            # the new states are either of equal size, or smaller, in the case
            # that there are some unused elements (zero-length) for the RNN computation.
            self._states = tuple(new_states)
    def reset_states(self):
        # Drop all cached state; the next forward pass starts from scratch.
        self._states = None
| 18,404 | 52.502907 | 109 | py |
BertGen | BertGen-master/common/nlp/bert_encoder_wrapper.py | import torch
import torch.nn as nn
from external.pytorch_pretrained_bert.modeling import BertEncoder, BertLayerNorm
class BertEncoderWrapper(nn.Module):
    """
    Wraps a ``BertEncoder`` so that it can consume features of arbitrary width.

    A linear layer first projects the inputs to the encoder's hidden size;
    optional learned position embeddings, LayerNorm and dropout are then
    applied before the stack of BERT layers runs.
    """
    def __init__(self, bert_config, input_size, output_all_encoded_layers=False):
        super(BertEncoderWrapper, self).__init__()
        self.bert_config = bert_config
        self.output_all_encoded_layers = output_all_encoded_layers
        self.input_transform = nn.Linear(input_size, bert_config.hidden_size)
        if 'with_position_embeddings' in bert_config:
            self.with_position_embeddings = bert_config.with_position_embeddings
        else:
            self.with_position_embeddings = False
        if self.with_position_embeddings:
            self.position_embedding = nn.Embedding(bert_config.max_position_embeddings,
                                                   bert_config.hidden_size)
        self.LayerNorm = BertLayerNorm(bert_config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
        self.bert_encoder = BertEncoder(bert_config)
        self.apply(self.init_bert_weights)
    def init_bert_weights(self, module):
        """Initialize the weights following the original BERT implementation."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def get_output_dim(self):
        """Dimensionality of the encoder output (the BERT hidden size)."""
        return self.bert_config.hidden_size
    def forward(self, inputs, mask):
        projected = self.input_transform(inputs)
        if self.with_position_embeddings:
            num_steps = projected.size(1)
            position_ids = torch.arange(num_steps, dtype=torch.long, device=projected.device)
            position_ids = position_ids.unsqueeze(0).expand((projected.shape[0], projected.shape[1]))
            projected = projected + self.position_embedding(position_ids)
        projected = self.LayerNorm(projected)
        projected = self.dropout(projected)
        # Build a broadcastable additive attention mask: 0.0 for positions we
        # want to attend and -10000.0 for masked positions.  Added to the raw
        # attention scores before the softmax, this effectively removes the
        # masked positions entirely.
        attention_mask = mask.unsqueeze(1).unsqueeze(2)
        attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)
        attention_mask = (1.0 - attention_mask) * -10000.0
        encoded = self.bert_encoder(projected,
                                    attention_mask,
                                    output_all_encoded_layers=self.output_all_encoded_layers)
        if not self.output_all_encoded_layers:
            encoded = encoded[0]
        return encoded
| 3,207 | 49.125 | 112 | py |
BertGen | BertGen-master/common/nlp/input_variational_dropout.py | import torch
class InputVariationalDropout(torch.nn.Dropout):
    """
    Apply the dropout scheme from Gal and Ghahramani, "Dropout as a Bayesian
    Approximation: Representing Model Uncertainty in Deep Learning"
    (https://arxiv.org/abs/1506.02142) to a 3D tensor.

    A single dropout mask of shape ``(batch_size, embedding_dim)`` is sampled
    per batch and broadcast over every timestep of an input of shape
    ``(batch_size, num_timesteps, embedding_dim)``.
    """
    def forward(self, input_tensor):
        # pylint: disable=arguments-differ
        """
        Apply timestep-shared dropout to ``input_tensor``.

        Parameters
        ----------
        input_tensor : ``torch.FloatTensor``
            A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``.

        Returns
        -------
        ``torch.FloatTensor`` of the same shape with dropout applied, or
        ``None`` when ``self.inplace`` is set (the input is modified in place).
        """
        # Sample one mask per (batch element, feature) and reuse it at every timestep.
        mask_template = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
        dropped = torch.nn.functional.dropout(mask_template, self.p, self.training, inplace=False)
        timestep_mask = dropped.unsqueeze(1)
        if self.inplace:
            input_tensor *= timestep_mask
            return None
        return input_tensor * timestep_mask
BertGen | BertGen-master/common/nlp/roberta/utils.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import os
try:
from functools import lru_cache
except ImportError:
# Just a dummy decorator to get the checks to run on python2
# because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
def lru_cache():
return lambda func: func
import logging
import json
import six
from io import open
from functools import wraps
import boto3
import requests
from botocore.exceptions import ClientError
import shutil
from hashlib import sha256
import fnmatch
import tempfile
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path))
PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
logger = logging.getLogger(__name__)
SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json'
ADDED_TOKENS_FILE = 'added_tokens.json'
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a signficant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    _chr = unichr if sys.version_info[0] == 2 else chr
    # Printable bytes map to themselves.
    byte_values = list(range(ord("!"), ord("~") + 1)) + \
        list(range(ord("¡"), ord("¬") + 1)) + \
        list(range(ord("®"), ord("ÿ") + 1))
    char_codes = byte_values[:]
    # Remaining bytes are shifted past 255 so every mapped character is printable.
    shift = 0
    for byte in range(2 ** 8):
        if byte not in byte_values:
            byte_values.append(byte)
            char_codes.append(2 ** 8 + shift)
            shift += 1
    return {b: _chr(c) for b, c in zip(byte_values, char_codes)}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).

    Parameters
    ----------
    word : tuple of str
        The symbol sequence to extract bigrams from.

    Returns
    -------
    set of (str, str)
        All consecutive symbol pairs.  Empty for words with fewer than two
        symbols (the previous implementation raised ``IndexError`` on an
        empty word).
    """
    # zip pairs each symbol with its successor and naturally yields nothing
    # for words of length 0 or 1.
    return set(zip(word, word[1:]))
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate S3's "not found" into the same error a missing local
            # file would produce; re-raise everything else untouched.
            status = int(exc.response["Error"]["Code"])
            if status == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise
    return wrapper
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parse_result = urlparse(url)
    if not (parse_result.netloc and parse_result.path):
        raise ValueError("bad s3 path {}".format(url))
    bucket = parse_result.netloc
    key = parse_result.path
    # urlparse keeps the leading '/' on the path; S3 keys don't use it.
    if key.startswith("/"):
        key = key[1:]
    return bucket, key
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    bucket, key = split_s3_path(url)
    s3_object = boto3.resource("s3").Object(bucket, key)
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3."""
    bucket, key = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket).download_fileobj(key, temp_file)
def http_get(url, temp_file):
    """Stream the resource at ``url`` into the open file ``temp_file``."""
    response = requests.get(url, stream=True)
    length_header = response.headers.get('Content-Length')
    total = None if length_header is None else int(length_header)
    progress = tqdm(unit="B", total=total)
    for chunk in response.iter_content(chunk_size=1024):
        if not chunk:
            # keep-alive chunks are empty; skip them
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
def cached_path(url_or_filename, cache_dir=None):
    """
    Resolve ``url_or_filename`` to a local file path.

    Remote resources (``http``, ``https`` or ``s3`` schemes) are downloaded
    and cached, and the cached path is returned.  Existing local paths are
    returned unchanged.

    Raises ``EnvironmentError`` when a local path does not exist, and
    ``ValueError`` when the argument is neither a URL nor a local path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    # Normalise pathlib.Path arguments to plain strings (Python 3 only).
    if sys.version_info[0] == 3:
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # URL, so get it from the cache (downloading if necessary).
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        # A falsy etag ('' or None) leaves the name unchanged, as before.
        name = name + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Parameters
    ----------
    url : str
        An ``http(s)://`` or ``s3://`` URL to fetch.
    cache_dir : str or Path, optional
        Directory used for the cache; defaults to ``PYTORCH_TRANSFORMERS_CACHE``.

    Returns
    -------
    str
        Path to the locally cached copy of the resource.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    # Normalise Path (py3) / non-str (py2) cache_dir values to plain strings.
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        try:
            response = requests.head(url, allow_redirects=True)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            # No connection: fall through with etag=None and try the
            # most-recently-downloaded cached copy below.
            etag = None
    if sys.version_info[0] == 2 and etag is not None:
        # Decode the bytes ETag to text on Python 2 before hashing it.
        etag = etag.decode('utf-8')
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
        # Metadata sidecar files end in '.json'; exclude them from candidates.
        matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                if sys.version_info[0] == 2 and isinstance(output_string, str):
                    output_string = unicode(output_string, 'utf-8')  # The beauty of python 2
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
class PreTrainedTokenizer(object):
""" Base class for all tokenizers.
Handle all the shared methods for tokenization and special tokens as well as methods dowloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
Parameters:
- ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token``
- ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token``
- ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token``
- ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens``
"""
vocab_files_names = {}
pretrained_vocab_files_map = {}
max_model_input_sizes = {}
SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token",
"pad_token", "cls_token", "mask_token",
"additional_special_tokens"]
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
def __init__(self, max_len=None, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._additional_special_tokens = []
self.max_len = max_len if max_len is not None else int(1e12)
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(
isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
setattr(self, key, value)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
    r"""
    Instantiate a :class:`~pytorch_transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
    Args:
        pretrained_model_name_or_path: either:
            - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
            - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
            - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
        cache_dir: (`optional`) string:
            Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
        inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
        kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~pytorch_transformers.PreTrainedTokenizer` for details.
    Examples::
        # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
        # Download vocabulary from S3 and cache.
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
        tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
        # If the tokenizer uses a single vocabulary file, you can point directly to this file
        tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
        # You can link tokens to special vocabulary when instantiating
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
        # You should be sure '<unk>' is in the vocabulary when doing that.
        # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
        assert tokenizer.unk_token == '<unk>'
    """
    # Public entry point; the actual resolution/download/instantiation logic
    # lives in _from_pretrained.
    return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
    """Resolve, download/cache and load the vocabulary files for
    *pretrained_model_name_or_path*, then instantiate the tokenizer.

    Returns the tokenizer instance, or ``None`` when no vocabulary files
    could be located or downloaded (errors are logged, not raised).
    """
    cache_dir = kwargs.pop('cache_dir', None)
    s3_models = list(cls.max_model_input_sizes.keys())
    vocab_files = {}
    if pretrained_model_name_or_path in s3_models:
        # Get the vocabulary from AWS S3 bucket
        for file_id, map_list in cls.pretrained_vocab_files_map.items():
            vocab_files[file_id] = map_list[pretrained_model_name_or_path]
    else:
        # Get the vocabulary from local files
        logger.info(
            "Model name '{}' not found in model shortcut name list ({}). "
            "Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
                pretrained_model_name_or_path, ', '.join(s3_models),
                pretrained_model_name_or_path))
        # Look for the tokenizer main vocabulary files
        for file_id, file_name in cls.vocab_files_names.items():
            if os.path.isdir(pretrained_model_name_or_path):
                # If a directory is provided we look for the standard filenames
                full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
            else:
                # If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file)
                full_file_name = pretrained_model_name_or_path
            if not os.path.exists(full_file_name):
                logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                full_file_name = None
            vocab_files[file_id] = full_file_name
        # Look for the additional tokens files
        all_vocab_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
                                 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE}
        # If a path to a file was provided, get the parent directory
        saved_directory = pretrained_model_name_or_path
        if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
            saved_directory = os.path.dirname(saved_directory)
        for file_id, file_name in all_vocab_files_names.items():
            full_file_name = os.path.join(saved_directory, file_name)
            if not os.path.exists(full_file_name):
                logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                full_file_name = None
            vocab_files[file_id] = full_file_name
        if all(full_file_name is None for full_file_name in vocab_files.values()):
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find tokenizer files"
                "at this path or url.".format(
                    pretrained_model_name_or_path, ', '.join(s3_models),
                    pretrained_model_name_or_path, ))
            return None
    # Get files from url, cache, or disk depending on the case
    try:
        resolved_vocab_files = {}
        for file_id, file_path in vocab_files.items():
            if file_path is None:
                resolved_vocab_files[file_id] = None
            else:
                resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir)
    except EnvironmentError:
        if pretrained_model_name_or_path in s3_models:
            logger.error("Couldn't reach server to download vocabulary.")
        else:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path, ', '.join(s3_models),
                    pretrained_model_name_or_path, str(vocab_files.keys())))
        return None
    for file_id, file_path in vocab_files.items():
        if file_path == resolved_vocab_files[file_id]:
            logger.info("loading file {}".format(file_path))
        else:
            logger.info("loading file {} from cache at {}".format(
                file_path, resolved_vocab_files[file_id]))
    # Set max length if needed
    if pretrained_model_name_or_path in cls.max_model_input_sizes:
        # if we're using a pretrained model, ensure the tokenizer
        # wont index sequences longer than the number of positional embeddings
        max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
        if max_len is not None and isinstance(max_len, (int, float)):
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
    # Merge resolved_vocab_files arguments in kwargs.
    added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
    special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
    for args_name, file_path in resolved_vocab_files.items():
        if args_name not in kwargs:
            kwargs[args_name] = file_path
    if special_tokens_map_file is not None:
        # Fix: close the file handle deterministically instead of leaking it
        # via json.load(open(...)).
        with open(special_tokens_map_file, encoding="utf-8") as reader:
            special_tokens_map = json.load(reader)
        for key, value in special_tokens_map.items():
            if key not in kwargs:
                kwargs[key] = value
    # Instantiate tokenizer.
    tokenizer = cls(*inputs, **kwargs)
    # Add supplementary tokens.
    if added_tokens_file is not None:
        # Fix: same descriptor-leak fix as above.
        with open(added_tokens_file, encoding="utf-8") as reader:
            added_tok_encoder = json.load(reader)
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        tokenizer.added_tokens_encoder.update(added_tok_encoder)
        tokenizer.added_tokens_decoder.update(added_tok_decoder)
    return tokenizer
def save_pretrained(self, save_directory):
    """ Save the tokenizer vocabulary files (with added tokens) and the
    special-tokens-to-class-attributes-mapping to a directory.
    This method make sure the full tokenizer can then be re-loaded using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
    """
    if not os.path.isdir(save_directory):
        # NOTE(review): this error path returns None while the success path
        # returns a tuple of file names -- callers must handle both.
        logger.error("Saving directory ({}) should be a directory".format(save_directory))
        return
    special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
    added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
    with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
    with open(added_tokens_file, 'w', encoding='utf-8') as f:
        # Write an empty JSON object when no tokens were added, so the file
        # is always valid JSON on reload.
        if self.added_tokens_encoder:
            out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
        else:
            out_str = u"{}"
        f.write(out_str)
    # save_vocabulary is the subclass hook that writes the base vocabulary.
    vocab_files = self.save_vocabulary(save_directory)
    return vocab_files + (special_tokens_map_file, added_tokens_file)
def save_vocabulary(self, save_directory):
    """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
    and special token mappings.
    Please use :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
    """
    raise NotImplementedError

def vocab_size(self):
    """ Size of the base vocabulary (without the added tokens) """
    # NOTE(review): __len__ below reads `self.vocab_size` *without calling it*,
    # so concrete subclasses are expected to override this as a @property.
    raise NotImplementedError

def __len__(self):
    """ Size of the full vocabulary with the added tokens """
    return self.vocab_size + len(self.added_tokens_encoder)
def add_tokens(self, new_tokens):
    """Register *new_tokens* in the tokenizer's extra vocabulary.

    Tokens the base vocabulary already knows (i.e. that do not resolve to the
    ``unk_token`` id) are skipped; the rest receive fresh ids starting at the
    current full vocabulary size.

    Args:
        new_tokens: list of token strings to add.

    Returns:
        Number of tokens actually added.
    """
    if not new_tokens:
        return 0

    accepted = []
    for candidate in new_tokens:
        assert isinstance(candidate, str) or (six.PY2 and isinstance(candidate, unicode))
        # A token is "new" when it is not the unk token itself but still maps
        # to the unk id (short-circuit keeps the id lookups lazy).
        if candidate != self.unk_token and \
                self.convert_tokens_to_ids(candidate) == self.convert_tokens_to_ids(self.unk_token):
            accepted.append(candidate)
            logger.info("Adding %s to the vocabulary", candidate)

    encoder_update = {tok: len(self) + offset for offset, tok in enumerate(accepted)}
    decoder_update = {idx: tok for tok, idx in encoder_update.items()}
    self.added_tokens_encoder.update(encoder_update)
    self.added_tokens_decoder.update(decoder_update)
    return len(accepted)
def add_special_tokens(self, special_tokens_dict):
    """
    Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
    to class attributes. If special tokens are NOT in the vocabulary, they are added
    to it (indexed starting from the last index of the current vocabulary).
    Args:
        special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
            [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
            ``additional_special_tokens``].
            Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
    Returns:
        Number of tokens added to the vocabulary.
    Examples::
        # Let's see how to add a new classification token to GPT-2
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        special_tokens_dict = {'cls_token': '<CLS>'}
        num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
        print('We have added', num_added_toks, 'tokens')
        model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
        assert tokenizer.cls_token == '<CLS>'
    """
    if not special_tokens_dict:
        return 0
    added_tokens = 0
    for key, value in special_tokens_dict.items():
        # Unknown keys are rejected outright (AssertionError) rather than ignored.
        assert key in self.SPECIAL_TOKENS_ATTRIBUTES
        if key == 'additional_special_tokens':
            assert isinstance(value, (list, tuple)) and all(
                isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
            added_tokens += self.add_tokens(value)
        else:
            assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
            added_tokens += self.add_tokens([value])
        logger.info("Assigning %s to the %s key of the tokenizer", value, key)
        # setattr routes through the @<token>.setter properties defined above.
        setattr(self, key, value)
    return added_tokens
def tokenize(self, text, **kwargs):
    """ Converts a string in a sequence of tokens (string), using the tokenizer.
    Split in words for word-based vocabulary or sub-words for sub-word-based
    vocabularies (BPE/SentencePieces/WordPieces).
    Take care of added tokens.
    """
    def split_on_tokens(tok_list, text):
        # Recursively split `text` on the first added/special token, tokenize
        # the remaining fragments with the rest of the token list, and keep
        # the token itself as a single unit. The `sum((... + [tok]), [])[:-1]`
        # idiom interleaves `tok` between the tokenized fragments and drops
        # the one spurious trailing `tok`.
        if not text:
            return []
        if not tok_list:
            return self._tokenize(text, **kwargs)
        tok = tok_list[0]
        split_text = text.split(tok)
        return sum((split_on_tokens(tok_list[1:], sub_text.strip()) + [tok] \
                    for sub_text in split_text), [])[:-1]
    # Added tokens and special tokens must never be split by the underlying
    # subword tokenizer, so they are carved out first.
    added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
    tokenized_text = split_on_tokens(added_tokens, text)
    return tokenized_text
def _tokenize(self, text, **kwargs):
    """ Converts a string in a sequence of tokens (string), using the tokenizer.
    Split in words for word-based vocabulary or sub-words for sub-word-based
    vocabularies (BPE/SentencePieces/WordPieces).
    Do NOT take care of added tokens.
    """
    # Subclass hook: the concrete subword algorithm lives here.
    raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
    """Map a single token (str) or a sequence of tokens to vocabulary id(s),
    consulting the added-token table before the base vocabulary. Emits a
    warning when the resulting sequence exceeds ``max_len``."""
    if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
        return self._convert_token_to_id_with_added_voc(tokens)

    ids = [self._convert_token_to_id_with_added_voc(token) for token in tokens]
    if len(ids) > self.max_len:
        logger.warning("Token indices sequence length is longer than the specified maximum sequence length "
                       "for this model ({} > {}). Running this sequence through the model will result in "
                       "indexing errors".format(len(ids), self.max_len))
    return ids
def _convert_token_to_id_with_added_voc(self, token):
    # Added tokens take priority; fall back to the base vocabulary (EAFP).
    try:
        return self.added_tokens_encoder[token]
    except KeyError:
        return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
    # Subclass hook: map a single token string to its base-vocabulary id.
    raise NotImplementedError
def encode(self, text, text_pair=None, add_special_tokens=False):
    """
    Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
    Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
    Args:
        text: The first sequence to be encoded.
        text_pair: Optional second sequence to be encoded.
        add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
            to their model.
    """
    if text_pair is None:
        if add_special_tokens:
            return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))
        else:
            return self.convert_tokens_to_ids(self.tokenize(text))
    # NOTE(review): the sentence-pair path calls _convert_token_to_id directly,
    # so tokens registered via add_tokens() are bypassed here, unlike the
    # single-sentence path above -- confirm whether this is intentional.
    first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text)]
    second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair)]
    if add_special_tokens:
        return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
    else:
        return first_sentence_tokens, second_sentence_tokens
def add_special_tokens_single_sentence(self, token_ids):
    # Subclass hook: wrap one encoded sentence with model-specific special tokens.
    raise NotImplementedError

def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
    # Subclass hook: combine two encoded sentences with model-specific special tokens.
    raise NotImplementedError
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
    """Map a single id (int) or a sequence of ids back to token string(s),
    consulting the added-token table before the base vocabulary.

    Args:
        skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
    """
    def _id_to_token(index):
        if index in self.added_tokens_decoder:
            return self.added_tokens_decoder[index]
        return self._convert_id_to_token(index)

    if isinstance(ids, int):
        return _id_to_token(ids)

    decoded = []
    for index in ids:
        # `all_special_ids` is evaluated before the flag, preserving the
        # original short-circuit order (the property recomputes per item).
        if index in self.all_special_ids and skip_special_tokens:
            continue
        decoded.append(_id_to_token(index))
    return decoded
def _convert_id_to_token(self, index):
    # Subclass hook: map a single vocabulary id back to its token string.
    raise NotImplementedError

def convert_tokens_to_string(self, tokens):
    """ Converts a sequence of tokens (string) in a single string.
    The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
    but we often want to remove sub-word tokenization artifacts at the same time.
    """
    # NOTE(review): this passes token *strings* to convert_ids_to_tokens,
    # which expects ids; subclasses generally override this method, but
    # confirm the base-class behavior is ever exercised.
    return ' '.join(self.convert_ids_to_tokens(tokens))
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    """
    Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
    with options to remove special tokens and clean up tokenization spaces.
    Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.

    When the text contains ``sep_token``, it is split into a list of sentences
    instead of a single string.
    """
    filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
    text = self.convert_tokens_to_string(filtered_tokens)
    if self.sep_token is not None and self.sep_token in text:
        # Normalize [CLS]-style markers to the separator before splitting into
        # sentences. Fix: guard against an unset cls_token --
        # str.replace(None, ...) would raise TypeError.
        if self.cls_token is not None:
            text = text.replace(self.cls_token, self.sep_token)
        split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self.sep_token)))
        if clean_up_tokenization_spaces:
            clean_text = [self.clean_up_tokenization(text) for text in split_text]
            return clean_text
        else:
            return split_text
    else:
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
@property
def special_tokens_map(self):
    """Mapping of special-token attribute names (cls_token, unk_token, ...)
    to their values ('<cls>', '<unk>', ...); unset/empty tokens are omitted."""
    # Read the private attributes directly so unset tokens do not trigger the
    # getters' error logging.
    pairs = ((attr, getattr(self, "_" + attr)) for attr in self.SPECIAL_TOKENS_ATTRIBUTES)
    return {attr: value for attr, value in pairs if value}
@property
def all_special_tokens(self):
    """Deduplicated list of every special-token value currently set
    ('<unk>', '<cls>', ...), flattening ``additional_special_tokens``."""
    collected = []
    for value in self.special_tokens_map.values():
        collected.extend(value if isinstance(value, (list, tuple)) else [value])
    # Set-based dedup; ordering is unspecified, as in the original.
    return list(set(collected))
@property
def all_special_ids(self):
    """Vocabulary ids of every special token currently set, in the same
    (unspecified) order as ``all_special_tokens``."""
    return [self._convert_token_to_id(tok) for tok in self.all_special_tokens]
@staticmethod
def clean_up_tokenization(out_string):
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ",
"'").replace(
" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're",
"'re")
return out_string
| 40,379 | 45.736111 | 380 | py |
BertGen | BertGen-master/common/nlp/roberta/modeling_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from external.pytorch_pretrained_bert.modeling import (BertConfig, BertEmbeddings,
BertLayerNorm, BertModel,
BertPreTrainedModel, gelu)
logger = logging.getLogger(__name__)

# Download locations (shortcut name -> S3 URL) for pretrained RoBERTa weights.
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
    'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
    'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
}

# Download locations (shortcut name -> S3 URL) for the matching config files.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json",
    'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json",
    'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json",
}
class RobertaEmbeddings(BertEmbeddings):
    """BERT embeddings with RoBERTa's position-id convention.

    RoBERTa reserves index 1 as the padding position, so absolute position
    ids are numbered starting at ``padding_idx + 1`` instead of 0
    (cf. fairseq's ``utils.make_positions``).
    """

    def __init__(self, config):
        super(RobertaEmbeddings, self).__init__(config)
        self.padding_idx = 1

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        if position_ids is None:
            seq_length = input_ids.size(1)
            first_position = self.padding_idx + 1
            position_ids = torch.arange(first_position, first_position + seq_length,
                                        dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        return super(RobertaEmbeddings, self).forward(
            input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
class RobertaConfig(BertConfig):
    """Configuration for RoBERTa models; identical to :class:`BertConfig`
    except that pretrained configs resolve against the RoBERTa archive map."""
    pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
`RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
Veselin Stoyanov. It is based on Google's BERT model released in 2018.
It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
objective and training with much larger mini-batches and learning rates.
This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained
models.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:
https://arxiv.org/abs/1907.11692
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.RobertaConfig`): Model configuration class with all the parameters of the
model.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, RoBERTa input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP][SEP] no it is not . [SEP]``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with
the ``add_special_tokens`` parameter set to ``True``.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1[``.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
class RobertaModel(BertModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaModel.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaModel, self).__init__(config)
        # Swap in the RoBERTa position-id convention; everything else is BERT.
        self.embeddings = RobertaEmbeddings(config)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
        # RoBERTa sequences start with <s> (id 0); a non-zero sum over the
        # first column means at least one sequence is missing it.
        if input_ids[:, 0].sum().item() != 0:
            logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. "
                           "This model requires special tokens in order to work. "
                           "Please specify add_special_tokens=True in your encoding.")
        return super(RobertaModel, self).forward(input_ids, token_type_ids, attention_mask, position_ids, head_mask)
class RobertaForMaskedLM(BertPreTrainedModel):
    r"""
    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
        in ``[0, ..., config.vocab_size]``
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMaskedLM.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForMaskedLM, self).__init__(config)
        self.roberta = RobertaModel(config)
        self.lm_head = RobertaLMHead(config)
        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
        Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, position_ids=None,
                head_mask=None):
        outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                               attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        if masked_lm_labels is not None:
            # ignore_index=-1 skips masked-out positions in the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs
        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class RobertaLMHead(nn.Module):
    """Masked-LM prediction head for RoBERTa: dense -> GELU -> LayerNorm ->
    projection back to vocabulary size, with a separately-learned output bias."""

    def __init__(self, config):
        super(RobertaLMHead, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, features, **kwargs):
        hidden = self.layer_norm(gelu(self.dense(features)))
        # Project back to vocabulary size, adding the standalone bias term
        # (the decoder itself is bias-free so its weight can be tied).
        return self.decoder(hidden) + self.bias
class RobertaForSequenceClassification(BertPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the sequence classification/regression loss.
        Indices should be in ``[0, ..., config.num_labels]``.
        If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
        If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForSequenceClassification.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        self.classifier = RobertaClassificationHead(config)
        # Fix: initialize weights like the sibling classes (RobertaModel,
        # RobertaForMaskedLM); without this the classifier head is left with
        # uninitialized parameters when not loading a full checkpoint.
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                               attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions)
class RobertaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks.

    Pools the first (<s>) token, passes it through a dense + tanh projection
    and a final linear layer producing ``config.num_labels`` scores.
    """

    def __init__(self, config):
        super(RobertaClassificationHead, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        # Take the <s> token (equivalent to BERT's [CLS]) as the pooled vector.
        pooled = features[:, 0, :]
        pooled = self.dense(self.dropout(pooled))
        pooled = self.dropout(torch.tanh(pooled))
        return self.out_proj(pooled)
| 17,448 | 53.021672 | 134 | py |
BertGen | BertGen-master/common/nlp/bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
class ConstantLRSchedule(LambdaLR):
    """Constant learning rate schedule.

    The multiplier is always 1.0, so the optimizer's base LR is used as-is.
    """

    def __init__(self, optimizer, last_epoch=-1):
        super().__init__(optimizer, lambda step: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
    """Linear warmup, then a constant rate.

    The multiplier grows linearly from 0 to 1 over ``warmup_steps`` training
    steps and stays at 1 afterwards.
    """

    def __init__(self, optimizer, warmup_steps, last_epoch=-1):
        self.warmup_steps = warmup_steps
        super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step >= self.warmup_steps:
            return 1.
        # Still warming up: linear ramp (max guards against warmup_steps == 0).
        return float(step) / float(max(1.0, self.warmup_steps))
class WarmupLinearSchedule(LambdaLR):
    """Linear warmup followed by linear decay.

    The multiplier rises from 0 to 1 over ``warmup_steps`` training steps,
    then falls linearly from 1 back to 0 over the remaining
    ``t_total - warmup_steps`` steps.
    """

    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            # Warmup phase: linear ramp toward 1.
            return float(step) / float(max(1, self.warmup_steps))
        # Decay phase: fraction of the post-warmup span still remaining,
        # clamped at 0 once step passes t_total.
        remaining = float(self.t_total - step)
        decay_span = float(max(1.0, self.t_total - self.warmup_steps))
        return max(0.0, remaining / decay_span)
class WarmupCosineSchedule(LambdaLR):
    """Linear warmup followed by cosine decay.

    The multiplier rises linearly from 0 to 1 over ``warmup_steps`` steps,
    then follows a cosine curve from 1 down to 0 over the remaining
    ``t_total - warmup_steps`` steps. With ``cycles`` different from the
    default 0.5, the multiplier traces that many cosine cycles after warmup.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        # Fraction of the post-warmup span already covered.
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        cosine = math.cos(math.pi * float(self.cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1. + cosine))
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
    """Linear warmup, then cosine cycles with hard restarts.

    The multiplier rises linearly from 0 to 1 over ``warmup_steps`` steps,
    then traces ``cycles`` cosine decays (each restarting at 1) over the
    remaining ``t_total - warmup_steps`` steps, and is 0 past ``t_total``.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        # Fraction of the post-warmup span already covered.
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the top of every cycle.
        phase = (float(self.cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1. + math.cos(math.pi * phase)))
class AdamW(Optimizer):
    """ Implements Adam algorithm with weight decay fix (decoupled weight decay).

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure given.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # (in-place). FIX: the old positional-scalar overloads
                # add_(scalar, tensor) / addcmul_(scalar, t1, t2) /
                # addcdiv_(scalar, t1, t2) were removed from PyTorch and raise
                # a TypeError on current releases; use the alpha=/value=
                # keyword forms instead (numerically identical).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step_size = group['lr']
                if group['correct_bias']:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version)
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])

        return loss
| 8,633 | 44.925532 | 130 | py |
BertGen | BertGen-master/common/metrics/eval_metric.py | import torch
import torch.distributed as distributed
class EvalMetric(object):
    """Base class for all evaluation metrics.

    .. note::
        This is a base class that provides common metric interfaces.
        One should not use this class directly, but instead create new metric
        classes that extend it.

    Args
        name (str): Name of this metric instance for display.
        allreduce (bool): If True, sum counts across ranks in ``get()``.
        num_replicas (int): Number of distributed replicas.
    """

    def __init__(self, name, allreduce=False, num_replicas=1, **kwargs):
        self.name = str(name)
        self.allreduce = allreduce
        self.num_replicas = num_replicas
        self._kwargs = kwargs
        self.reset()

    def __str__(self):
        return "EvalMetric: {}".format(dict(self.get_name_value()))

    def update(self, outputs):
        """Updates the internal evaluation result.

        Subclasses must implement this with their own accumulation logic.
        """
        raise NotImplementedError()

    def reset(self):
        """Resets the internal evaluation result to initial state."""
        self.num_inst = torch.tensor(0.)
        self.sum_metric = torch.tensor(0.)

    def get(self):
        """Returns the current evaluation result.

        Returns:
            A ``(name, value)`` tuple; value is NaN when nothing was counted.
        """
        if self.num_inst.item() == 0:
            return (self.name, float('nan'))
        if self.allreduce:
            # Sum counts and totals across all ranks before averaging.
            count = self.num_inst.clone().cuda()
            total = self.sum_metric.clone().cuda()
            distributed.all_reduce(count, op=distributed.ReduceOp.SUM)
            distributed.all_reduce(total, op=distributed.ReduceOp.SUM)
            result = (total / count).detach().cpu()
        else:
            result = (self.sum_metric / self.num_inst).detach().cpu()
        return (self.name, result.item())

    def get_name_value(self):
        """Returns zipped name and value pairs.

        Returns
            A list of ``(name, value)`` tuples.
        """
        names, values = self.get()
        if not isinstance(names, list):
            names = [names]
        if not isinstance(values, list):
            values = [values]
        return list(zip(names, values))
| 2,371 | 33.376812 | 80 | py |
BertGen | BertGen-master/common/metrics/pretrain_metrics.py | import torch
from .eval_metric import EvalMetric
class LossLogger(EvalMetric):
    """Tracks the running mean of a scalar loss taken from the output dict."""

    def __init__(self, output_name, display_name=None,
                 allreduce=False, num_replicas=1):
        self.output_name = output_name
        shown = output_name if display_name is None else display_name
        super(LossLogger, self).__init__(shown, allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            # Silently skip batches that do not report this loss.
            if self.output_name in outputs:
                self.sum_metric += float(outputs[self.output_name].mean().item())
                self.num_inst += 1
class RelationshipAccuracy(EvalMetric):
    """Accuracy of the binary relationship (next-sentence style) head."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(RelationshipAccuracy, self).__init__('RelAcc', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['relationship_logits']
            label = outputs['relationship_label']
            # Single sigmoid output: threshold at 0.5 rather than argmax
            # over two logits (FM edit in the original).
            pred = (logits > 0.5).to(device=logits.device, dtype=torch.float).squeeze()
            self.sum_metric += float((pred == label).sum().item())
            self.num_inst += logits.shape[0]
class MLMAccuracy(EvalMetric):
    """Masked-LM accuracy over positions whose label is not the ignore value -1."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracy, self).__init__('MLMAcc', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits']
            label = outputs['mlm_label']
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MLMAccuracyWVC(EvalMetric):
    """Masked-LM accuracy for the 'wvc' output stream (ignore label -1)."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracyWVC, self).__init__('MLMAccWVC', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits_wvc']
            label = outputs['mlm_label_wvc']
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MLMAccuracyAUX(EvalMetric):
    """Masked-LM accuracy for the auxiliary output stream (ignore label -1)."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracyAUX, self).__init__('MLMAccAUX', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits_aux']
            label = outputs['mlm_label_aux']
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MLMAccuracyGlobal(EvalMetric):
    """Masked-LM accuracy for outputs keyed by a configurable ``eval_name`` suffix."""

    def __init__(self, allreduce=False, num_replicas=1, eval_name='default_name'):
        super(MLMAccuracyGlobal, self).__init__('MLMAccuracy' + eval_name, allreduce, num_replicas)
        self.eval_name = eval_name

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits_' + self.eval_name]
            label = outputs['mlm_label_' + self.eval_name]
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MLMAccuracyDataset1(EvalMetric):
    """Masked-LM accuracy restricted to dataset-1 outputs (ignore label -1)."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracyDataset1, self).__init__('MLMAccDataset1', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits_dataset1']
            label = outputs['mlm_label_dataset1']
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MLMAccuracyDataset2(EvalMetric):
    """Masked-LM accuracy restricted to dataset-2 outputs (ignore label -1)."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracyDataset2, self).__init__('MLMAccDataset2', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits_dataset2']
            label = outputs['mlm_label_dataset2']
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MLMAccuracyDataset3(EvalMetric):
    """Masked-LM accuracy restricted to dataset-3 outputs (ignore label -1)."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracyDataset3, self).__init__('MLMAccDataset3', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mlm_logits_dataset3']
            label = outputs['mlm_label_dataset3']
            valid = label != -1
            n_valid = valid.sum().item()
            if n_valid > 0:
                hits = (logits[valid].argmax(dim=1) == label[valid]).sum().item()
                self.sum_metric += float(hits)
                self.num_inst += n_valid
class MVRCAccuracy(EvalMetric):
    """Masked visual region classification accuracy over soft labels."""

    def __init__(self, allreduce=False, num_replicas=1):
        super(MVRCAccuracy, self).__init__('MVRCAccuracy', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mvrc_logits']
            label = outputs['mvrc_label']
            # Valid RoIs are the rows whose soft-label distribution sums to ~1.
            valid = (label.sum(2) - 1.0).abs() < 0.1
            n_valid = valid.sum().item()
            if n_valid > 0:
                # Boolean indexing collapses (batch, #RoI, #classes) to
                # (#valid_RoI, #classes).
                pred = logits[valid].argmax(dim=1)
                target = label[valid].argmax(dim=1)
                self.sum_metric += float((pred == target).sum().item())
                self.num_inst += n_valid
class MVRCAccuracyGlobal(EvalMetric):
    """MVRC accuracy for outputs keyed by a configurable ``eval_name`` suffix."""

    def __init__(self, allreduce=False, num_replicas=1, eval_name='default_name'):
        super(MVRCAccuracyGlobal, self).__init__('MVRCAccuracy' + eval_name, allreduce, num_replicas)
        self.eval_name = eval_name

    def update(self, outputs):
        with torch.no_grad():
            logits = outputs['mvrc_logits_' + self.eval_name]
            label = outputs['mvrc_label_' + self.eval_name]
            # Valid RoIs are the rows whose soft-label distribution sums to ~1.
            valid = (label.sum(2) - 1.0).abs() < 0.1
            n_valid = valid.sum().item()
            if n_valid > 0:
                # Boolean indexing collapses (batch, #RoI, #classes) to
                # (#valid_RoI, #classes).
                pred = logits[valid].argmax(dim=1)
                target = label[valid].argmax(dim=1)
                self.sum_metric += float((pred == target).sum().item())
                self.num_inst += n_valid
| 6,797 | 40.2 | 131 | py |
BertGen | BertGen-master/common/metrics/composite_eval_metric.py | import numpy as np
from .eval_metric import EvalMetric
import torch
class CompositeEvalMetric(EvalMetric):
    """Manages multiple evaluation metrics.

    Args:
        metrics (list of EvalMetric): List of child metrics.
        name (str): Name of this metric instance for display.
    """

    def __init__(self, metrics=None, name='composite'):
        super(CompositeEvalMetric, self).__init__(name)
        if metrics is None:
            metrics = []
        self.metrics = metrics

    def add(self, metric):
        """Adds a child metric.

        Args:
            metric (EvalMetric): A metric instance.
        """
        self.metrics.append(metric)

    def get_metric(self, index):
        """Returns a child metric.

        Args:
            index (int): Index of child metric in the list of metrics.

        Raises:
            ValueError: If ``index`` is out of range.
        """
        try:
            return self.metrics[index]
        except IndexError:
            # BUG FIX: this used to *return* the ValueError instance instead of
            # raising it, silently handing callers an exception object.
            raise ValueError("Metric index {} is out of range 0 and {}".format(
                index, len(self.metrics)))

    def update(self, outputs):
        """Updates the internal evaluation result of every child metric.

        Args:
            outputs (dict): Model outputs forwarded to each child metric.
        """
        for metric in self.metrics:
            metric.update(outputs)

    def reset(self):
        """Resets the internal evaluation result to initial state."""
        try:
            for metric in self.metrics:
                metric.reset()
        except AttributeError:
            # reset() is called from EvalMetric.__init__ before self.metrics
            # exists; ignore that first invocation.
            pass

    def get(self):
        """Returns the current evaluation results of all child metrics.

        Returns:
            names (list of str): Names of the metrics.
            values (list of float): Values of the evaluations.
        """
        names = []
        values = []
        for metric in self.metrics:
            name, value = metric.get()
            if isinstance(name, str):
                name = [name]
            if isinstance(value, (float, int, np.generic, torch.Tensor)):
                value = [value]
            names.extend(name)
            values.extend(value)
        return names, values
| 2,153 | 29.771429 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.