repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
CTranslate2 | CTranslate2-master/tools/benchmark/opennmt_ende_wmt14/opennmt_py/convertv2_v3.py | import torch
import imp
import argparse
import pyonmttok
from onmt.constants import DefaultTokens
from onmt.inputters.inputter import vocabs_to_dict
# with the two module = imp.load_source() below
# we ghost the old torchtext.data.field and depercated
# onmt.inputters.text_dataset
# however this require some functions / classes to be
# monkey patched for loading the old field/vocab objects.
def _feature_tokenize():
    """No-op stand-in for the legacy onmt tokenize hook; only needed so that
    pickled v2 Field objects can be loaded."""
    return 0
class RawField(object):
    """Minimal stand-in for the removed torchtext ``RawField`` class, used
    only to satisfy unpickling of v2 checkpoints."""

    def __init__(self):
        pass
class TextMultiField(RawField):
    """Ghost of the deprecated ``onmt.inputters.text_dataset.TextMultiField``;
    exists only so old pickled vocab objects resolve during ``torch.load``."""

    def __init__(self):
        pass
class Field(RawField):
    """Ghost of the old ``torchtext.data.field.Field`` class; exists only so
    old pickled vocab objects resolve during ``torch.load``."""

    def __init__(self):
        pass
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v2model",
        type=str,
        required=True,
        help="Source OpenNMT-py v2.x model to be converted in v3.x",
    )
    parser.add_argument(
        "-v3model",
        type=str,
        required=True,
        help="Target model to be used by OpenNMT-py v3.x",
    )
    opt = parser.parse_args()
    print(opt)
    # Register this very file under the legacy module names so torch.load can
    # unpickle the old torchtext Field/Vocab objects stored in v2 checkpoints.
    module1 = imp.load_source("torchtext.data.field", "convertv2_v3.py")
    module2 = imp.load_source("onmt.inputters.text_dataset", "convertv2_v3.py")
    # NOTE: torch.load unpickles arbitrary objects; only run on trusted checkpoints.
    checkpoint = torch.load(opt.v2model, map_location="cpu")
    vocabs = {}
    # Rebuild the source vocabulary from the legacy multifield's itos list.
    multifield = checkpoint["vocab"]["src"]
    multifields = multifield.fields
    _, fields = multifields[0]
    voc = fields.vocab.__dict__["itos"]
    src_vocab = pyonmttok.build_vocab_from_tokens(
        voc, maximum_size=0, minimum_frequency=1
    )
    src_vocab.default_id = src_vocab[DefaultTokens.UNK]
    vocabs["src"] = src_vocab
    print("Source vocab size is:", len(src_vocab))
    # Rebuild the target vocabulary the same way.
    multifield = checkpoint["vocab"]["tgt"]
    multifields = multifield.fields
    _, fields = multifields[0]
    voc = fields.vocab.__dict__["itos"]
    tgt_vocab = pyonmttok.build_vocab_from_tokens(
        voc, maximum_size=0, minimum_frequency=1
    )
    # Bug fix: the default (unknown) id must be looked up in the *target*
    # vocabulary, not the source one — they only coincide when the
    # vocabularies are shared.
    tgt_vocab.default_id = tgt_vocab[DefaultTokens.UNK]
    vocabs["tgt"] = tgt_vocab
    print("Target vocab size is:", len(tgt_vocab))
    if hasattr(checkpoint["opt"], "data_task"):
        print("Model is type:", checkpoint["opt"].data_task)
        vocabs["data_task"] = checkpoint["opt"].data_task
    else:
        vocabs["data_task"] = "seq2seq"
    checkpoint["vocab"] = vocabs_to_dict(vocabs)
    # v3 renamed the "rnn_size" option to "hidden_size".
    checkpoint["opt"].__dict__["hidden_size"] = checkpoint["opt"].__dict__.pop(
        "rnn_size"
    )
    # v2 models always used Q/K/V biases; make that explicit for v3.
    checkpoint["opt"].__dict__["add_qkvbias"] = True
    torch.save(checkpoint, opt.v3model)
| 2,542 | 27.573034 | 79 | py |
CTranslate2 | CTranslate2-master/python/ctranslate2/specs/model_spec.py | """Specifications declare the expected variables layout of CTranslate2 models
that do not load a computation graph. The model converter should make sure that
each required variable of the specification is set.
"""
import abc
import ctypes
import json
import os
import shutil
import struct
from typing import Dict, List, Optional
import numpy as np
try:
import torch
torch_is_available = True
except ImportError:
torch_is_available = False
# Sentinel value marking a specification attribute as optional.
OPTIONAL = "__optional"

# Version number written at the head of model.bin by ModelSpec._serialize.
CURRENT_BINARY_VERSION = 6
def _join_scope(scope, name):
    """Join a scope prefix and a name with '/', returning the bare name when
    the scope is empty."""
    return name if not scope else "%s/%s" % (scope, name)
def _split_scope(scope):
    """Split a '/'-joined scope string into its component keys."""
    return scope.split("/")
def _parent_scope(scope):
    """Return ``(parent_scope, attribute_name)`` for a '/'-joined scope."""
    *parents, attr = scope.split("/")
    return "/".join(parents), attr
def visit_spec(spec, fn, scope=""):
"""Recursively visits a layer spec."""
for name, value in list(spec.__dict__.items()):
if name.startswith("_"):
continue
if isinstance(value, list):
for i, elem in enumerate(value):
visit_spec(elem, fn, scope=_join_scope(scope, "%s_%d" % (name, i)))
elif isinstance(value, LayerSpec):
visit_spec(value, fn, scope=_join_scope(scope, name))
else:
fn(spec, _join_scope(scope, name), value)
def index_spec(spec, index):
    """Resolve a '/'-joined scope produced by visit_spec back to the nested
    spec object; "name_3" segments index into list attributes."""
    if not index:
        return spec
    for key in index.split("/"):
        try:
            spec = getattr(spec, key)
        except AttributeError:
            # "name_3" means element 3 of the list attribute "name".
            attr, position = key.rsplit("_", 1)
            spec = getattr(spec, attr)[int(position)]
    return spec
class FrozenMeta(type):
    """Metaclass that marks instances as frozen once __init__ has completed,
    enabling FrozenAttr's new-attribute check."""

    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)
        instance._frozen = True
        return instance
class FrozenAttr:
    """Mixin that forbids creating new attributes after the instance has been
    frozen (existing attributes can still be reassigned)."""

    def __setattr__(self, key, value):
        if not hasattr(self, key) and hasattr(self, "_frozen"):
            raise AttributeError("Attribute %s does not exist" % key)
        super().__setattr__(key, value)
class LayerSpec(FrozenAttr, metaclass=FrozenMeta):
    """A layer specification declares the weights that should be set by the converters."""

    def validate(self) -> None:
        """Verify that the required weights are set.

        Also normalizes every set value into a Variable wrapper (NumpyVariable
        or PyTorchVariable) so later passes can treat them uniformly.

        Raises:
          ValueError: If a required weight is not set in the specification.
        """
        unset_attributes = []

        def _check(spec, name, value):
            # A None leaf means the converter never set this required weight.
            if value is None:
                unset_attributes.append(name)
                return
            if isinstance(value, np.ndarray):
                # float64 is not a supported type.
                if value.dtype == np.float64:
                    value = value.astype(np.float32)
            elif isinstance(value, float):
                value = np.dtype("float32").type(value)
            elif isinstance(value, bool):
                # Convert bool to an integer type.
                value = np.dtype("int8").type(value)
            elif isinstance(value, str):
                # Strings (other than the OPTIONAL sentinel) are stored as
                # UTF-8 bytes in an int8 array.
                if value != OPTIONAL:
                    value = np.frombuffer(value.encode("utf-8"), dtype=np.int8)
            # Wrap arrays/scalars/tensors into the common Variable interface.
            if isinstance(value, np.ndarray) or isinstance(value, np.generic):
                value = NumpyVariable(value)
            elif torch_is_available and isinstance(value, torch.Tensor):
                value = PyTorchVariable(value)
            attr_name = _split_scope(name)[-1]
            setattr(spec, attr_name, value)

        self._visit(_check)
        if unset_attributes:
            raise ValueError(
                "Some required model attributes are not set:\n\n%s"
                % "\n".join(unset_attributes)
            )

    def variables(
        self,
        prefix: str = "",
        ordered: bool = False,
    ) -> Dict[str, np.ndarray]:
        """Recursively returns the weights from this layer and its children.

        Arguments:
          prefix: Prefix to prepend to all variable names.
          ordered: If set, an ordered list is returned instead.

        Returns:
          Dictionary mapping variables name to value (OPTIONAL placeholders
          are skipped).
        """
        var = {}

        def _register_var(spec, name, value):
            if isinstance(value, str) and value == OPTIONAL:
                return
            var[_join_scope(prefix, name)] = value

        self._visit(_register_var)
        if ordered:
            # Sort by variable name for a deterministic order.
            return list(sorted(var.items(), key=lambda x: x[0]))
        return var

    def _alias_variables(self):
        """Find duplicate variables in spec and create aliases."""
        # When a variable is duplicated, keep the version that comes first in
        # the alphabetical order and alias the others.
        variables = self.variables(ordered=True)
        for name, value in reversed(variables):
            for other_name, other_value in variables:
                # Stop before comparing a variable with itself (names are sorted).
                if name == other_name:
                    break
                # Because variables can be transformed on load (e.g. transposed),
                # we use an element-wise equality check.
                if not value.is_scalar() and value.equal(other_value):
                    # Replace variable value by the alias name.
                    scope, attr_name = _parent_scope(name)
                    spec = index_spec(self, scope)
                    setattr(spec, attr_name, other_name)
                    break

    def _quantize(self, quantization):
        """Possibly quantizes the variable of the layer."""

        def _quantize(spec, name, value):
            # Only non-scalar weight variables are candidates.
            if not isinstance(value, Variable) or value.is_scalar():
                return
            key = _split_scope(name)[-1]
            scale = None
            # A weight is quantizable when the spec declares a matching
            # "<name>_scale" attribute.
            is_quantizable = hasattr(spec, "%s_scale" % key)
            is_convertible = value.dtype in ("float32", "float16", "bfloat16")
            if is_quantizable:
                if quantization == "int16":
                    value = value.to("float32").numpy()
                    # Represent the value with 10 bits so the multiplication is 20 bits
                    # and 12 bits are left for accumulation.
                    scale = np.float32(2**10 / np.amax(np.absolute(value)))
                    value *= scale
                    value = np.rint(value)
                    value = np.clip(
                        value, np.iinfo(np.int16).min, np.iinfo(np.int16).max
                    )
                    value = value.astype(np.int16)
                    scale = NumpyVariable(scale)
                    value = NumpyVariable(value)
                elif quantization in ("int8", "int8_float16", "int8_bfloat16"):
                    value = value.to("float32").numpy()
                    # Per-row absolute maximum; zero rows get a neutral scale.
                    amax = np.amax(np.absolute(value), axis=1)
                    amax[amax == 0] = 127.0
                    scale = 127.0 / amax
                    value *= np.expand_dims(scale, 1)
                    value = np.rint(value)
                    value = value.astype(np.int8)
                    scale = NumpyVariable(scale)
                    value = NumpyVariable(value)
                elif quantization in ("float16", "bfloat16"):
                    value = value.to(quantization)
                else:
                    value = value.to("float32")
            elif is_convertible:
                # Non-quantizable float weights are converted to the scheme's
                # companion float type.
                if quantization in ("float16", "int8_float16"):
                    value = value.to("float16")
                elif quantization in ("bfloat16", "int8_bfloat16"):
                    value = value.to("bfloat16")
                else:
                    value = value.to("float32")
            setattr(spec, key, value)
            if scale is not None:
                setattr(spec, "%s_scale" % key, scale)

        self._visit(_quantize)

    def optimize(self, quantization: Optional[str] = None) -> None:
        """Recursively applies some optimizations to this layer:

        * Alias variables with the same shape and value.
        * Quantize weights.

        Arguments:
          quantization: Weight quantization scheme
            (possible values are: int8, int8_float16, int16, float16).
        """
        self._alias_variables()
        self._quantize(quantization)

    def _visit(self, fn):
        """Recursively visits this layer and its children."""
        visit_spec(self, fn)
def _dtype_to_type_id(object_dtype):
    """Map a dtype name to its CTranslate2 DataType enum index."""
    # Order should match the DataType enum in include/ctranslate2/types.h
    dtypes = ("float32", "int8", "int16", "int32", "float16", "bfloat16")
    if object_dtype not in dtypes:
        raise ValueError(
            "%s is not in list of supported dtypes: %s"
            % (object_dtype, ", ".join(dtypes))
        )
    return dtypes.index(object_dtype)
class ModelConfig(FrozenAttr, metaclass=FrozenMeta):
    """Base class for model configurations."""

    def __init__(self, **kwargs):
        """Initializes the configuration with a set of parameters."""
        for key, value in kwargs.items():
            setattr(self, key, value)

    def to_dict(self):
        """Returns the configuration as a dictionary."""
        # Underscore-prefixed attributes are internal and excluded.
        public_items = (
            (key, value)
            for key, value in self.__dict__.items()
            if not key.startswith("_")
        )
        return dict(public_items)

    def save_as_json(self, path):
        """Saves the configuration as a JSON file."""
        serialized = json.dumps(self.to_dict(), indent=2, sort_keys=True)
        with open(path, "w", encoding="utf-8") as config_file:
            config_file.write(serialized)
            config_file.write("\n")
class ModelSpec(LayerSpec):
    """The top level layer specification."""

    def __init__(self):
        """Initializes the model specification."""
        # Configuration saved as config.json; None disables saving.
        self._config = self.get_default_config()
        # Extra files to copy into the model directory: {filename: source path}.
        self._files = {}

    @property
    def name(self):
        """The name of the model specification."""
        raise NotImplementedError()

    @property
    def revision(self):
        """The model specification revision.

        This value is incremented each time the weights layout of the model is
        changed (e.g. a weight is renamed).
        """
        return 1

    @property
    def config(self):
        """The model configuration."""
        return self._config

    def get_default_config(self):
        """Returns the default configuration used by this model."""
        return None

    def register_file(self, path: str, filename: Optional[str] = None) -> None:
        """Registers a file to be saved in the model directory."""
        if not os.path.isfile(path):
            raise ValueError("File %s does not exist" % path)
        if filename is None:
            filename = os.path.basename(path)
        if filename in self._files:
            raise ValueError("A file with name %s was already registered" % filename)
        self._files[filename] = path

    def save(self, output_dir: str) -> None:
        """Saves this model on disk.

        Arguments:
          output_dir: Output directory where the model is saved.
        """
        self._serialize(os.path.join(output_dir, "model.bin"))
        if self._config is not None:
            self._config.save_as_json(os.path.join(output_dir, "config.json"))
        for filename, path in self._files.items():
            destination = os.path.join(output_dir, filename)
            if os.path.exists(destination):
                raise RuntimeError(
                    "File %s already exists in the model directory" % destination
                )
            shutil.copy(path, destination)

    def _serialize(self, path):
        """Serializes the model variables.

        Binary layout (little-endian per struct defaults): version, model name,
        revision, then each variable as (name, rank, dims, dtype id, byte size,
        raw data), then the alias table.
        """
        variables = []
        aliases = []
        for variable in self.variables(ordered=True):
            # A string value is an alias created by _alias_variables.
            if isinstance(variable[1], str):
                aliases.append(variable)
            else:
                variables.append(variable)
        with open(path, "wb") as model:

            def _write_string(string):
                # Strings are length-prefixed and NUL-terminated.
                model.write(struct.pack("H", len(string) + 1))
                model.write(string.encode("utf-8"))
                model.write(struct.pack("B", 0))

            model.write(struct.pack("I", CURRENT_BINARY_VERSION))
            _write_string(self.name)
            model.write(struct.pack("I", self.revision))
            model.write(struct.pack("I", len(variables)))
            for name, value in variables:
                _write_string(name)
                model.write(struct.pack("B", len(value.shape)))
                for dim in value.shape:
                    model.write(struct.pack("I", dim))
                model.write(struct.pack("B", _dtype_to_type_id(value.dtype)))
                model.write(struct.pack("I", value.num_bytes()))
                model.write(value.to_bytes())
            model.write(struct.pack("I", len(aliases)))
            for alias, variable_name in aliases:
                _write_string(alias)
                _write_string(variable_name)
def _flatten_vocabularies(vocabularies):
    """Yield (name, tokens) pairs; names get a 1-based suffix when a key maps
    to more than one vocabulary."""
    for name, vocabulary in vocabularies.items():
        if len(vocabulary) == 1:
            yield name, vocabulary[0]
            continue
        for index, vocab in enumerate(vocabulary, 1):
            yield "%s_%d" % (name, index), vocab
class SequenceToSequenceModelConfig(ModelConfig):
    """Configuration for sequence-to-sequence models."""

    def __init__(
        self,
        unk_token: str = "<unk>",
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        decoder_start_token: Optional[str] = "<s>",
        add_source_bos: bool = False,
        add_source_eos: bool = False,
        **kwargs,
    ):
        """Initializes the configuration for sequence-to-sequence models.

        Args:
          unk_token: The unknown token.
          bos_token: The start of sentence token.
          eos_token: The end of sentence token.
          decoder_start_token: The decoder start token. If ``None``, the token should
            be passed by the user in the target prefix.
          add_source_bos: If ``True``, ``bos_token`` will be automatically added to
            the source input.
          add_source_eos: If ``True``, ``eos_token`` will be automatically added to
            the source input.
          **kwargs: Additional configuration.
        """
        base_options = {
            "unk_token": unk_token,
            "bos_token": bos_token,
            "eos_token": eos_token,
            "decoder_start_token": decoder_start_token,
            "add_source_bos": add_source_bos,
            "add_source_eos": add_source_eos,
        }
        super().__init__(**base_options, **kwargs)
class SequenceToSequenceModelSpec(ModelSpec):
    """Base specification for sequence to sequence models."""

    def __init__(self):
        """Initializes a sequence to sequence model specification."""
        super().__init__()
        # Each side may hold several vocabularies (e.g. source features).
        self._vocabularies = {
            "source": [],
            "target": [],
        }

    def get_default_config(self):
        """Returns the default sequence-to-sequence configuration."""
        return SequenceToSequenceModelConfig()

    @abc.abstractmethod
    def get_source_vocabulary_size(self):
        """Returns the source vocabulary size expected by the model."""
        raise NotImplementedError()

    @abc.abstractmethod
    def get_target_vocabulary_size(self):
        """Returns the target vocabulary size expected by the model."""
        raise NotImplementedError()

    def register_source_vocabulary(self, tokens: List[str]) -> None:
        """Registers a source vocabulary of tokens.

        Arguments:
          tokens: List of source tokens.
        """
        self._vocabularies["source"].append(tokens)

    def register_target_vocabulary(self, tokens: List[str]) -> None:
        """Registers a target vocabulary of tokens.

        Arguments:
          tokens: List of target tokens.
        """
        self._vocabularies["target"].append(tokens)

    def register_vocabulary_mapping(self, path: str) -> None:
        """Registers a vocabulary mapping file.

        Arguments:
          path: Path to the vocabulary mapping file.
        """
        self.register_file(path, "vmap.txt")

    def validate(self) -> None:
        """Validates weights then checks vocabulary count and sizes.

        Raises:
          ValueError: If a vocabulary is missing or has the wrong size.
        """
        super().validate()
        # Check that vocabularies are registered and have the correct size.
        vocabulary_sizes = {
            "source": self.get_source_vocabulary_size(),
            "target": self.get_target_vocabulary_size(),
        }
        for name, sizes in vocabulary_sizes.items():
            # A single int means exactly one vocabulary is expected.
            if not isinstance(sizes, list):
                sizes = [sizes]
            vocabularies = self._vocabularies[name]
            if len(vocabularies) != len(sizes):
                raise ValueError(
                    "Incorrect number of %s vocabularies: %d registered, but expected %d"
                    % (name, len(vocabularies), len(sizes))
                )
            for i, (vocabulary, expected_size) in enumerate(zip(vocabularies, sizes)):
                if len(vocabulary) != expected_size:
                    raise ValueError(
                        "%s vocabulary %d has size %d but the model expected a vocabulary "
                        "of size %d"
                        % (name.capitalize(), i, len(vocabulary), expected_size)
                    )

    def save(self, output_dir: str) -> None:
        """Saves the vocabularies then the rest of the model."""
        # Save the vocabularies.
        vocabularies = dict(_flatten_vocabularies(self._vocabularies))
        all_vocabularies = list(vocabularies.values())
        # When every vocabulary is identical, save a single shared file.
        if all(vocabulary == all_vocabularies[0] for vocabulary in all_vocabularies):
            vocabularies = {"shared": all_vocabularies[0]}
        for name, tokens in vocabularies.items():
            _save_vocabulary(output_dir, "%s_vocabulary" % name, tokens)
        # Save the rest of the model.
        super().save(output_dir)
class LanguageModelConfig(ModelConfig):
    """Configuration for language models."""

    def __init__(
        self,
        unk_token: str = "<unk>",
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        **kwargs,
    ):
        """Initializes the configuration for language models.

        Args:
          unk_token: The unknown token.
          bos_token: The start of sentence token.
          eos_token: The end of sentence token.
          **kwargs: Additional configuration.
        """
        token_options = {
            "unk_token": unk_token,
            "bos_token": bos_token,
            "eos_token": eos_token,
        }
        super().__init__(**token_options, **kwargs)
class LanguageModelSpec(ModelSpec):
    """Base specification for language models."""

    def __init__(self):
        """Initializes a language model specification."""
        super().__init__()
        # Single token vocabulary registered via register_vocabulary.
        self._vocabulary = []

    def get_default_config(self):
        """Returns the default language model configuration."""
        return LanguageModelConfig()

    @abc.abstractmethod
    def get_vocabulary_size(self):
        """Returns the vocabulary size expected by the model."""
        raise NotImplementedError()

    def register_vocabulary(self, tokens: List[str]) -> None:
        """Registers the vocabulary of tokens.

        Arguments:
          tokens: List of tokens.
        """
        # Copy to guard against later mutation by the caller.
        self._vocabulary = list(tokens)

    def validate(self) -> None:
        """Validates weights then checks the vocabulary size.

        Raises:
          ValueError: If the vocabulary size does not match the model.
        """
        super().validate()
        expected_vocabulary_size = self.get_vocabulary_size()
        if len(self._vocabulary) != expected_vocabulary_size:
            raise ValueError(
                "Vocabulary has size %d but the model expected a vocabulary of size %d"
                % (len(self._vocabulary), expected_vocabulary_size)
            )

    def save(self, output_dir: str) -> None:
        """Saves the vocabulary then the rest of the model."""
        # Save the vocabulary.
        _save_vocabulary(output_dir, "vocabulary", self._vocabulary)
        # Save the rest of the model.
        super().save(output_dir)
def _save_vocabulary(output_dir, name, tokens):
    """Write the token list as "<name>.json" inside output_dir."""
    destination = os.path.join(output_dir, "%s.json" % name)
    serialized = json.dumps(tokens, indent=2)
    with open(destination, "w", encoding="utf-8") as vocabulary_file:
        vocabulary_file.write(serialized)
class Variable(abc.ABC):
    """Abstract base class for model variables."""

    @property
    @abc.abstractmethod
    def shape(self) -> List[int]:
        raise NotImplementedError()

    def is_scalar(self) -> bool:
        # A rank-0 variable is considered a scalar.
        return len(self.shape) == 0

    @property
    @abc.abstractmethod
    def dtype(self) -> str:
        raise NotImplementedError()

    def to(self, dtype: str) -> "Variable":
        # No-op when the variable already has the requested dtype;
        # otherwise delegate to the subclass conversion.
        if dtype == self.dtype:
            return self
        return self._to(dtype)

    @abc.abstractmethod
    def numpy(self) -> np.ndarray:
        raise NotImplementedError()

    def equal(self, other) -> bool:
        # Variables of different backends are never considered equal.
        return type(self) is type(other) and self._equal(other)

    @abc.abstractmethod
    def num_bytes(self) -> int:
        raise NotImplementedError()

    @abc.abstractmethod
    def to_bytes(self) -> bytes:
        raise NotImplementedError()

    @abc.abstractmethod
    def _to(self, dtype: str) -> "Variable":
        raise NotImplementedError()

    @abc.abstractmethod
    def _equal(self, other) -> bool:
        raise NotImplementedError()
class NumpyVariable(Variable):
    """Model variable as a Numpy array."""

    def __init__(self, array):
        self.array = array

    @property
    def shape(self) -> List[int]:
        return self.array.shape

    @property
    def dtype(self) -> str:
        return self.array.dtype.name

    def numpy(self) -> np.ndarray:
        return self.array

    def num_bytes(self) -> int:
        return self.array.nbytes

    def to_bytes(self) -> bytes:
        return self.array.tobytes()

    def _to(self, dtype: str) -> Variable:
        if dtype == "bfloat16":
            # NumPy has no bfloat16 dtype: convert through a PyTorch tensor.
            if not torch_is_available:
                raise RuntimeError(
                    "Converting to bfloat16 requires torch to be installed"
                )
            return PyTorchVariable.from_numpy(self.array).to(dtype)
        dtype = np.dtype(dtype)
        # Converts in place and returns self, matching the Variable.to contract.
        self.array = self.array.astype(dtype)
        return self

    def _equal(self, other) -> bool:
        a = self.array
        b = other.array
        if a is b:
            return True
        if a.dtype != b.dtype or a.shape != b.shape:
            return False
        # Cheap first-element probe before the full element-wise comparison.
        # Bug fix: guard with a.size — empty arrays have no a.flat[0] and this
        # previously raised IndexError instead of returning True.
        if a.size and a.flat[0] != b.flat[0]:
            return False
        return bool(np.array_equal(a, b))
class PyTorchVariable(Variable):
    """Model variable as a PyTorch tensor."""

    def __init__(self, tensor):
        # Unwrap nn.Parameter to its underlying tensor.
        if isinstance(tensor, torch.nn.Parameter):
            tensor = tensor.data
        # Contiguous storage is required so to_bytes() can read the raw buffer.
        self.tensor = tensor.contiguous()

    @classmethod
    def from_numpy(cls, array):
        """Builds a PyTorchVariable sharing memory with a NumPy array."""
        tensor = torch.from_numpy(array)
        return cls(tensor)

    @property
    def shape(self) -> List[int]:
        return list(self.tensor.shape)

    @property
    def dtype(self) -> str:
        # e.g. "torch.float16" -> "float16".
        return str(self.tensor.dtype).replace("torch.", "")

    def numpy(self) -> np.ndarray:
        return self.tensor.numpy()

    def num_bytes(self) -> int:
        return self.tensor.numel() * self.tensor.element_size()

    def to_bytes(self) -> bytes:
        # Read the raw tensor memory; valid because the tensor is contiguous.
        return ctypes.string_at(self.tensor.data_ptr(), self.num_bytes())

    def _to(self, dtype: str) -> Variable:
        # Converts in place and returns self, matching the Variable.to contract.
        dtype = getattr(torch, dtype)
        self.tensor = self.tensor.to(dtype)
        return self

    def _equal(self, other) -> bool:
        a = self.tensor
        b = other.tensor
        return a is b or (a.dtype == b.dtype and torch.equal(a, b))
| 23,334 | 31.319945 | 91 | py |
CTranslate2 | CTranslate2-master/python/ctranslate2/converters/transformers.py | import abc
import argparse
import gc
import itertools
import os
from typing import List, Optional
import numpy as np
try:
import huggingface_hub
import torch
import transformers
except ImportError:
pass
from ctranslate2.converters import utils
from ctranslate2.converters.converter import Converter
from ctranslate2.specs import common_spec, model_spec, transformer_spec, whisper_spec
# Maps Transformers activation names to CTranslate2 activation enum values.
_SUPPORTED_ACTIVATIONS = {
    "gelu": common_spec.Activation.GELU,
    "gelu_fast": common_spec.Activation.GELUTanh,
    "gelu_new": common_spec.Activation.GELUTanh,
    "gelu_python": common_spec.Activation.GELU,
    "gelu_pytorch_tanh": common_spec.Activation.GELUTanh,
    "quick_gelu": common_spec.Activation.GELUSigmoid,
    "relu": common_spec.Activation.RELU,
    "silu": common_spec.Activation.SWISH,
    "swish": common_spec.Activation.SWISH,
}

# Registry of loader instances, keyed by Transformers configuration class name
# (populated by the @register_loader decorator).
_MODEL_LOADERS = {}
def register_loader(config_name):
    """Registers a model loader for this configuration name."""

    def _register(loader_cls):
        # Instantiate the loader once and index it by the configuration name.
        _MODEL_LOADERS[config_name] = loader_cls()
        return loader_cls

    return _register
class TransformersConverter(Converter):
    """Converts models from Hugging Face Transformers."""

    def __init__(
        self,
        model_name_or_path: str,
        activation_scales: Optional[str] = None,
        copy_files: Optional[List[str]] = None,
        load_as_float16: bool = False,
        revision: Optional[str] = None,
        low_cpu_mem_usage: bool = False,
        trust_remote_code: bool = False,
    ):
        """Initializes the converter.

        Arguments:
          model_name_or_path: Name of the pretrained model to download, or path to the
            directory containing the pretrained model.
          activation_scales: Path to the pre-computed activation scales. Models may
            use them to rescale some weights to smooth the intermediate activations
            and improve the quantization accuracy. See
            https://github.com/mit-han-lab/smoothquant.
          copy_files: List of filenames to copy from the Hugging Face model to the
            converted model directory.
          load_as_float16: Load the model weights as float16. More precisely, the model
            will be loaded with ``from_pretrained(..., torch_dtype=torch.float16)``.
          revision: Revision of the model to download from the Hugging Face Hub.
          low_cpu_mem_usage: Enable the flag ``low_cpu_mem_usage`` when loading the model
            with ``from_pretrained``.
          trust_remote_code: Allow converting models using custom code.
        """
        self._model_name_or_path = model_name_or_path
        self._activation_scales = activation_scales
        self._copy_files = copy_files
        self._load_as_float16 = load_as_float16
        self._revision = revision
        self._low_cpu_mem_usage = low_cpu_mem_usage
        self._trust_remote_code = trust_remote_code

    def _load(self):
        """Loads the Transformers model and returns the converted model spec."""
        # Gradients are never needed during conversion.
        with torch.no_grad():
            config = transformers.AutoConfig.from_pretrained(
                self._model_name_or_path, trust_remote_code=self._trust_remote_code
            )
            # The loader is selected from the configuration class name.
            config_name = config.__class__.__name__
            loader = _MODEL_LOADERS.get(config_name)
            if loader is None:
                raise ValueError(
                    "No conversion is registered for the model configuration %s "
                    "(supported configurations are: %s)"
                    % (config_name, ", ".join(sorted(_MODEL_LOADERS.keys())))
                )
            model_class = getattr(transformers, loader.architecture_name)
            tokenizer_class = transformers.AutoTokenizer
            # Only forward options that were explicitly enabled.
            kwargs = {}
            if self._load_as_float16:
                kwargs["torch_dtype"] = torch.float16
            if self._revision:
                kwargs["revision"] = self._revision
            if self._low_cpu_mem_usage:
                kwargs["low_cpu_mem_usage"] = self._low_cpu_mem_usage
            if self._trust_remote_code:
                kwargs["trust_remote_code"] = self._trust_remote_code
            model = self.load_model(model_class, self._model_name_or_path, **kwargs)
            tokenizer_kwargs = {}
            if self._trust_remote_code:
                tokenizer_kwargs["trust_remote_code"] = self._trust_remote_code
            tokenizer = self.load_tokenizer(
                tokenizer_class, self._model_name_or_path, **tokenizer_kwargs
            )
            spec = loader(model, tokenizer)
            if self._activation_scales:
                activation_scales = torch.load(
                    self._activation_scales, map_location="cpu"
                )
                loader.smooth_activation(spec, activation_scales)
            if self._copy_files:
                for filename in self._copy_files:
                    spec.register_file(self.get_model_file(filename))
            return spec

    def load_model(self, model_class, model_name_or_path, **kwargs):
        """Loads the model weights (overridable hook)."""
        return model_class.from_pretrained(model_name_or_path, **kwargs)

    def load_tokenizer(self, tokenizer_class, model_name_or_path, **kwargs):
        """Loads the tokenizer (overridable hook)."""
        return tokenizer_class.from_pretrained(model_name_or_path, **kwargs)

    def get_model_file(self, filename):
        """Resolves a filename to a local path, downloading from the Hub if needed.

        Raises:
          ValueError: If the file does not exist locally or on the Hub.
        """
        if os.path.isdir(self._model_name_or_path):
            path = os.path.join(self._model_name_or_path, filename)
        else:
            try:
                path = huggingface_hub.hf_hub_download(
                    repo_id=self._model_name_or_path, filename=filename
                )
            except huggingface_hub.utils.EntryNotFoundError:
                path = None
        if path is None or not os.path.isfile(path):
            raise ValueError(
                "File %s does not exist in model %s"
                % (filename, self._model_name_or_path)
            )
        return path
class ModelLoader(abc.ABC):
    """Base class for loading Transformers models into a CTranslate2 model specification."""

    @property
    def architecture_name(self):
        # Name of the Transformers model class to instantiate; overridden by
        # each concrete loader.
        return None

    @abc.abstractmethod
    def get_model_spec(self, model):
        raise NotImplementedError()

    def __call__(self, model, tokenizer):
        """Builds the model spec, then fills in config and vocabulary."""
        spec = self.get_model_spec(model)
        self.set_config(spec.config, model, tokenizer)
        tokens = self.get_vocabulary(model, tokenizer)
        self.set_vocabulary(spec, tokens)
        return spec

    def get_vocabulary(self, model, tokenizer):
        # Tokens sorted by their id so the list index equals the token id.
        return [
            token
            for token, _ in sorted(
                tokenizer.get_vocab().items(), key=lambda item: item[1]
            )
        ]

    def set_vocabulary(self, spec, tokens):
        pass

    def set_config(self, config, model, tokenizer):
        pass

    def set_layer_norm(self, spec, module):
        spec.gamma = module.weight
        spec.beta = module.bias

    def set_linear(self, spec, module):
        spec.weight = module.weight
        # transformers.Conv1D stores the weight transposed w.r.t. nn.Linear.
        if isinstance(module, transformers.Conv1D):
            spec.weight = spec.weight.transpose(0, 1)
        if module.bias is not None:
            spec.bias = module.bias

    def set_embeddings(self, spec, module):
        spec.weight = module.weight

    def set_position_encodings(self, spec, module):
        spec.encodings = module.weight
        # Some position embedding modules reserve the first "offset" rows.
        offset = getattr(module, "offset", 0)
        if offset > 0:
            spec.encodings = spec.encodings[offset:]

    def smooth_activation(self, spec, activation_scales):
        raise NotImplementedError(
            "No activation smoothing logic is defined for this model"
        )
@register_loader("BartConfig")
class BartLoader(ModelLoader):
    """Loads BART models; also the base class for several BART-like loaders."""

    @property
    def architecture_name(self):
        return "BartForConditionalGeneration"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerSpec.from_config(
            (model.config.encoder_layers, model.config.decoder_layers),
            model.config.encoder_attention_heads,
            pre_norm=model.config.normalize_before,
            activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
            layernorm_embedding=getattr(model.config, "normalize_embedding", True),
        )
        self.set_encoder(spec.encoder, model.model.encoder)
        self.set_decoder(spec.decoder, model.model.decoder)
        self.set_linear(spec.decoder.projection, model.lm_head)
        # Only keep the final logits bias when it is actually non-zero.
        final_logits_bias = getattr(model, "final_logits_bias", None)
        if final_logits_bias is not None and final_logits_bias.nonzero().numel() != 0:
            spec.decoder.projection.bias = final_logits_bias.squeeze()
        return spec

    def get_vocabulary(self, model, tokenizer):
        tokens = super().get_vocabulary(model, tokenizer)
        # The tokenizer may list more tokens than the model embedding rows.
        if model.config.vocab_size < len(tokens):
            tokens = tokens[: model.config.vocab_size]
        return tokens

    def set_vocabulary(self, spec, tokens):
        # BART shares one vocabulary between source and target.
        spec.register_source_vocabulary(tokens)
        spec.register_target_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token
        config.decoder_start_token = tokenizer.convert_ids_to_tokens(
            model.config.decoder_start_token_id
        )

    def set_encoder(self, spec, encoder):
        self.set_common_layers(spec, encoder)
        for layer_spec, layer in zip(spec.layer, encoder.layers):
            self.set_attention(
                layer_spec.self_attention,
                layer.self_attn,
                self_attention=True,
            )
            self.set_layer_norm(
                layer_spec.self_attention.layer_norm,
                layer.self_attn_layer_norm,
            )
            self.set_linear(layer_spec.ffn.linear_0, layer.fc1)
            self.set_linear(layer_spec.ffn.linear_1, layer.fc2)
            self.set_layer_norm(layer_spec.ffn.layer_norm, layer.final_layer_norm)

    def set_decoder(self, spec, decoder):
        self.set_common_layers(spec, decoder)
        for layer_spec, layer in zip(spec.layer, decoder.layers):
            self.set_attention(
                layer_spec.self_attention,
                layer.self_attn,
                self_attention=True,
            )
            self.set_layer_norm(
                layer_spec.self_attention.layer_norm,
                layer.self_attn_layer_norm,
            )
            # Some derived (decoder-only) models have no cross-attention.
            if hasattr(layer, "encoder_attn"):
                self.set_attention(
                    layer_spec.attention,
                    layer.encoder_attn,
                    self_attention=False,
                )
                self.set_layer_norm(
                    layer_spec.attention.layer_norm,
                    layer.encoder_attn_layer_norm,
                )
            self.set_linear(layer_spec.ffn.linear_0, layer.fc1)
            self.set_linear(layer_spec.ffn.linear_1, layer.fc2)
            self.set_layer_norm(layer_spec.ffn.layer_norm, layer.final_layer_norm)

    def set_attention(self, spec, attention, self_attention=False):
        split_layers = [common_spec.LinearSpec() for _ in range(3)]
        self.set_linear(split_layers[0], attention.q_proj)
        self.set_linear(split_layers[1], attention.k_proj)
        self.set_linear(split_layers[2], attention.v_proj)
        if self_attention:
            # Self-attention: fuse Q, K and V into a single projection.
            utils.fuse_linear(spec.linear[0], split_layers)
        else:
            # Cross-attention: Q stays separate, K and V are fused.
            utils.fuse_linear(spec.linear[0], split_layers[:1])
            utils.fuse_linear(spec.linear[1], split_layers[1:])
        self.set_linear(spec.linear[-1], attention.out_proj)

    def set_common_layers(self, spec, module):
        spec.scale_embeddings = module.embed_scale
        self.set_position_encodings(spec.position_encodings, module.embed_positions)
        self.set_embeddings(
            spec.embeddings[0]
            if isinstance(spec.embeddings, list)
            else spec.embeddings,
            module.embed_tokens,
        )
        if hasattr(module, "layer_norm"):
            self.set_layer_norm(spec.layer_norm, module.layer_norm)
        if hasattr(module, "layernorm_embedding"):
            self.set_layer_norm(spec.layernorm_embedding, module.layernorm_embedding)
@register_loader("MarianConfig")
class MarianMTLoader(BartLoader):
    """Loads Marian MT models, reusing the BART conversion logic."""

    @property
    def architecture_name(self):
        return "MarianMTModel"

    def get_model_spec(self, model):
        # Marian layouts: post-norm layers, no embedding layer norm.
        model.config.normalize_before = False
        model.config.normalize_embedding = False
        spec = super().get_model_spec(model)
        self._remove_pad_weights(spec)
        return spec

    def set_config(self, config, model, tokenizer):
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token
        # The decoder start token can be any token because the decoder always starts
        # from a zero embedding.
        config.decoder_start_token = tokenizer.eos_token

    def set_decoder(self, spec, decoder):
        spec.start_from_zero_embedding = True
        super().set_decoder(spec, decoder)

    def get_vocabulary(self, model, tokenizer):
        # The <pad> token is added by Transformers to start the decoder from a zero embedding,
        # but we already have a dedicated option "start_from_zero_embedding". We remove this token
        # to match the original Marian vocabulary and prevent this token from being generated.
        tokens = super().get_vocabulary(model, tokenizer)
        if tokens[-1] == "<pad>":
            tokens.pop()
        return tokens

    def _remove_pad_weights(self, spec):
        """Drops the trailing <pad> row from embedding/projection weights."""
        vocab_specs = [
            spec.encoder.embeddings[0],
            spec.decoder.embeddings,
            spec.decoder.projection,
        ]
        # Weights may be shared so we check against the expected size to prevent
        # updating the same weight multiple times.
        new_vocab_size = vocab_specs[0].weight.shape[0] - 1
        for vocab_spec in vocab_specs:
            if vocab_spec.weight.shape[0] == new_vocab_size + 1:
                vocab_spec.weight = vocab_spec.weight[:-1]
            if (
                isinstance(vocab_spec, common_spec.LinearSpec)
                and vocab_spec.has_bias()
                and vocab_spec.bias.shape[0] == new_vocab_size + 1
            ):
                vocab_spec.bias = vocab_spec.bias[:-1]
@register_loader("M2M100Config")
class M2M100Loader(BartLoader):
    """Converter for M2M-100 multilingual translation models."""

    @property
    def architecture_name(self):
        return "M2M100ForConditionalGeneration"

    def get_model_spec(self, model):
        # Force the layer options read by the inherited BartLoader:
        # pre-norm layers without an embedding layer norm.
        model.config.normalize_before = True
        model.config.normalize_embedding = False
        return super().get_model_spec(model)

    def set_position_encodings(self, spec, module):
        # Skip the first `offset` rows of the position embedding table.
        spec.encodings = module.weights[module.offset :]

    def get_vocabulary(self, model, tokenizer):
        tokens = super().get_vocabulary(model, tokenizer)
        # Workaround for issue https://github.com/OpenNMT/CTranslate2/issues/1039.
        if tokens[-1] == tokenizer.unk_token:
            tokens.insert(tokenizer.unk_token_id, tokens.pop())
        # Make sure all additional special tokens are present in the vocabulary.
        for token in tokenizer.additional_special_tokens:
            if token not in tokens:
                tokens.append(token)
        # Pad the vocabulary to the model size with placeholder tokens.
        num_madeup_words = getattr(
            tokenizer, "num_madeup_words", model.config.vocab_size - len(tokens)
        )
        if num_madeup_words > 0:
            tokens += ["madeupword%d" % i for i in range(num_madeup_words)]
        return tokens
@register_loader("MBartConfig")
class MBartLoader(BartLoader):
    """Converter for MBart models, covering both MBart-25 and MBart-50 variants."""

    @property
    def architecture_name(self):
        return "MBartForConditionalGeneration"

    def set_config(self, config, model, tokenizer):
        """Copies the special tokens and selects the decoder start token."""
        for name in ("bos_token", "eos_token", "unk_token"):
            setattr(config, name, getattr(tokenizer, name))
        # MBart-25 passes the language code as the decoder start token.
        is_mbart_25 = model.config.tokenizer_class in ("MBartTokenizer", None)
        config.decoder_start_token = None if is_mbart_25 else tokenizer.eos_token
@register_loader("PegasusConfig")
class PegasusLoader(BartLoader):
    """Converter for Pegasus summarization models."""

    @property
    def architecture_name(self):
        return "PegasusForConditionalGeneration"

    def set_config(self, config, model, tokenizer):
        """Registers the special tokens; decoding starts from the pad token."""
        config.decoder_start_token = tokenizer.pad_token
        config.bos_token = tokenizer.pad_token
        config.unk_token = tokenizer.unk_token
        config.eos_token = tokenizer.eos_token
@register_loader("OPTConfig")
class OPTLoader(BartLoader):
    """Converter for OPT decoder-only language models."""

    @property
    def architecture_name(self):
        return "OPTForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.num_hidden_layers,
            model.config.num_attention_heads,
            pre_norm=model.config.do_layer_norm_before,
            activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
            # Some OPT sizes use an embedding dimension different from the
            # hidden size and project in/out of the Transformer stack.
            project_in_out=model.config.word_embed_proj_dim != model.config.hidden_size,
        )
        self.set_decoder(spec.decoder, model.model.decoder)
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def smooth_activation(self, spec, activation_scales):
        """Folds precomputed per-layer activation scales into the attention
        and FFN input layer norm / linear pairs (see utils.smooth_activation)."""
        for i, layer in enumerate(spec.decoder.layer):
            layer_scope = "model.decoder.layers.%d" % i
            utils.smooth_activation(
                layer.self_attention.layer_norm,
                layer.self_attention.linear[0],
                activation_scales["%s.self_attn.q_proj" % layer_scope],
            )
            utils.smooth_activation(
                layer.ffn.layer_norm,
                layer.ffn.linear_0,
                activation_scales["%s.fc1" % layer_scope],
            )

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_decoder(self, spec, decoder):
        super().set_decoder(spec, decoder)
        # Optional projections between the embedding and hidden dimensions.
        if decoder.project_in is not None:
            self.set_linear(spec.project_in, decoder.project_in)
        if decoder.project_out is not None:
            self.set_linear(spec.project_out, decoder.project_out)
        if decoder.final_layer_norm is not None:
            self.set_layer_norm(spec.layer_norm, decoder.final_layer_norm)

    def set_common_layers(self, spec, module):
        spec.scale_embeddings = False
        self.set_position_encodings(spec.position_encodings, module.embed_positions)
        self.set_embeddings(spec.embeddings, module.embed_tokens)

    def get_vocabulary(self, model, tokenizer):
        tokens = super().get_vocabulary(model, tokenizer)
        i = 0
        # Pad the vocabulary size to a multiple of 8 with placeholder tokens.
        while len(tokens) % 8 != 0:
            symbol = "madeupword{:04d}".format(i)
            if symbol not in tokens:
                tokens.append(symbol)
            i += 1
        return tokens
@register_loader("GPTBigCodeConfig")
class GPTBigCodeMHALoader(ModelLoader):
    """Converter for GPTBigCode models using multi-query attention."""

    @property
    def architecture_name(self):
        return "GPTBigCodeForCausalLM"

    def get_model_spec(self, model):
        """Builds a decoder-only spec with multi-query attention and fills it."""
        config = model.config
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            config.n_layer,
            config.n_head,
            pre_norm=True,
            activation=_SUPPORTED_ACTIVATIONS[config.activation_function],
            multi_query_attention=True,
        )
        self.set_decoder(spec.decoder, model.transformer)
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        num_missing = model.config.vocab_size - len(tokens)
        tokens.extend("<extra_id_%d>" % i for i in range(num_missing))
        return tokens

    def set_config(self, config, model, tokenizer):
        """Copies the special tokens from the tokenizer."""
        for name in ("bos_token", "eos_token", "unk_token"):
            setattr(config, name, getattr(tokenizer, name))

    def set_decoder(self, spec, module):
        """Maps the GPTBigCode transformer stack onto the decoder spec."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.wte)
        self.set_position_encodings(spec.position_encodings, module.wpe)
        self.set_layer_norm(spec.layer_norm, module.ln_f)
        for target, source in zip(spec.layer, module.h):
            attention = target.self_attention
            self.set_layer_norm(attention.layer_norm, source.ln_1)
            self.set_linear(attention.linear[0], source.attn.c_attn)
            self.set_linear(attention.linear[1], source.attn.c_proj)
            ffn = target.ffn
            self.set_layer_norm(ffn.layer_norm, source.ln_2)
            self.set_linear(ffn.linear_0, source.mlp.c_fc)
            self.set_linear(ffn.linear_1, source.mlp.c_proj)
@register_loader("GPT2Config")
class GPT2Loader(ModelLoader):
    """Converter for GPT-2 causal language models."""

    @property
    def architecture_name(self):
        return "GPT2LMHeadModel"

    def get_model_spec(self, model):
        """Builds the decoder-only spec and fills it with the model weights."""
        config = model.config
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            config.n_layer,
            config.n_head,
            pre_norm=True,
            activation=_SUPPORTED_ACTIVATIONS[config.activation_function],
        )
        self.set_decoder(spec.decoder, model.transformer)
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        """Copies the special tokens from the tokenizer."""
        for name in ("bos_token", "eos_token", "unk_token"):
            setattr(config, name, getattr(tokenizer, name))

    def set_decoder(self, spec, module):
        """Maps the GPT-2 transformer stack onto the decoder spec."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.wte)
        self.set_position_encodings(spec.position_encodings, module.wpe)
        self.set_layer_norm(spec.layer_norm, module.ln_f)
        for target, source in zip(spec.layer, module.h):
            attention = target.self_attention
            self.set_layer_norm(attention.layer_norm, source.ln_1)
            self.set_linear(attention.linear[0], source.attn.c_attn)
            self.set_linear(attention.linear[1], source.attn.c_proj)
            ffn = target.ffn
            self.set_layer_norm(ffn.layer_norm, source.ln_2)
            self.set_linear(ffn.linear_0, source.mlp.c_fc)
            self.set_linear(ffn.linear_1, source.mlp.c_proj)
@register_loader("GPTJConfig")
class GPTJLoader(ModelLoader):
    """Converter for GPT-J causal language models."""

    @property
    def architecture_name(self):
        return "GPTJForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.n_layer,
            model.config.n_head,
            pre_norm=True,
            activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
            rotary_dim=model.config.rotary_dim,
            rotary_interleave=False,
            parallel_residual=True,
            shared_layer_norm=True,
        )
        self.set_decoder(
            spec.decoder,
            model.transformer,
            model.config.rotary_dim,
            model.config.n_head,
        )
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_decoder(self, spec, module, rotary_dim, num_heads):
        """Maps the GPT-J transformer stack, fusing the Q/K/V projections."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.wte)
        self.set_layer_norm(spec.layer_norm, module.ln_f)
        for layer_spec, layer in zip(spec.layer, module.h):
            self.set_layer_norm(layer_spec.shared_layer_norm, layer.ln_1)
            qw = layer.attn.q_proj.weight
            kw = layer.attn.k_proj.weight
            vw = layer.attn.v_proj.weight
            # Only the Q and K rows are permuted for the sliced rotary layout
            # (see utils.permute_for_sliced_rotary); V is concatenated as-is.
            qw = utils.permute_for_sliced_rotary(qw, num_heads, rotary_dim)
            kw = utils.permute_for_sliced_rotary(kw, num_heads, rotary_dim)
            layer_spec.self_attention.linear[0].weight = torch.cat((qw, kw, vw))
            self.set_linear(layer_spec.self_attention.linear[1], layer.attn.out_proj)
            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.fc_in)
            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.fc_out)
@register_loader("CodeGenConfig")
class CodeGenLoader(ModelLoader):
    """Converter for CodeGen models, which are remapped to the GPT-J layout."""

    @property
    def architecture_name(self):
        return "CodeGenForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.n_layer,
            model.config.n_head,
            pre_norm=True,
            activation=_SUPPORTED_ACTIVATIONS[model.config.activation_function],
            rotary_dim=model.config.rotary_dim,
            rotary_interleave=False,
            parallel_residual=True,
            shared_layer_norm=True,
        )
        mp_num = 4
        if hasattr(model.config, "head_dim") and model.config.head_dim in [128, 256]:
            # models forked from "Salesforce/codegen2-1B" and "Salesforce/codegen2-3_7B"
            # use a special setting of mp_num=8, all other using 4
            # these model.config's use a special setting of head_dim
            mp_num = 8
        self.set_decoder(
            spec.decoder,
            model.transformer,
            model.config.rotary_dim,
            model.config.n_head,
            model.config.n_embd,
            mp_num=mp_num,
        )
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            # fix for additional vocab, see GPTNeoX Converter
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_decoder(self, spec, module, rotary_dim, num_heads, embed_dim, mp_num):
        """Maps the CodeGen stack onto the spec, permuting the fused QKV
        weight from CodeGen's mp_num-sharded layout to the GPT-J layout."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.wte)
        self.set_layer_norm(spec.layer_norm, module.ln_f)
        # Row permutation undoing the mp_num-way sharding of the QKV weight;
        # computed once and reused for every layer.
        base_permutation = np.arange(0, mp_num * 3).reshape(-1, 3).T.flatten().tolist()
        local_dim = embed_dim // mp_num
        permutation = torch.cat(
            [torch.arange(i * local_dim, (i + 1) * local_dim) for i in base_permutation]
        )
        for layer_spec, layer in zip(spec.layer, module.h):
            self.set_layer_norm(layer_spec.shared_layer_norm, layer.ln_1)
            # [start convert CodeGen to GPT-J format]
            # see https://github.com/fauxpilot/fauxpilot/blob/fb4073a9078dd001ebeb7dfefb8cb2ecc8a88f4b/converter/codegen_gptj_convert.py # noqa
            qkv_proj = layer.attn.qkv_proj.weight
            # GPT-J and CodeGen slice up the qkv projection slightly differently.
            # the following permutation brings Codegen 'qkv_proj'
            # in GPT-J order of qw, vw, kw
            # we permute the *rows* here because the computation is xA.T
            new_qkv_proj = qkv_proj[permutation, :]
            # the name QKV is misleading here; they are actually stored in QVK
            qw, vw, kw = new_qkv_proj.chunk(3, dim=0)
            # [end convert CodeGen to GPT-J.]
            qw = utils.permute_for_sliced_rotary(qw, num_heads, rotary_dim)
            kw = utils.permute_for_sliced_rotary(kw, num_heads, rotary_dim)
            layer_spec.self_attention.linear[0].weight = torch.cat((qw, kw, vw))
            self.set_linear(layer_spec.self_attention.linear[1], layer.attn.out_proj)
            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.fc_in)
            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.fc_out)
@register_loader("GPTNeoXConfig")
class GPTNeoXLoader(ModelLoader):
    """Converter for GPT-NeoX style causal language models."""

    @property
    def architecture_name(self):
        return "GPTNeoXForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.num_hidden_layers,
            model.config.num_attention_heads,
            pre_norm=True,
            activation=_SUPPORTED_ACTIVATIONS[model.config.hidden_act],
            # Only a fraction (rotary_pct) of each head dimension is rotary.
            rotary_dim=int(
                model.config.rotary_pct
                * (model.config.hidden_size // model.config.num_attention_heads)
            ),
            rotary_interleave=False,
            parallel_residual=model.config.use_parallel_residual,
            shared_layer_norm=False,
        )
        self.set_decoder(spec.decoder, model.gpt_neox, model.config.num_attention_heads)
        self.set_linear(spec.decoder.projection, model.embed_out)
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_decoder(self, spec, module, num_heads):
        """Maps the GPT-NeoX stack, reordering the fused QKV parameters."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.embed_in)
        self.set_layer_norm(spec.layer_norm, module.final_layer_norm)
        for layer_spec, layer in zip(spec.layer, module.layers):
            if hasattr(layer_spec, "input_layer_norm"):  # Use parallel residual.
                self.set_layer_norm(layer_spec.input_layer_norm, layer.input_layernorm)
                self.set_layer_norm(
                    layer_spec.post_attention_layer_norm, layer.post_attention_layernorm
                )
            else:
                self.set_layer_norm(
                    layer_spec.self_attention.layer_norm, layer.input_layernorm
                )
                self.set_layer_norm(
                    layer_spec.ffn.layer_norm, layer.post_attention_layernorm
                )
            # Reorder the fused QKV weight and bias from per-head (q, k, v)
            # interleaving to contiguous (all q, all k, all v) blocks.
            qkv_w = layer.attention.query_key_value.weight
            qkv_b = layer.attention.query_key_value.bias
            qkv_w = (
                qkv_w.reshape(num_heads, 3, -1, qkv_w.shape[-1])
                .swapaxes(0, 1)
                .reshape(-1, qkv_w.shape[-1])
            )
            qkv_b = qkv_b.reshape(num_heads, 3, -1).swapaxes(0, 1).reshape(-1)
            layer_spec.self_attention.linear[0].weight = qkv_w
            layer_spec.self_attention.linear[0].bias = qkv_b
            self.set_linear(layer_spec.self_attention.linear[1], layer.attention.dense)
            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.dense_h_to_4h)
            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.dense_4h_to_h)
@register_loader("WhisperConfig")
class WhisperLoader(BartLoader):
    """Converter for Whisper speech-to-text models."""

    @property
    def architecture_name(self):
        return "WhisperForConditionalGeneration"

    def get_model_spec(self, model):
        spec = whisper_spec.WhisperSpec(
            model.config.encoder_layers,
            model.config.encoder_attention_heads,
        )
        self.set_encoder(spec.encoder, model.model.encoder)
        self.set_decoder(spec.decoder, model.model.decoder)
        self.set_linear(spec.decoder.projection, model.proj_out)
        return spec

    def set_config(self, config, model, tokenizer):
        config.suppress_ids = model.config.suppress_tokens
        config.suppress_ids_begin = model.config.begin_suppress_tokens
        # NOTE(review): this slice assumes the language tokens sit between the
        # first 2 and last 6 additional special tokens -- confirm for new models.
        config.lang_ids = tokenizer.additional_special_tokens_ids[2:-6]
        config.alignment_heads = _WHISPER_ALIGNMENT_HEADS.get(model.name_or_path)
        if config.alignment_heads is None:
            # Use the last half layers for alignment by default.
            num_layers = model.config.decoder_layers
            num_heads = model.config.decoder_attention_heads
            config.alignment_heads = list(
                itertools.product(
                    range(num_layers // 2, num_layers),
                    range(num_heads),
                )
            )

    def get_vocabulary(self, model, tokenizer):
        tokens = super().get_vocabulary(model, tokenizer)
        # Add timestamp tokens.
        tokens.extend(
            "<|%.2f|>" % (i * 0.02)
            for i in range(model.config.vocab_size - len(tokens))
        )
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_encoder(self, spec, encoder):
        # Two convolution layers precede the Transformer encoder layers
        # handled by the parent BartLoader.
        self.set_conv1d(spec.conv1, encoder.conv1)
        self.set_conv1d(spec.conv2, encoder.conv2)
        super().set_encoder(spec, encoder)

    def set_decoder(self, spec, decoder):
        self.set_embeddings(spec.embeddings, decoder.embed_tokens)
        super().set_decoder(spec, decoder)

    def set_common_layers(self, spec, module):
        self.set_position_encodings(spec.position_encodings, module.embed_positions)
        self.set_layer_norm(spec.layer_norm, module.layer_norm)

    def set_conv1d(self, spec, module):
        spec.weight = module.weight
        spec.bias = module.bias
@register_loader("T5Config")
class T5Loader(ModelLoader):
    """Converter for T5 encoder-decoder models."""

    @property
    def architecture_name(self):
        return "T5ForConditionalGeneration"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerSpec.from_config(
            (model.config.num_layers, model.config.num_decoder_layers),
            model.config.num_heads,
            pre_norm=True,
            activation=_SUPPORTED_ACTIVATIONS[model.config.dense_act_fn],
            ffn_glu=model.config.is_gated_act,
            relative_attention_bias=True,
            rms_norm=True,
        )
        self.set_stack(spec.encoder, model.encoder)
        self.set_stack(spec.decoder, model.decoder, is_decoder=True)
        self.set_linear(spec.decoder.projection, model.lm_head)
        if model.config.tie_word_embeddings:
            # With tied embeddings the output logits are scaled by d_model**-0.5.
            spec.decoder.scale_outputs = model.config.d_model**-0.5
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        # The same vocabulary is registered on both source and target sides.
        spec.register_source_vocabulary(tokens)
        spec.register_target_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        # The pad token doubles as BOS and as the decoder start token.
        config.bos_token = tokenizer.pad_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token
        config.decoder_start_token = tokenizer.pad_token

    def set_stack(self, spec, module, is_decoder=False):
        """Maps a T5 encoder or decoder stack onto the given spec."""
        self.set_layer_norm(spec.layer_norm, module.final_layer_norm)
        self.set_embeddings(
            spec.embeddings[0]
            if isinstance(spec.embeddings, list)
            else spec.embeddings,
            module.embed_tokens,
        )
        spec.scale_embeddings = False
        for i, (layer_spec, block) in enumerate(zip(spec.layer, module.block)):
            self.set_self_attention(layer_spec.self_attention, block.layer[0])
            if i > 0:
                # Reuse relative attention bias from the first layer.
                first_self_attention = spec.layer[0].self_attention
                layer_spec.self_attention.relative_attention_bias = (
                    first_self_attention.relative_attention_bias
                )
                layer_spec.self_attention.relative_attention_max_distance = (
                    first_self_attention.relative_attention_max_distance
                )
            if is_decoder:
                self.set_cross_attention(layer_spec.attention, block.layer[1])
            self.set_ffn(layer_spec.ffn, block.layer[-1])

    def set_ffn(self, spec, module):
        if hasattr(spec, "linear_0_noact"):
            # Gated (GLU) variant: wi_0 feeds the activation, wi_1 does not.
            self.set_linear(spec.linear_0, module.DenseReluDense.wi_0)
            self.set_linear(spec.linear_0_noact, module.DenseReluDense.wi_1)
        else:
            self.set_linear(spec.linear_0, module.DenseReluDense.wi)
        self.set_linear(spec.linear_1, module.DenseReluDense.wo)
        self.set_layer_norm(spec.layer_norm, module.layer_norm)

    def set_self_attention(self, spec, module):
        self.set_attention(spec, module.SelfAttention, self_attention=True)
        self.set_layer_norm(spec.layer_norm, module.layer_norm)

    def set_cross_attention(self, spec, module):
        self.set_attention(spec, module.EncDecAttention)
        self.set_layer_norm(spec.layer_norm, module.layer_norm)

    def set_attention(self, spec, attention, self_attention=False):
        # T5 attention does not scale the queries.
        spec.queries_scale = 1.0
        split_layers = [common_spec.LinearSpec() for _ in range(3)]
        self.set_linear(split_layers[0], attention.q)
        self.set_linear(split_layers[1], attention.k)
        self.set_linear(split_layers[2], attention.v)
        if self_attention:
            # Self-attention fuses Q, K and V into a single projection.
            utils.fuse_linear(spec.linear[0], split_layers)
        else:
            # Cross-attention keeps Q separate and fuses only K and V.
            utils.fuse_linear(spec.linear[0], split_layers[:1])
            utils.fuse_linear(spec.linear[1], split_layers[1:])
        self.set_linear(spec.linear[-1], attention.o)
        if attention.has_relative_attention_bias:
            spec.relative_attention_bias = attention.relative_attention_bias.weight
            spec.relative_attention_max_distance = np.dtype("int32").type(
                attention.relative_attention_max_distance
            )

    def set_layer_norm(self, spec, layer_norm):
        # RMS norm: only a scale parameter is exported, no bias.
        spec.gamma = layer_norm.weight
@register_loader("MT5Config")
class MT5Loader(T5Loader):
    """Converter for mT5 models: same layout as T5, different architecture name."""

    @property
    def architecture_name(self):
        return "MT5ForConditionalGeneration"
@register_loader("BloomConfig")
class BloomLoader(ModelLoader):
    """Converter for BLOOM causal language models."""

    @property
    def architecture_name(self):
        return "BloomForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.n_layer,
            model.config.n_head,
            pre_norm=True,
            activation=common_spec.Activation.GELUTanh,
            layernorm_embedding=True,
            alibi=True,
            alibi_use_positive_positions=True,
        )
        self.set_decoder(spec.decoder, model.transformer)
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_decoder(self, spec, module):
        """Maps the BLOOM transformer stack onto the decoder spec."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.word_embeddings)
        self.set_layer_norm(spec.layernorm_embedding, module.word_embeddings_layernorm)
        self.set_layer_norm(spec.layer_norm, module.ln_f)
        for layer_spec, layer in zip(spec.layer, module.h):
            self.set_layer_norm(
                layer_spec.self_attention.layer_norm, layer.input_layernorm
            )
            self.set_qkv_linear(
                layer_spec.self_attention.linear[0],
                layer.self_attention.query_key_value,
                layer.self_attention.num_heads,
            )
            self.set_linear(
                layer_spec.self_attention.linear[1], layer.self_attention.dense
            )
            self.set_layer_norm(
                layer_spec.ffn.layer_norm, layer.post_attention_layernorm
            )
            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.dense_h_to_4h)
            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.dense_4h_to_h)

    def set_qkv_linear(self, spec, module, num_heads):
        """Reorders the fused QKV projection from per-head (q, k, v)
        interleaving to contiguous (all q, all k, all v) blocks."""
        weight = module.weight
        weight = weight.reshape(num_heads, 3, -1, weight.shape[-1])
        weight = weight.transpose(0, 1)
        weight = weight.reshape(-1, weight.shape[-1])
        bias = module.bias
        bias = bias.reshape(num_heads, 3, -1)
        bias = bias.transpose(0, 1)
        bias = bias.reshape(-1)
        spec.weight = weight
        spec.bias = bias
@register_loader("MPTConfig")
class MPTLoader(ModelLoader):
    """Converter for MPT models (loaded via "AutoModelForCausalLM")."""

    @property
    def architecture_name(self):
        return "AutoModelForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.n_layers,
            model.config.n_heads,
            pre_norm=True,
            activation=common_spec.Activation.GELU,
            alibi=True,
        )
        self.set_decoder(spec.decoder, model.transformer)
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_decoder(self, spec, module):
        self.set_embeddings(spec.embeddings, module.wte)
        self.set_layer_norm(spec.layer_norm, module.norm_f)
        spec.scale_embeddings = False
        # The output projection shares the input embedding weights.
        spec.projection.weight = spec.embeddings.weight
        for layer_spec, layer in zip(spec.layer, module.blocks):
            self.set_layer_norm(layer_spec.self_attention.layer_norm, layer.norm_1)
            self.set_linear(layer_spec.self_attention.linear[0], layer.attn.Wqkv)
            self.set_linear(layer_spec.self_attention.linear[1], layer.attn.out_proj)
            self.set_layer_norm(layer_spec.ffn.layer_norm, layer.norm_2)
            self.set_linear(layer_spec.ffn.linear_0, layer.ffn.up_proj)
            self.set_linear(layer_spec.ffn.linear_1, layer.ffn.down_proj)

    def set_layer_norm(self, spec, module):
        # Only the scale is taken from the module; the spec's bias is filled
        # with zeros.
        spec.gamma = module.weight
        spec.beta = torch.zeros_like(spec.gamma)
@register_loader("LlamaConfig")
class LlamaLoader(ModelLoader):
    """Converter for LLaMA causal language models."""

    @property
    def architecture_name(self):
        return "LlamaForCausalLM"

    def get_model_spec(self, model):
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.num_hidden_layers,
            model.config.num_attention_heads,
            activation=common_spec.Activation.SWISH,
            pre_norm=True,
            ffn_glu=True,
            rms_norm=True,
            rotary_dim=0,
            rotary_interleave=False,
        )
        self.set_decoder(spec.decoder, model.model)
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.bos_token = tokenizer.bos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.unk_token

    def set_layer_norm(self, spec, layer_norm):
        # RMS norm: only a scale parameter is exported, no bias.
        spec.gamma = layer_norm.weight

    def set_decoder(self, spec, module):
        """Maps the LLaMA stack, fusing Q/K/V and releasing weights as it goes."""
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.embed_tokens)
        self.set_layer_norm(spec.layer_norm, module.norm)
        for layer_spec, layer in zip(spec.layer, module.layers):
            self.set_layer_norm(
                layer_spec.self_attention.layer_norm, layer.input_layernorm
            )
            self.set_layer_norm(
                layer_spec.ffn.layer_norm, layer.post_attention_layernorm
            )
            # Fuse the separate Q/K/V projections into a single weight.
            wq = layer.self_attn.q_proj.weight
            wk = layer.self_attn.k_proj.weight
            wv = layer.self_attn.v_proj.weight
            wo = layer.self_attn.o_proj.weight
            layer_spec.self_attention.linear[0].weight = torch.cat([wq, wk, wv])
            layer_spec.self_attention.linear[1].weight = wo
            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.gate_proj)
            self.set_linear(layer_spec.ffn.linear_0_noact, layer.mlp.up_proj)
            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.down_proj)
            # Drop the converted submodules so unreferenced tensors can be
            # collected, limiting peak memory usage during conversion.
            delattr(layer, "self_attn")
            delattr(layer, "mlp")
            gc.collect()
@register_loader("RWConfig")
class RWLoader(ModelLoader):
    """Converter for RW-architecture models (loaded via "AutoModelForCausalLM")."""

    @property
    def architecture_name(self):
        return "AutoModelForCausalLM"

    def get_model_spec(self, model):
        # multi_query means a single shared K/V head; otherwise the number of
        # K/V heads may be given separately via "n_head_kv".
        if getattr(model.config, "multi_query", False):
            num_heads_kv = 1
        else:
            num_heads_kv = getattr(model.config, "n_head_kv", None)
        spec = transformer_spec.TransformerDecoderModelSpec.from_config(
            model.config.n_layer,
            model.config.n_head,
            pre_norm=True,
            activation=common_spec.Activation.GELU,
            alibi=model.config.alibi,
            alibi_use_positive_positions=True,
            rotary_dim=0,
            rotary_interleave=False,
            parallel_residual=model.config.parallel_attn,
            shared_layer_norm=num_heads_kv == 1,
            num_heads_kv=num_heads_kv,
        )
        self.set_decoder(spec.decoder, model.transformer)
        self.set_linear(spec.decoder.projection, model.lm_head)
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        # BOS and UNK fall back to the EOS token for these models.
        config.bos_token = tokenizer.eos_token
        config.eos_token = tokenizer.eos_token
        config.unk_token = tokenizer.eos_token

    def set_decoder(self, spec, module):
        spec.scale_embeddings = False
        self.set_embeddings(spec.embeddings, module.word_embeddings)
        self.set_layer_norm(spec.layer_norm, module.ln_f)
        for layer_spec, layer in zip(spec.layer, module.h):
            # The layer norm layout differs between model variants.
            if hasattr(layer, "ln_attn"):
                self.set_layer_norm(layer_spec.input_layer_norm, layer.ln_attn)
                self.set_layer_norm(layer_spec.post_attention_layer_norm, layer.ln_mlp)
            elif hasattr(layer_spec, "shared_layer_norm"):
                self.set_layer_norm(layer_spec.shared_layer_norm, layer.input_layernorm)
            else:
                self.set_layer_norm(
                    layer_spec.self_attention.layer_norm, layer.input_layernorm
                )
                self.set_layer_norm(
                    layer_spec.ffn.layer_norm, layer.post_attention_layernorm
                )
            if layer.self_attention.num_kv == 1:
                # Multi-query layout: no reordering is needed.
                self.set_linear(
                    layer_spec.self_attention.linear[0],
                    layer.self_attention.query_key_value,
                )
            else:
                self.set_qkv_linear(
                    layer_spec.self_attention.linear[0],
                    layer.self_attention.query_key_value,
                    layer.self_attention.num_heads,
                    layer.self_attention.num_kv
                    if layer.self_attention.num_kv < layer.self_attention.num_heads
                    else None,
                )
            self.set_linear(
                layer_spec.self_attention.linear[1], layer.self_attention.dense
            )
            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.dense_h_to_4h)
            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.dense_4h_to_h)

    def set_qkv_linear(self, spec, module, num_heads, num_kv=None):
        """Reorders a fused QKV projection into contiguous Q, K, V blocks.

        When num_kv is None, the rows are interleaved per head as (q, k, v);
        otherwise the weight uses a grouped layout with num_heads // num_kv
        query heads per K/V group.
        """
        weight = module.weight
        if num_kv is None:
            weight = weight.reshape(num_heads, 3, -1, weight.shape[-1])
            weight = weight.transpose(0, 1)
            weight = weight.reshape(-1, weight.shape[-1])
        else:
            head_dim = weight.shape[0] // (num_heads + num_kv * 2)
            weight = weight.reshape(
                -1, num_heads // num_kv + 2, head_dim, weight.shape[-1]
            )
            q, k, v = weight.split([num_heads // num_kv, 1, 1], dim=1)
            weight = torch.cat(
                [
                    q.reshape(num_heads * head_dim, -1),
                    k.reshape(num_kv * head_dim, -1),
                    v.reshape(num_kv * head_dim, -1),
                ]
            )
        spec.weight = weight
        if module.bias is not None:
            bias = module.bias
            if num_kv is None:
                bias = bias.reshape(num_heads, 3, -1)
                bias = bias.transpose(0, 1)
                bias = bias.reshape(-1)
            else:
                # head_dim was computed in the weight branch above
                # (both branches take the same num_kv path).
                bias = bias.reshape(-1, num_heads // num_kv + 2, head_dim)
                q, k, v = bias.split([num_heads // num_kv, 1, 1], dim=1)
                bias = torch.cat(
                    [
                        q.reshape(num_heads * head_dim),
                        k.reshape(num_kv * head_dim),
                        v.reshape(num_kv * head_dim),
                    ]
                )
            spec.bias = bias
@register_loader("BertConfig")
class BertLoader(ModelLoader):
    """Converter for BERT encoder models."""

    @property
    def architecture_name(self):
        return "BertModel"

    def get_model_spec(self, model):
        # Only absolute position embeddings are supported by this converter.
        assert model.config.position_embedding_type == "absolute"
        encoder_spec = transformer_spec.TransformerEncoderSpec(
            model.config.num_hidden_layers,
            model.config.num_attention_heads,
            pre_norm=False,
            activation=_SUPPORTED_ACTIVATIONS[model.config.hidden_act],
            layernorm_embedding=True,
            # Two embedding inputs (word + token type), summed together.
            num_source_embeddings=2,
            embeddings_merge=common_spec.EmbeddingsMerge.ADD,
        )
        spec = transformer_spec.TransformerEncoderModelSpec(
            encoder_spec,
            pooling_layer=True,
            pooling_activation=common_spec.Activation.Tanh,
        )
        spec.encoder.scale_embeddings = False
        self.set_embeddings(
            spec.encoder.embeddings[0], model.embeddings.word_embeddings
        )
        self.set_embeddings(
            spec.encoder.embeddings[1], model.embeddings.token_type_embeddings
        )
        self.set_position_encodings(
            spec.encoder.position_encodings, model.embeddings.position_embeddings
        )
        self.set_layer_norm(
            spec.encoder.layernorm_embedding, model.embeddings.LayerNorm
        )
        self.set_linear(spec.pooler_dense, model.pooler.dense)
        for layer_spec, layer in zip(spec.encoder.layer, model.encoder.layer):
            # Fuse the separate Q/K/V projections into a single linear.
            split_layers = [common_spec.LinearSpec() for _ in range(3)]
            self.set_linear(split_layers[0], layer.attention.self.query)
            self.set_linear(split_layers[1], layer.attention.self.key)
            self.set_linear(split_layers[2], layer.attention.self.value)
            utils.fuse_linear(layer_spec.self_attention.linear[0], split_layers)
            self.set_linear(
                layer_spec.self_attention.linear[1], layer.attention.output.dense
            )
            self.set_layer_norm(
                layer_spec.self_attention.layer_norm, layer.attention.output.LayerNorm
            )
            self.set_linear(layer_spec.ffn.linear_0, layer.intermediate.dense)
            self.set_linear(layer_spec.ffn.linear_1, layer.output.dense)
            self.set_layer_norm(layer_spec.ffn.layer_norm, layer.output.LayerNorm)
        return spec

    def get_vocabulary(self, model, tokenizer):
        """Pads the token list to the model vocabulary size with placeholders."""
        tokens = super().get_vocabulary(model, tokenizer)
        extra_ids = model.config.vocab_size - len(tokens)
        for i in range(extra_ids):
            tokens.append("<extra_id_%d>" % i)
        return tokens

    def set_vocabulary(self, spec, tokens):
        spec.register_vocabulary(tokens)

    def set_config(self, config, model, tokenizer):
        config.unk_token = tokenizer.unk_token
        config.layer_norm_epsilon = model.config.layer_norm_eps
@register_loader("XLMRobertaConfig")
class XLMRobertaLoader(ModelLoader):
@property
def architecture_name(self):
return "XLMRobertaForSequenceClassification"
def get_model_spec(self, model):
assert model.config.position_embedding_type == "absolute"
encoder_spec = transformer_spec.TransformerEncoderSpec(
model.config.num_hidden_layers,
model.config.num_attention_heads,
pre_norm=False,
activation=_SUPPORTED_ACTIVATIONS[model.config.hidden_act],
layernorm_embedding=True,
num_source_embeddings=2,
embeddings_merge=common_spec.EmbeddingsMerge.ADD,
)
if model.roberta.pooler is None:
pooling_layer = False
else:
pooling_layer = True
spec = transformer_spec.TransformerEncoderModelSpec(
encoder_spec,
pooling_layer=pooling_layer,
pooling_activation=common_spec.Activation.Tanh,
)
spec.encoder.scale_embeddings = False
self.set_embeddings(
spec.encoder.embeddings[0], model.roberta.embeddings.word_embeddings
)
self.set_embeddings(
spec.encoder.embeddings[1], model.roberta.embeddings.token_type_embeddings
)
self.set_position_encodings(
spec.encoder.position_encodings,
model.roberta.embeddings.position_embeddings,
)
self.set_layer_norm(
spec.encoder.layernorm_embedding, model.roberta.embeddings.LayerNorm
)
if pooling_layer:
self.set_linear(spec.pooler_dense, model.roberta.pooler.dense)
for layer_spec, layer in zip(spec.encoder.layer, model.roberta.encoder.layer):
split_layers = [common_spec.LinearSpec() for _ in range(3)]
self.set_linear(split_layers[0], layer.attention.self.query)
self.set_linear(split_layers[1], layer.attention.self.key)
self.set_linear(split_layers[2], layer.attention.self.value)
utils.fuse_linear(layer_spec.self_attention.linear[0], split_layers)
self.set_linear(
layer_spec.self_attention.linear[1], layer.attention.output.dense
)
self.set_layer_norm(
layer_spec.self_attention.layer_norm, layer.attention.output.LayerNorm
)
self.set_linear(layer_spec.ffn.linear_0, layer.intermediate.dense)
self.set_linear(layer_spec.ffn.linear_1, layer.output.dense)
self.set_layer_norm(layer_spec.ffn.layer_norm, layer.output.LayerNorm)
return spec
def set_vocabulary(self, spec, tokens):
spec.register_vocabulary(tokens)
def set_config(self, config, model, tokenizer):
config.unk_token = tokenizer.unk_token
config.layer_norm_epsilon = model.config.layer_norm_eps
def set_position_encodings(self, spec, module):
spec.encodings = module.weight
offset = getattr(module, "padding_idx", 0)
if offset > 0:
spec.encodings = spec.encodings[offset + 1 :]
def main():
    """Command-line entry point converting a Transformers model to CTranslate2."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--model",
        required=True,
        help=(
            "Name of the pretrained model to download, "
            "or path to a directory containing the pretrained model."
        ),
    )
    parser.add_argument(
        "--activation_scales",
        help=(
            "Path to the pre-computed activation scales. Models may "
            "use them to rescale some weights to smooth the intermediate activations "
            "and improve the quantization accuracy. See "
            "https://github.com/mit-han-lab/smoothquant."
        ),
    )
    parser.add_argument(
        "--copy_files",
        nargs="+",
        help=(
            "List of filenames to copy from the Hugging Face model to the converted "
            "model directory."
        ),
    )
    parser.add_argument(
        "--revision",
        help="Revision of the model to download from the Hugging Face Hub.",
    )
    parser.add_argument(
        "--low_cpu_mem_usage",
        action="store_true",
        help="Enable the flag low_cpu_mem_usage when loading the model with from_pretrained.",
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="Allow converting models using custom code.",
    )
    # Add the common converter options (quantization, output directory, ...).
    Converter.declare_arguments(parser)
    args = parser.parse_args()
    converter = TransformersConverter(
        args.model,
        activation_scales=args.activation_scales,
        copy_files=args.copy_files,
        # Load weights directly in half precision when quantizing to FP16/INT8-FP16.
        load_as_float16=args.quantization in ("float16", "int8_float16"),
        revision=args.revision,
        low_cpu_mem_usage=args.low_cpu_mem_usage,
        trust_remote_code=args.trust_remote_code,
    )
    converter.convert_from_args(args)


if __name__ == "__main__":
    main()
# Cross-attention heads that are highly correlated to the word-level timing,
# i.e. the alignment between audio and text tokens.
# Obtained from https://github.com/openai/whisper/blob/v20230306/whisper/__init__.py#L31-L45
_WHISPER_ALIGNMENT_HEADS = {
"openai/whisper-tiny.en": [
(1, 0),
(2, 0),
(2, 5),
(3, 0),
(3, 1),
(3, 2),
(3, 3),
(3, 4),
],
"openai/whisper-tiny": [(2, 2), (3, 0), (3, 2), (3, 3), (3, 4), (3, 5)],
"openai/whisper-base.en": [(3, 3), (4, 7), (5, 1), (5, 5), (5, 7)],
"openai/whisper-base": [
(3, 1),
(4, 2),
(4, 3),
(4, 7),
(5, 1),
(5, 2),
(5, 4),
(5, 6),
],
"openai/whisper-small.en": [
(6, 6),
(7, 0),
(7, 3),
(7, 8),
(8, 2),
(8, 5),
(8, 7),
(9, 0),
(9, 4),
(9, 8),
(9, 10),
(10, 0),
(10, 1),
(10, 2),
(10, 3),
(10, 6),
(10, 11),
(11, 2),
(11, 4),
],
"openai/whisper-small": [
(5, 3),
(5, 9),
(8, 0),
(8, 4),
(8, 7),
(8, 8),
(9, 0),
(9, 7),
(9, 9),
(10, 5),
],
"openai/whisper-medium.en": [
(11, 4),
(14, 1),
(14, 12),
(14, 14),
(15, 4),
(16, 0),
(16, 4),
(16, 9),
(17, 12),
(17, 14),
(18, 7),
(18, 10),
(18, 15),
(20, 0),
(20, 3),
(20, 9),
(20, 14),
(21, 12),
],
"openai/whisper-medium": [(13, 15), (15, 4), (15, 15), (16, 1), (20, 0), (23, 4)],
"openai/whisper-large": [
(9, 19),
(11, 2),
(11, 4),
(11, 17),
(22, 7),
(22, 11),
(22, 17),
(23, 2),
(23, 15),
],
"openai/whisper-large-v2": [
(10, 12),
(13, 17),
(16, 11),
(16, 12),
(16, 13),
(17, 15),
(17, 16),
(18, 4),
(18, 11),
(18, 19),
(19, 11),
(21, 2),
(21, 3),
(22, 3),
(22, 9),
(22, 12),
(23, 5),
(23, 7),
(23, 13),
(25, 5),
(26, 1),
(26, 12),
(27, 15),
],
}
| 60,929 | 34.486313 | 143 | py |
CTranslate2 | CTranslate2-master/python/ctranslate2/converters/fairseq.py | import argparse
import os
from typing import Optional
from ctranslate2.converters import utils
from ctranslate2.converters.converter import Converter
from ctranslate2.specs import common_spec, transformer_spec
# Fairseq model architectures this converter can handle.
_SUPPORTED_MODELS = {
    "bart",
    "multilingual_transformer",
    "transformer",
    "transformer_align",
    "transformer_lm",
}

# Mapping from Fairseq activation names to CTranslate2 activation enums.
_SUPPORTED_ACTIVATIONS = {
    "gelu": common_spec.Activation.GELU,
    "gelu_accurate": common_spec.Activation.GELUTanh,
    "gelu_fast": common_spec.Activation.GELUTanh,
    "relu": common_spec.Activation.RELU,
    "swish": common_spec.Activation.SWISH,
}
def _get_model_spec(args):
    """Build a CTranslate2 model specification from Fairseq training options.

    Raises ValueError (via the checker) when the options describe an
    unsupported configuration.
    """
    import fairseq

    activation_fn = getattr(args, "activation_fn", "relu")
    model_name = fairseq.models.ARCH_MODEL_NAME_REGISTRY[args.arch]

    check = utils.ConfigurationChecker()
    check(
        model_name in _SUPPORTED_MODELS,
        "Model '%s' used by architecture '%s' is not supported (supported models are: %s)"
        % (model_name, args.arch, ", ".join(_SUPPORTED_MODELS)),
    )
    # Validate early: the checks below only make sense for a supported model.
    check.validate()
    check(
        activation_fn in _SUPPORTED_ACTIVATIONS,
        "Option --activation-fn %s is not supported (supported activations are: %s)"
        % (activation_fn, ", ".join(_SUPPORTED_ACTIVATIONS.keys())),
    )
    check(
        not getattr(args, "no_token_positional_embeddings", False),
        "Option --no-token-positional-embeddings is not supported",
    )
    check(
        not getattr(args, "lang_tok_replacing_bos_eos", False),
        "Option --lang-tok-replacing-bos-eos is not supported",
    )
    if model_name == "transformer_lm":
        # Decoder-only language model.
        check(
            not args.character_embeddings,
            "Option --character-embeddings is not supported",
        )
        check(
            not args.adaptive_input,
            "Option --adaptive-input is not supported",
        )
        check.validate()
        return transformer_spec.TransformerDecoderModelSpec.from_config(
            args.decoder_layers,
            args.decoder_attention_heads,
            pre_norm=args.decoder_normalize_before,
            activation=_SUPPORTED_ACTIVATIONS[activation_fn],
            layernorm_embedding=getattr(args, "layernorm_embedding", False),
            no_final_norm=args.no_decoder_final_norm,
            project_in_out=args.decoder_input_dim != args.decoder_embed_dim,
        )
    else:
        # Encoder-decoder model: CTranslate2 requires symmetric encoder/decoder
        # settings for normalization and attention heads.
        check(
            args.encoder_normalize_before == args.decoder_normalize_before,
            "Options --encoder-normalize-before and --decoder-normalize-before "
            "must have the same value",
        )
        check(
            args.encoder_attention_heads == args.decoder_attention_heads,
            "Options --encoder-attention-heads and --decoder-attention-heads "
            "must have the same value",
        )
        check.validate()
        return transformer_spec.TransformerSpec.from_config(
            (args.encoder_layers, args.decoder_layers),
            args.encoder_attention_heads,
            pre_norm=args.encoder_normalize_before,
            activation=_SUPPORTED_ACTIVATIONS[activation_fn],
            alignment_layer=getattr(args, "alignment_layer", -1),
            alignment_heads=getattr(args, "alignment_heads", 0),
            layernorm_embedding=getattr(args, "layernorm_embedding", False),
        )
def _get_vocab(dictionary):
return ["<blank>" if token == "<pad>" else token for token in dictionary.symbols]
class FairseqConverter(Converter):
    """Converts models trained with Fairseq."""

    def __init__(
        self,
        model_path: str,
        data_dir: str,
        source_lang: Optional[str] = None,
        target_lang: Optional[str] = None,
        fixed_dictionary: Optional[str] = None,
        no_default_special_tokens: bool = False,
        user_dir: Optional[str] = None,
    ):
        """Initializes the Fairseq converter.

        Arguments:
          model_path: Path to the Fairseq PyTorch model (.pt file).
          data_dir: Path to the Fairseq data directory containing vocabulary files.
          source_lang: Source language (may be required if not declared in the model).
          target_lang: Target language (may be required if not declared in the model).
          fixed_dictionary: Path to the fixed dictionary for multilingual models.
          no_default_special_tokens: Require all special tokens to be provided by the user
            (e.g. encoder end token, decoder start token).
          user_dir: Path to the user directory containing custom extensions.
        """
        self._model_path = model_path
        self._data_dir = data_dir
        self._fixed_dictionary = fixed_dictionary
        self._source_lang = source_lang
        self._target_lang = target_lang
        self._no_default_special_tokens = no_default_special_tokens
        self._user_dir = user_dir

    def _load(self):
        """Loads the Fairseq checkpoint and returns the filled model spec."""
        import fairseq
        import torch

        from fairseq import checkpoint_utils

        if self._user_dir:
            # Register user-provided architectures/tasks before loading.
            from fairseq.utils import import_user_module

            import_user_module(argparse.Namespace(user_dir=self._user_dir))

        with torch.no_grad():
            checkpoint = checkpoint_utils.load_checkpoint_to_cpu(self._model_path)
            # Newer checkpoints store the options under cfg.model instead of args.
            args = checkpoint["args"] or checkpoint["cfg"]["model"]

            args.data = self._data_dir
            if self._fixed_dictionary is not None:
                args.fixed_dictionary = self._fixed_dictionary
            if hasattr(args, "lang_dict") and args.lang_dict:
                # Resolve the language dictionary relative to the data directory.
                args.lang_dict = os.path.join(
                    self._data_dir, os.path.basename(args.lang_dict)
                )
            if self._source_lang is not None:
                args.source_lang = self._source_lang
            if self._target_lang is not None:
                args.target_lang = self._target_lang

            spec = _get_model_spec(args)

            task = fairseq.tasks.setup_task(args)
            model = fairseq.models.build_model(args, task)
            model.eval()
            model.load_state_dict(checkpoint["model"])

            if isinstance(spec, transformer_spec.TransformerDecoderModelSpec):
                # Decoder-only language model.
                set_transformer_decoder(
                    spec.decoder,
                    model.decoder,
                    with_encoder_attention=False,
                )

                spec.register_vocabulary(_get_vocab(task.dictionary))
                if not args.add_bos_token:
                    spec.config.bos_token = spec.config.eos_token
            else:
                set_transformer_encoder(spec.encoder, model.encoder)
                set_transformer_decoder(spec.decoder, model.decoder)

                spec.register_source_vocabulary(_get_vocab(task.source_dictionary))
                spec.register_target_vocabulary(_get_vocab(task.target_dictionary))

                if self._no_default_special_tokens:
                    spec.config.decoder_start_token = None
                else:
                    # Fairseq uses </s> as the decoder start token by default.
                    spec.config.decoder_start_token = spec.config.eos_token
                    spec.config.add_source_eos = True

            return spec
def set_transformer_encoder(spec, module):
    """Copies all weights of a Fairseq Transformer encoder into the spec."""
    set_input_layers(spec, module)
    for layer_spec, layer in zip(spec.layer, module.layers):
        set_transformer_encoder_layer(layer_spec, layer)
    # Final layer norm and embedding layer norm are optional in Fairseq.
    if module.layer_norm is not None:
        set_layer_norm(spec.layer_norm, module.layer_norm)
    if module.layernorm_embedding is not None:
        set_layer_norm(spec.layernorm_embedding, module.layernorm_embedding)
def set_transformer_decoder(spec, module, with_encoder_attention=True):
    """Copies all weights of a Fairseq Transformer decoder into the spec.

    with_encoder_attention is False for decoder-only language models.
    """
    set_input_layers(spec, module)
    set_linear(spec.projection, module.output_projection)
    for layer_spec, layer in zip(spec.layer, module.layers):
        set_transformer_decoder_layer(
            layer_spec,
            layer,
            with_encoder_attention=with_encoder_attention,
        )
    # Optional final norms and input/output projections.
    if module.layer_norm is not None:
        set_layer_norm(spec.layer_norm, module.layer_norm)
    if module.layernorm_embedding is not None:
        set_layer_norm(spec.layernorm_embedding, module.layernorm_embedding)
    if module.project_in_dim is not None:
        set_linear(spec.project_in, module.project_in_dim)
    if module.project_out_dim is not None:
        set_linear(spec.project_out, module.project_out_dim)
def set_input_layers(spec, module):
    """Loads token embeddings, position encodings and the embedding scale."""
    set_position_encodings(spec.position_encodings, module.embed_positions)
    embeddings_spec = spec.embeddings
    # Encoder specs may hold a list of embedding tables; only the first is used.
    if isinstance(embeddings_spec, list):
        embeddings_spec = embeddings_spec[0]
    set_embeddings(embeddings_spec, module.embed_tokens)
    spec.scale_embeddings = module.embed_scale
def set_transformer_encoder_layer(spec, module):
    """Copies one Fairseq encoder layer (self-attention + FFN) into the spec."""
    set_ffn(spec.ffn, module)
    set_multi_head_attention(spec.self_attention, module.self_attn, self_attention=True)
    set_layer_norm(spec.self_attention.layer_norm, module.self_attn_layer_norm)
def set_transformer_decoder_layer(spec, module, with_encoder_attention=True):
    """Copies one Fairseq decoder layer into the spec.

    The cross-attention block is skipped for decoder-only models.
    """
    set_ffn(spec.ffn, module)
    set_multi_head_attention(spec.self_attention, module.self_attn, self_attention=True)
    set_layer_norm(spec.self_attention.layer_norm, module.self_attn_layer_norm)
    if with_encoder_attention:
        set_multi_head_attention(spec.attention, module.encoder_attn)
        set_layer_norm(spec.attention.layer_norm, module.encoder_attn_layer_norm)
def set_ffn(spec, module):
    """Copies the feed-forward sublayer (norm + two linear layers)."""
    set_layer_norm(spec.layer_norm, module.final_layer_norm)
    set_linear(spec.linear_0, module.fc1)
    set_linear(spec.linear_1, module.fc2)
def set_multi_head_attention(spec, module, self_attention=False):
    """Copies attention projections, fusing them into CTranslate2's layout.

    Self-attention fuses Q/K/V into linear[0]; cross-attention keeps Q in
    linear[0] and fuses K/V into linear[1]. The output projection always goes
    into the last linear slot.
    """
    if self_attention:
        split_layers = [common_spec.LinearSpec() for _ in range(3)]
        set_linear(split_layers[0], module.q_proj)
        set_linear(split_layers[1], module.k_proj)
        set_linear(split_layers[2], module.v_proj)
        utils.fuse_linear(spec.linear[0], split_layers)
    else:
        set_linear(spec.linear[0], module.q_proj)
        split_layers = [common_spec.LinearSpec() for _ in range(2)]
        set_linear(split_layers[0], module.k_proj)
        set_linear(split_layers[1], module.v_proj)
        utils.fuse_linear(spec.linear[1], split_layers)
    set_linear(spec.linear[-1], module.out_proj)
def set_layer_norm(spec, module):
    """Copies LayerNorm scale (gamma) and shift (beta) parameters."""
    spec.gamma = module.weight.numpy()
    spec.beta = module.bias.numpy()
def set_linear(spec, module):
    """Copies a linear layer's weight, and its bias when the layer has one."""
    spec.weight = module.weight.numpy()
    if module.bias is None:
        return
    spec.bias = module.bias.numpy()
def set_embeddings(spec, module):
    """Copies the embedding table into the spec."""
    spec.weight = module.weight.numpy()
def set_position_encodings(spec, module):
    """Copies position encodings, skipping the rows reserved for padding.

    Learned positions (torch.nn.Embedding) store the table in ``weight``;
    Fairseq's sinusoidal module stores it in ``weights``.
    """
    import torch

    if isinstance(module, torch.nn.Embedding):
        table = module.weight
    else:
        table = module.weights
    spec.encodings = table.numpy()[module.padding_idx + 1 :]
def main():
    """Command-line entry point converting a Fairseq model to CTranslate2."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--model_path", required=True, help="Model path.")
    parser.add_argument(
        "--data_dir",
        required=True,
        help="Data directory containing the source and target vocabularies.",
    )
    parser.add_argument(
        "--user_dir",
        help="Directory containing custom extensions.",
    )
    parser.add_argument(
        "--fixed_dictionary",
        help="Fixed dictionary for multilingual models.",
    )
    parser.add_argument(
        "--source_lang",
        help="Source language. This argument is used to find dictionary file from `data_dir`.",
    )
    parser.add_argument(
        "--target_lang",
        help="Target language. This argument is used to find dictionary file from `data_dir`.",
    )
    parser.add_argument(
        "--no_default_special_tokens",
        action="store_true",
        help=(
            "Require all special tokens to be provided by the user during inference, "
            "including the decoder start token."
        ),
    )
    # Add the common converter options (quantization, output directory, ...).
    Converter.declare_arguments(parser)
    args = parser.parse_args()
    converter = FairseqConverter(
        args.model_path,
        args.data_dir,
        source_lang=args.source_lang,
        target_lang=args.target_lang,
        fixed_dictionary=args.fixed_dictionary,
        no_default_special_tokens=args.no_default_special_tokens,
        user_dir=args.user_dir,
    )
    converter.convert_from_args(args)


if __name__ == "__main__":
    main()
| 12,420 | 34.898844 | 95 | py |
CTranslate2 | CTranslate2-master/python/ctranslate2/converters/utils.py | import numpy as np
def fuse_linear(spec, layers):
    """Fuses several linear layers into one by concatenating their parameters.

    Weights are concatenated along the output dimension. If at least one layer
    has a bias, missing biases are filled with zeros so the fused bias lines up
    with the fused weight rows.

    Raises:
      ValueError: if ``layers`` is empty.
    """
    if not layers:
        raise ValueError("Cannot fuse linear layers: at least one layer is required")
    # Weights are either NumPy arrays or torch tensors; pick the matching API.
    if isinstance(layers[0].weight, np.ndarray):
        np_api = np
    else:
        import torch as np_api

    spec.weight = np_api.concatenate([layer.weight for layer in layers])

    bias_dtype = next(
        (layer.bias.dtype for layer in layers if layer.has_bias()), None
    )
    if bias_dtype is None:
        return
    fused_bias = []
    for layer in layers:
        if layer.has_bias():
            fused_bias.append(layer.bias)
        else:
            fused_bias.append(np_api.zeros([layer.weight.shape[0]], dtype=bias_dtype))
    spec.bias = np_api.concatenate(fused_bias)
def permute_for_sliced_rotary(weight, num_heads, rotary_dim=None):
    """Permutes the weight to use the sliced rotary implementation."""
    if rotary_dim is not None:
        # Partial rotary: only the first rotary_dim rows of each head are
        # rotated, so permute just that slice per head.
        weight = weight.reshape(num_heads, weight.shape[0] // num_heads, -1)
        rotary_weight = weight[:, :rotary_dim]
        rotary_weight = permute_for_sliced_rotary(
            rotary_weight.reshape(num_heads * rotary_dim, -1), num_heads
        ).reshape(num_heads, rotary_dim, -1)
        weight[:, :rotary_dim] = rotary_weight
        return weight.reshape(-1, weight.shape[-1])

    # Full rotary: reorder each head from interleaved (even/odd pairs) to
    # sliced (first half / second half) layout by swapping the pair axis.
    return (
        weight.reshape(num_heads, weight.shape[0] // num_heads // 2, 2, weight.shape[1])
        .swapaxes(1, 2)
        .reshape(weight.shape[0], weight.shape[1])
    )
def smooth_activation(layer_norm, linear, activation_scales):
    """Applies the activation smoothing technique described in
    https://github.com/mit-han-lab/smoothquant.

    Scales are divided out of the preceding LayerNorm and multiplied into the
    linear weight, leaving the overall computation unchanged while smoothing
    the intermediate activation magnitudes for quantization.
    """
    # Work in NumPy even when the weights are torch tensors.
    if not isinstance(linear.weight, np.ndarray):
        linear_weight = linear.weight.numpy()
        activation_scales = activation_scales.numpy()
    else:
        linear_weight = linear.weight

    weight_scales = np.amax(np.absolute(linear_weight), axis=0)
    # Clamp to avoid division by (near) zero.
    weight_scales = np.maximum(weight_scales, 1e-5)

    activation_scales = activation_scales.astype(weight_scales.dtype)
    scales = np.sqrt(activation_scales / weight_scales)
    scales = np.maximum(scales, 1e-5)

    if not isinstance(linear.weight, np.ndarray):
        import torch

        scales = torch.from_numpy(scales)

    layer_norm.gamma /= scales
    layer_norm.beta /= scales
    linear.weight *= scales.reshape(1, -1)
def raise_unsupported(reasons):
    """Raises a ValueError listing why the model cannot be converted."""
    header = (
        "The model you are trying to convert is not supported by CTranslate2. "
        "We identified the following reasons:\n"
    )
    details = "".join("\n- " + reason for reason in reasons)
    raise ValueError(header + details)
class ConfigurationChecker:
    """Accumulates unsupported-configuration messages and reports them at once.

    Call the instance with ``(condition, message)``: messages whose condition
    is false are collected, and ``validate()`` raises a single error listing
    all of them.
    """

    def __init__(self):
        self._unsupported_reasons = []

    def __call__(self, assert_condition, error_message):
        if assert_condition:
            return
        self._unsupported_reasons.append(error_message)

    def validate(self):
        if self._unsupported_reasons:
            raise_unsupported(self._unsupported_reasons)
| 3,041 | 28.533981 | 88 | py |
CTranslate2 | CTranslate2-master/python/ctranslate2/converters/opennmt_py.py | import argparse
from ctranslate2.converters import utils
from ctranslate2.converters.converter import Converter
from ctranslate2.specs import common_spec, transformer_spec
# Mapping from OpenNMT-py activation names to CTranslate2 activation enums.
_SUPPORTED_ACTIVATIONS = {
    "gelu": common_spec.Activation.GELU,
    "fast_gelu": common_spec.Activation.GELUTanh,
    "relu": common_spec.Activation.RELU,
    "silu": common_spec.Activation.SWISH,
}

# Mapping from OpenNMT-py --feat_merge values to CTranslate2 merge modes.
_SUPPORTED_FEATURES_MERGE = {
    "concat": common_spec.EmbeddingsMerge.CONCAT,
    "sum": common_spec.EmbeddingsMerge.ADD,
}
def check_opt(opt, num_source_embeddings):
    """Validates OpenNMT-py training options against converter capabilities.

    Raises ValueError (via the checker) when an unsupported option is found.
    """
    # max_relative_positions encodes the position scheme: >0 relative,
    # -1 rotary, -2 ALiBi, 0 none.
    with_relative_position = getattr(opt, "max_relative_positions", 0) > 0
    with_rotary = getattr(opt, "max_relative_positions", 0) == -1
    with_alibi = getattr(opt, "max_relative_positions", 0) == -2
    activation_fn = getattr(opt, "pos_ffn_activation_fn", "relu")
    feat_merge = getattr(opt, "feat_merge", "concat")
    self_attn_type = getattr(opt, "self_attn_type", "scaled-dot")

    check = utils.ConfigurationChecker()
    check(
        opt.encoder_type == opt.decoder_type
        and opt.decoder_type in {"transformer", "transformer_lm"},
        "Options --encoder_type and --decoder_type must be"
        " 'transformer' or 'transformer_lm",
    )
    check(
        self_attn_type == "scaled-dot",
        "Option --self_attn_type %s is not supported (supported values are: scaled-dot)"
        % self_attn_type,
    )
    check(
        activation_fn in _SUPPORTED_ACTIVATIONS,
        "Option --pos_ffn_activation_fn %s is not supported (supported activations are: %s)"
        % (activation_fn, ", ".join(_SUPPORTED_ACTIVATIONS.keys())),
    )
    # Exactly one position scheme must be selected: either sinusoidal position
    # encodings or one of the relative/rotary/alibi variants.
    check(
        opt.position_encoding != (with_relative_position or with_rotary or with_alibi),
        "Options --position_encoding and --max_relative_positions cannot be both enabled "
        "or both disabled",
    )
    check(
        num_source_embeddings == 1 or feat_merge in _SUPPORTED_FEATURES_MERGE,
        "Option --feat_merge %s is not supported (supported merge modes are: %s)"
        % (feat_merge, " ".join(_SUPPORTED_FEATURES_MERGE.keys())),
    )
    check.validate()
def _get_model_spec_seq2seq(
    opt, variables, src_vocabs, tgt_vocabs, num_source_embeddings
):
    """Creates a model specification from the model options."""
    with_relative_position = getattr(opt, "max_relative_positions", 0) > 0
    activation_fn = getattr(opt, "pos_ffn_activation_fn", "relu")
    feat_merge = getattr(opt, "feat_merge", "concat")

    # Return the first head of the last layer unless the model was trained with alignments.
    if getattr(opt, "lambda_align", 0) == 0:
        alignment_layer = -1
        alignment_heads = 1
    else:
        alignment_layer = opt.alignment_layer
        alignment_heads = opt.alignment_heads

    num_heads = getattr(opt, "heads", 8)

    model_spec = transformer_spec.TransformerSpec.from_config(
        (opt.enc_layers, opt.dec_layers),
        num_heads,
        with_relative_position=with_relative_position,
        activation=_SUPPORTED_ACTIVATIONS[activation_fn],
        alignment_layer=alignment_layer,
        alignment_heads=alignment_heads,
        num_source_embeddings=num_source_embeddings,
        embeddings_merge=_SUPPORTED_FEATURES_MERGE[feat_merge],
        multi_query_attention=getattr(opt, "multiquery", False),
    )

    model_spec.config.decoder_start_token = getattr(opt, "decoder_start_token", "<s>")

    # Fill the spec with the checkpoint weights and register vocabularies.
    set_transformer_spec(model_spec, variables)
    for src_vocab in src_vocabs:
        model_spec.register_source_vocabulary(src_vocab)
    for tgt_vocab in tgt_vocabs:
        model_spec.register_target_vocabulary(tgt_vocab)

    return model_spec
def _get_model_spec_lm(opt, variables, src_vocabs, tgt_vocabs, num_source_embeddings):
    """Creates a model specification from the model options.

    Decoder-only variant; src_vocabs and num_source_embeddings are accepted
    for signature symmetry with _get_model_spec_seq2seq but not used here.
    """
    # max_relative_positions encodes the position scheme: >0 relative,
    # -1 rotary, -2 ALiBi.
    with_relative_position = getattr(opt, "max_relative_positions", 0) > 0
    with_rotary = getattr(opt, "max_relative_positions", 0) == -1
    with_alibi = getattr(opt, "max_relative_positions", 0) == -2
    activation_fn = getattr(opt, "pos_ffn_activation_fn", "relu")

    num_heads = getattr(opt, "heads", 8)
    # rotary_dim == 0 means "rotate the full head dimension".
    rotary_dim = 0 if with_rotary else None
    ffn_glu = activation_fn == "silu"

    model_spec = transformer_spec.TransformerDecoderModelSpec.from_config(
        opt.dec_layers,
        num_heads,
        activation=_SUPPORTED_ACTIVATIONS[activation_fn],
        ffn_glu=ffn_glu,
        with_relative_position=with_relative_position,
        alibi=with_alibi,
        rms_norm=opt.layer_norm == "rms",
        rotary_dim=rotary_dim,
        rotary_interleave=True,
        multi_query_attention=getattr(opt, "multiquery", False),
    )

    set_transformer_decoder(
        model_spec.decoder,
        variables,
        with_encoder_attention=False,
    )

    for tgt_vocab in tgt_vocabs:
        model_spec.register_vocabulary(tgt_vocab)

    return model_spec
def get_vocabs(vocab):
    """Extracts source and target vocabularies from a checkpoint 'vocab' entry.

    Supports the three historical OpenNMT-py formats: plain token lists,
    torchtext multi-fields, and the legacy tuple layout.

    Returns:
      A pair (src_vocabs, tgt_vocabs) of lists of token lists.
    """
    if not (isinstance(vocab, dict) and "src" in vocab):
        # Compatibility with older models.
        return [vocab[0][1].itos], [vocab[1][1].itos]

    if isinstance(vocab["src"], list):
        src_vocabs = [vocab["src"]]
        tgt_vocabs = [vocab["tgt"]]
        src_feats = vocab.get("src_feats")
        if src_feats is not None:
            # Source feature vocabularies become extra source embeddings.
            src_vocabs.extend(src_feats.values())
        return src_vocabs, tgt_vocabs

    src_vocabs = [field[1].vocab.itos for field in vocab["src"].fields]
    tgt_vocabs = [field[1].vocab.itos for field in vocab["tgt"].fields]
    return src_vocabs, tgt_vocabs
class OpenNMTPyConverter(Converter):
    """Converts models generated by OpenNMT-py."""

    def __init__(self, model_path: str):
        """Initializes the OpenNMT-py converter.

        Arguments:
          model_path: Path to the OpenNMT-py PyTorch model (.pt file).
        """
        self._model_path = model_path

    def _load(self):
        """Loads the checkpoint and returns the filled model spec."""
        import torch

        checkpoint = torch.load(self._model_path, map_location="cpu")
        src_vocabs, tgt_vocabs = get_vocabs(checkpoint["vocab"])
        check_opt(checkpoint["opt"], num_source_embeddings=len(src_vocabs))

        variables = checkpoint["model"]
        # The output projection is stored separately under "generator";
        # merge it into the main variable dict with a prefixed name.
        variables.update(
            {
                "generator.%s" % key: value
                for key, value in checkpoint["generator"].items()
            }
        )

        if checkpoint["opt"].decoder_type == "transformer_lm":
            return _get_model_spec_lm(
                checkpoint["opt"],
                variables,
                src_vocabs,
                tgt_vocabs,
                num_source_embeddings=len(src_vocabs),
            )
        else:
            return _get_model_spec_seq2seq(
                checkpoint["opt"],
                variables,
                src_vocabs,
                tgt_vocabs,
                num_source_embeddings=len(src_vocabs),
            )
def set_transformer_spec(spec, variables):
    """Fills an encoder-decoder Transformer spec from checkpoint variables."""
    set_transformer_encoder(spec.encoder, variables)
    set_transformer_decoder(spec.decoder, variables)
def set_transformer_encoder(spec, variables):
    """Fills the encoder spec from variables under the 'encoder.*' scope."""
    set_input_layers(spec, variables, "encoder")
    set_layer_norm(spec.layer_norm, variables, "encoder.layer_norm")
    for i, layer in enumerate(spec.layer):
        set_transformer_encoder_layer(layer, variables, "encoder.transformer.%d" % i)
def set_transformer_decoder(spec, variables, with_encoder_attention=True):
    """Fills the decoder spec from variables under the 'decoder.*' scope.

    with_encoder_attention is False for decoder-only language models.
    """
    set_input_layers(spec, variables, "decoder")
    set_layer_norm(spec.layer_norm, variables, "decoder.layer_norm")
    for i, layer in enumerate(spec.layer):
        set_transformer_decoder_layer(
            layer,
            variables,
            "decoder.transformer_layers.%d" % i,
            with_encoder_attention=with_encoder_attention,
        )

    try:
        set_linear(spec.projection, variables, "generator")
    except KeyError:
        # Compatibility when the generator was a nn.Sequential module.
        set_linear(spec.projection, variables, "generator.0")
def set_input_layers(spec, variables, scope):
    """Loads embeddings and (optional) position encodings for the given scope."""
    if hasattr(spec, "position_encodings"):
        set_position_encodings(
            spec.position_encodings,
            variables,
            "%s.embeddings.make_embedding.pe" % scope,
        )
    else:
        # No position encodings: embedding scaling is also disabled.
        # See https://github.com/OpenNMT/OpenNMT-py/issues/1722
        spec.scale_embeddings = False

    specs = spec.embeddings if isinstance(spec.embeddings, list) else [spec.embeddings]
    for index, embeddings_spec in enumerate(specs):
        set_embeddings(
            embeddings_spec,
            variables,
            "%s.embeddings.make_embedding.emb_luts.%d" % (scope, index),
        )
def set_transformer_encoder_layer(spec, variables, scope):
    """Loads one encoder layer (self-attention + FFN) from the given scope."""
    set_ffn(spec.ffn, variables, "%s.feed_forward" % scope)
    set_multi_head_attention(
        spec.self_attention,
        variables,
        "%s.self_attn" % scope,
        self_attention=True,
    )
    set_layer_norm(spec.self_attention.layer_norm, variables, "%s.layer_norm" % scope)
def set_transformer_decoder_layer(spec, variables, scope, with_encoder_attention=True):
    """Loads one decoder layer from the given scope.

    The cross-attention block is skipped for decoder-only models.
    """
    set_ffn(spec.ffn, variables, "%s.feed_forward" % scope)
    set_multi_head_attention(
        spec.self_attention,
        variables,
        "%s.self_attn" % scope,
        self_attention=True,
    )
    set_layer_norm(spec.self_attention.layer_norm, variables, "%s.layer_norm_1" % scope)
    if with_encoder_attention:
        set_multi_head_attention(spec.attention, variables, "%s.context_attn" % scope)
        set_layer_norm(spec.attention.layer_norm, variables, "%s.layer_norm_2" % scope)
def set_ffn(spec, variables, scope):
    """Loads the feed-forward sublayer from the given scope."""
    set_layer_norm(spec.layer_norm, variables, "%s.layer_norm" % scope)
    set_linear(spec.linear_0, variables, "%s.w_1" % scope)
    set_linear(spec.linear_1, variables, "%s.w_2" % scope)
    # Gated FFN (e.g. SwiGLU) has a third, non-activated projection.
    if hasattr(spec, "linear_0_noact"):
        set_linear(spec.linear_0_noact, variables, "%s.w_3" % scope)
def set_multi_head_attention(spec, variables, scope, self_attention=False):
    """Loads attention projections, fusing them into CTranslate2's layout.

    Self-attention fuses Q/K/V into linear[0]; cross-attention keeps Q in
    linear[0] and fuses K/V into linear[1]. The output projection always goes
    into the last linear slot.
    """
    if self_attention:
        split_layers = [common_spec.LinearSpec() for _ in range(3)]
        set_linear(split_layers[0], variables, "%s.linear_query" % scope)
        set_linear(split_layers[1], variables, "%s.linear_keys" % scope)
        set_linear(split_layers[2], variables, "%s.linear_values" % scope)
        utils.fuse_linear(spec.linear[0], split_layers)
    else:
        set_linear(spec.linear[0], variables, "%s.linear_query" % scope)
        split_layers = [common_spec.LinearSpec() for _ in range(2)]
        set_linear(split_layers[0], variables, "%s.linear_keys" % scope)
        set_linear(split_layers[1], variables, "%s.linear_values" % scope)
        utils.fuse_linear(spec.linear[1], split_layers)
    set_linear(spec.linear[-1], variables, "%s.final_linear" % scope)
    if hasattr(spec, "relative_position_keys"):
        # OpenNMT-py shares one relative position table for keys and values.
        spec.relative_position_keys = _get_variable(
            variables, "%s.relative_positions_embeddings.weight" % scope
        )
        spec.relative_position_values = spec.relative_position_keys
def set_layer_norm(spec, variables, scope):
    """Loads LayerNorm parameters, handling both naming conventions."""
    try:
        spec.gamma = _get_variable(variables, "%s.weight" % scope)
    except KeyError:
        # Compatibility with older models using a custom LayerNorm module
        # (parameters named a_2/b_2 instead of weight/bias).
        spec.gamma = _get_variable(variables, "%s.a_2" % scope)
        spec.beta = _get_variable(variables, "%s.b_2" % scope)

    try:
        spec.beta = _get_variable(variables, "%s.bias" % scope)
    except KeyError:
        # Already set from b_2 above for the legacy layout.
        pass
def set_linear(spec, variables, scope):
    """Loads a linear layer's weight, and its bias when present."""
    spec.weight = _get_variable(variables, "%s.weight" % scope)
    bias = variables.get("%s.bias" % scope)
    if bias is None:
        return
    spec.bias = bias.numpy()
def set_embeddings(spec, variables, scope):
    """Loads the embedding table from the given scope."""
    spec.weight = _get_variable(variables, "%s.weight" % scope)
def set_position_encodings(spec, variables, scope):
    # Drop the leading batch dimension stored in the checkpoint.
    spec.encodings = _get_variable(variables, "%s.pe" % scope).squeeze()
def _get_variable(variables, name):
    # Returns the named checkpoint tensor as a NumPy array; raises KeyError
    # when the variable does not exist.
    return variables[name].numpy()
def main():
    """Command-line entry point converting an OpenNMT-py model to CTranslate2."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--model_path", required=True, help="Model path.")
    # Add the common converter options (quantization, output directory, ...).
    Converter.declare_arguments(parser)
    args = parser.parse_args()
    OpenNMTPyConverter(args.model_path).convert_from_args(args)


if __name__ == "__main__":
    main()
| 12,461 | 34.504274 | 92 | py |
CTranslate2 | CTranslate2-master/python/tests/test_storage_view.py | import sys
import numpy as np
import pytest
import test_utils
import ctranslate2
def _assert_same_array(a, b):
assert a["shape"] == b["shape"]
assert a["data"] == b["data"]
assert a["typestr"] == b["typestr"]
@pytest.mark.parametrize(
    "dtype,name",
    [
        (np.int8, "int8"),
        (np.int16, "int16"),
        (np.int32, "int32"),
        (np.float16, "float16"),
        (np.float32, "float32"),
    ],
)
def test_storageview_cpu(dtype, name):
    """Checks that a CPU StorageView shares memory with the source array."""
    x = np.ones((2, 4), dtype=dtype)
    s = ctranslate2.StorageView.from_array(x)
    _assert_same_array(s.__array_interface__, x.__array_interface__)
    # A CPU storage must not expose the CUDA array interface.
    with pytest.raises(AttributeError, match="CPU"):
        s.__cuda_array_interface__

    assert str(s) == " 1 1 1 ... 1 1 1\n[cpu:0 %s storage viewed as 2x4]" % name

    # Mutating the source array is reflected in the view (shared memory).
    x[0][2] = 3
    x[1][3] = 8
    assert str(s) == " 1 1 3 ... 1 1 8\n[cpu:0 %s storage viewed as 2x4]" % name

    y = np.array(x)
    assert test_utils.array_equal(x, y)
@test_utils.require_cuda
def test_storageview_cuda():
    """Checks that a CUDA StorageView shares memory with a torch CUDA tensor."""
    import torch

    x = torch.ones((2, 4), device="cuda")
    s = ctranslate2.StorageView.from_array(x)
    _assert_same_array(s.__cuda_array_interface__, x.__cuda_array_interface__)
    # A CUDA storage must not expose the CPU array interface.
    with pytest.raises(AttributeError, match="CUDA"):
        s.__array_interface__

    assert str(s) == " 1 1 1 ... 1 1 1\n[cuda:0 float32 storage viewed as 2x4]"

    # Mutating the source tensor is reflected in the view (shared memory).
    x[0][2] = 3
    x[1][3] = 8
    assert str(s) == " 1 1 3 ... 1 1 8\n[cuda:0 float32 storage viewed as 2x4]"

    y = torch.as_tensor(s, device="cuda")
    _assert_same_array(s.__cuda_array_interface__, y.__cuda_array_interface__)
def test_storageview_strides():
    """Non-contiguous arrays (e.g. transposed views) must be rejected."""
    x = np.ones((2, 4), dtype=np.float32)
    x_t = x.transpose()
    with pytest.raises(ValueError, match="contiguous"):
        ctranslate2.StorageView.from_array(x_t)
def test_storageview_readonly():
    """Read-only arrays must be rejected (the view needs writable memory)."""
    x = np.ones((2, 4), dtype=np.float32)
    x.flags.writeable = False
    with pytest.raises(ValueError, match="read-only"):
        ctranslate2.StorageView.from_array(x)
def test_storageview_reference():
    """A StorageView must hold a reference to its source array and release it."""
    x = np.ones((2, 4), dtype=np.float32)
    refcount_before = sys.getrefcount(x)
    s = ctranslate2.StorageView.from_array(x)
    refcount_after = sys.getrefcount(x)
    # Creating the view keeps the source alive via one extra reference.
    assert refcount_after == refcount_before + 1
    del s
    refcount_after_del = sys.getrefcount(x)
    # Destroying the view drops the reference again.
    assert refcount_after_del == refcount_before
| 2,387 | 25.533333 | 80 | py |
CTranslate2 | CTranslate2-master/python/tests/test_spec.py | import numpy as np
import pytest
import test_utils
import ctranslate2
from ctranslate2.converters import utils as conversion_utils
from ctranslate2.specs import common_spec, transformer_spec
from ctranslate2.specs.model_spec import OPTIONAL, index_spec
def test_layer_spec_validate():
    """Checks dtype preservation and scalar conversion during validation."""

    class SubSpec(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.a = np.ones([5], dtype=np.float16)

    class Spec(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.a = np.zeros([5], dtype=np.float32)
            self.b = np.zeros([5], dtype=np.float16)
            self.c = np.zeros([5], dtype=np.int32)
            self.d = OPTIONAL
            self.e = SubSpec()
            self.f = True
            self.g = "hello"

    spec = Spec()
    spec.validate()
    # Numeric attributes keep their dtype; optional attributes stay unset.
    assert spec.a.dtype == "float32"
    assert spec.b.dtype == "float16"
    assert spec.c.dtype == "int32"
    assert spec.d == OPTIONAL
    assert spec.e.a.dtype == "float16"
    # Booleans and strings are serialized as int8 arrays.
    assert test_utils.array_equal(spec.f.numpy(), np.int8(1))
    assert test_utils.array_equal(
        spec.g.numpy(), np.array([104, 101, 108, 108, 111], dtype=np.int8)
    )

    # Attributes not declared in __init__ cannot be added afterwards.
    with pytest.raises(AttributeError, match="Attribute z does not exist"):
        spec.z = True
def test_layer_spec_validate_unset():
    """validate() reports every unset (None) attribute, including nested ones."""
    class Inner(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.attr_1 = None
    class Outer(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.attr_1 = np.zeros([5], dtype=np.float32)
            self.attr_2 = None
            self.attr_3 = Inner()
    # The error lists the unset attributes by their dotted path.
    with pytest.raises(ValueError, match="attr_2\nattr_3.attr_1"):
        Outer().validate()
def test_layer_spec_optimize():
    """optimize() quantizes weights, aliases duplicate arrays, and casts floats."""
    class SubSpec(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.a = np.ones([6], dtype=np.float32)
            self.weight = np.ones([5, 4], dtype=np.float32)
            self.weight_scale = OPTIONAL
    class Spec(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.a = np.ones([5], dtype=np.float32)
            self.b = np.ones([5], dtype=np.float32)
            self.c = np.zeros([5], dtype=np.int32)
            self.d = np.dtype("float32").type(3.14)
            self.sub = SubSpec()
    spec = Spec()
    spec.validate()
    spec.optimize(quantization="int16")
    # int16 quantization only converts "weight" variables (and fills their
    # scale); other float32 variables are untouched.
    assert spec.a.dtype == "float32"
    # b has the same content as a, so it is replaced by an alias to "a".
    assert spec.b == "a"
    assert spec.c.dtype == "int32"
    assert spec.d.dtype == "float32"
    assert spec.sub.weight.dtype == "int16"
    assert spec.sub.weight_scale.dtype == "float32"
    spec = Spec()
    spec.validate()
    spec.optimize(quantization="float16")
    # float16 quantization casts every float32 array (not just weights),
    # but integer arrays and float scalars keep their dtype.
    assert spec.a.dtype == "float16"
    assert spec.b == "a"
    assert spec.c.dtype == "int32"
    assert spec.d.dtype == "float32"
    assert spec.sub.weight.dtype == "float16"
    assert spec.sub.a.dtype == "float16"
def test_int8_quantization():
    """int8 quantization scales each row to [-127, 127]; zero rows get scale 1."""
    class Spec(ctranslate2.specs.LayerSpec):
        def __init__(self):
            self.weight = np.array([[-10, -3, 5, 2], [0, 0, 0, 0]], dtype=np.float32)
            self.weight_scale = OPTIONAL
    spec = Spec()
    spec.validate()
    spec.optimize(quantization="int8")
    expected_weight = np.array([[-127, -38, 64, 25], [0, 0, 0, 0]], dtype=np.int8)
    expected_scale = np.array([12.7, 1], dtype=np.float32)
    assert test_utils.array_equal(spec.weight.numpy(), expected_weight)
    assert test_utils.array_equal(spec.weight_scale.numpy(), expected_scale)
# Each parametrize case: the quantization mode and the expected converted
# weight, weight scale (None when no scale is produced), and bias.
@pytest.mark.parametrize(
    "quantization,expected_weight,expected_weight_scale,expected_bias",
    [
        (
            None,
            np.array([[-10, -3, 5, 2]], dtype=np.float32),
            None,
            np.array([4], dtype=np.float32),
        ),
        (
            "float16",
            np.array([[-10, -3, 5, 2]], dtype=np.float16),
            None,
            np.array([4], dtype=np.float16),
        ),
        (
            "int8",
            np.array([[-127, -38, 64, 25]], dtype=np.int8),
            np.array([12.7], dtype=np.float32),
            np.array([4], dtype=np.float32),
        ),
        (
            "int8_float16",
            np.array([[-127, -38, 64, 25]], dtype=np.int8),
            np.array([12.7], dtype=np.float32),
            np.array([4], dtype=np.float16),
        ),
        (
            "int16",
            np.array([[-1024, -307, 512, 205]], dtype=np.int16),
            np.float32(102.4),
            np.array([4], dtype=np.float32),
        ),
    ],
)
def test_fp16_weights(
    quantization, expected_weight, expected_weight_scale, expected_bias
):
    """float16 source weights are converted correctly for each quantization."""
    class Spec(ctranslate2.specs.LayerSpec):
        def __init__(self, weight, bias):
            self.weight = weight
            self.weight_scale = OPTIONAL
            self.bias = bias
    weight = np.array([[-10, -3, 5, 2]], dtype=np.float16)
    bias = np.array([4], dtype=np.float16)
    spec = Spec(weight, bias)
    spec.validate()
    spec.optimize(quantization=quantization)
    assert test_utils.array_equal(spec.weight.numpy(), expected_weight)
    assert test_utils.array_equal(spec.bias.numpy(), expected_bias)
    # Check the weights were not copied or converted.
    # (identity `is` checks: when the target dtype already matches, the
    # original arrays must be reused, not duplicated)
    if quantization == "float16":
        assert spec.weight.numpy() is weight
        assert spec.bias.numpy() is bias
    elif quantization == "int8_float16":
        assert spec.bias.numpy() is bias
    if expected_weight_scale is None:
        assert spec.weight_scale == OPTIONAL
    else:
        assert test_utils.array_equal(spec.weight_scale.numpy(), expected_weight_scale)
def test_index_spec():
    """index_spec resolves slash-separated paths into nested spec objects."""
    spec = ctranslate2.specs.TransformerSpec.from_config(6, 8)
    layer = index_spec(spec, "encoder/layer_5")
    assert isinstance(layer, transformer_spec.TransformerEncoderLayerSpec)
    ffn = index_spec(spec, "encoder/layer_5/ffn")
    assert isinstance(ffn, transformer_spec.FeedForwardSpec)
def test_fuse_linear_no_bias():
    """Fusing bias-less layers leaves bias unset; one bias pads others with 0."""
    layers = []
    for _ in range(3):
        layer = common_spec.LinearSpec()
        layer.weight = np.zeros([64, 64], dtype=np.float32)
        layers.append(layer)
    fused = common_spec.LinearSpec()
    conversion_utils.fuse_linear(fused, layers)
    assert fused.weight.shape[0] == 64 * 3
    assert fused.bias == OPTIONAL
    # Give only the middle layer a bias: the fused bias is zero elsewhere.
    layers[1].bias = np.ones([64], dtype=np.float32)
    fused = common_spec.LinearSpec()
    conversion_utils.fuse_linear(fused, layers)
    zeros = np.zeros([64], dtype=np.float32)
    ones = np.ones([64], dtype=np.float32)
    assert test_utils.array_equal(fused.bias[:64], zeros)
    assert test_utils.array_equal(fused.bias[64:128], ones)
    assert test_utils.array_equal(fused.bias[128:], zeros)
@test_utils.skip_on_windows
def test_fuse_linear_torch():
    """fuse_linear concatenates torch weights/biases on the output dimension."""
    import torch

    layers = []
    for _ in range(3):
        layer = common_spec.LinearSpec()
        layer.weight = torch.zeros([64, 64], dtype=torch.float32)
        layer.bias = torch.zeros([64], dtype=torch.float32)
        layers.append(layer)
    fused = common_spec.LinearSpec()
    conversion_utils.fuse_linear(fused, layers)
    assert fused.weight.shape[0] == 64 * 3
    assert fused.bias.shape[0] == 64 * 3
@test_utils.skip_on_windows
def test_smooth_activation_torch():
    """Smoke test: smooth_activation runs without error on fp16 specs."""
    import torch

    norm = common_spec.LayerNormSpec()
    norm.beta = torch.rand([64], dtype=torch.float16)
    norm.gamma = torch.rand([64], dtype=torch.float16)
    proj = common_spec.LinearSpec()
    proj.weight = torch.rand([64, 64], dtype=torch.float16)
    scales = torch.rand([64], dtype=torch.float32)
    # Just check that no error is raised.
    conversion_utils.smooth_activation(norm, proj, scales)
@test_utils.skip_on_windows
@pytest.mark.parametrize("variable_dtype", ["float32", "float16", "bfloat16"])
@pytest.mark.parametrize(
    "quantization,expected_weight_dtype,expected_bias_dtype",
    [
        (None, "float32", "float32"),
        ("int8", "int8", "float32"),
        ("int8_float16", "int8", "float16"),
        ("int8_bfloat16", "int8", "bfloat16"),
        ("int16", "int16", "float32"),
        ("float16", "float16", "float16"),
        ("bfloat16", "bfloat16", "bfloat16"),
    ],
)
def test_torch_variables(
    tmp_dir, variable_dtype, quantization, expected_weight_dtype, expected_bias_dtype
):
    """Torch variables of any dtype are converted to the expected dtypes

    for every quantization mode, and the model can then be saved.
    """
    import torch
    variable_dtype = getattr(torch, variable_dtype)
    # Minimal model spec with a single linear layer built from torch tensors.
    class TorchModel(ctranslate2.specs.ModelSpec):
        def __init__(self):
            super().__init__()
            self.dense = common_spec.LinearSpec()
            self.dense.weight = torch.ones([16, 4], dtype=variable_dtype)
            self.dense.bias = torch.ones([16], dtype=variable_dtype)
        @property
        def name(self):
            return "TorchModel"
    model = TorchModel()
    model.validate()
    model.optimize(quantization)
    variables = model.variables()
    assert variables["dense/weight"].dtype == expected_weight_dtype
    assert variables["dense/bias"].dtype == expected_bias_dtype
    # Saving must also work regardless of the source dtype.
    model.save(tmp_dir)
| 8,986 | 30.313589 | 87 | py |
CTranslate2 | CTranslate2-master/python/tests/test_transformers.py | import inspect
import json
import os
import shutil
import numpy as np
import pytest
import test_utils
import ctranslate2
@pytest.fixture
def clear_transformers_cache():
    """Run the test, then purge the Transformers model cache (CI only)."""
    yield
    clear_transformers_cache_in_ci()
def clear_transformers_cache_in_ci():
    """Remove the Transformers model cache when running in a CI environment.

    Downloaded checkpoints are deleted between tests to keep disk usage low
    on CI machines. ``ignore_errors=True`` makes the cleanup a no-op when the
    cache directory does not exist (e.g. no model was downloaded), instead of
    failing the teardown with ``FileNotFoundError``.
    """
    import transformers

    if os.environ.get("CI") == "true":
        shutil.rmtree(transformers.utils.default_cache_path, ignore_errors=True)
# Translation test cases. Each entry is a tuple of:
#   (HF model name,
#    source token line(s) -- a string or a list of strings (one per example),
#    target prefix token line(s) -- "" or [] when no prefix is forced,
#    expected output token line(s),
#    extra keyword arguments passed to translate_batch).
_TRANSFORMERS_TRANSLATION_TESTS = [
    (
        "Helsinki-NLP/opus-mt-en-de",
        "▁Hello ▁world ! </s>",
        "",
        "▁Hallo ▁Welt !",
        dict(),
    ),
    (
        "Helsinki-NLP/opus-mt-en-roa",
        ">>ind<< ▁The ▁Prime ▁Minister ▁is ▁coming ▁back ▁tomorrow . </s>",
        "",
        "▁Per da na ▁Men teri ▁akan ▁kembali ▁besok .",
        dict(),
    ),
    (
        "Helsinki-NLP/opus-mt-mul-en",
        "▁Bon jo ur ▁le ▁mo nde </s>",
        "",
        "▁Welcome ▁to ▁the ▁World",
        dict(),
    ),
    (
        "facebook/m2m100_418M",
        "__en__ ▁Hello ▁world ! </s>",
        "__de__",
        "__de__ ▁Hallo ▁der ▁Welt !",
        dict(),
    ),
    (
        "facebook/mbart-large-50-many-to-many-mmt",
        "en_XX ▁Hello ▁world ! </s>",
        "de_DE",
        "de_DE ▁Hallo ▁Welt !",
        dict(),
    ),
    (
        "facebook/mbart-large-en-ro",
        "▁UN ▁Chief ▁Say s ▁There ▁Is ▁No ▁Militar y ▁Solution ▁in ▁Syria </s> en_XX",
        "ro_RO",
        "▁Şe ful ▁ONU ▁de cla ră ▁că ▁nu ▁există ▁o ▁solu ţie ▁militar ă ▁în ▁Siria",
        dict(),
    ),
    (
        "facebook/bart-base",
        "<s> UN ĠChief ĠSays ĠThere ĠIs ĠNo <mask> Ġin ĠSyria </s>",
        "",
        "<s> UN ĠChief ĠSays ĠThere ĠIs ĠNo ĠWar Ġin ĠSyria",
        dict(),
    ),
    (
        "google/pegasus-xsum",
        "▁PG & E ▁stated ▁it ▁scheduled ▁the ▁blackout s ▁in ▁response ▁to ▁forecasts "
        "▁for ▁high ▁winds ▁amid ▁dry ▁conditions . ▁The ▁aim ▁is ▁to ▁reduce ▁the "
        "▁risk ▁of ▁wildfires . ▁Nearly ▁800 ▁thousand ▁customers ▁were ▁scheduled ▁to "
        "▁be ▁affected ▁by ▁the ▁shutoff s ▁which ▁were ▁expected ▁to ▁last ▁through "
        "▁at ▁least ▁midday ▁tomorrow . </s>",
        "",
        "▁California ' s ▁largest ▁electricity ▁provider ▁has ▁turned ▁off ▁power ▁to "
        "▁hundreds ▁of ▁thousands ▁of ▁customers .",
        dict(length_penalty=0.6),
    ),
    (
        "facebook/nllb-200-distilled-600M",
        ["▁Hello ▁world ! </s> eng_Latn", "</s> eng_Latn"],
        ["fra_Latn", "fra_Latn"],
        ["fra_Latn ▁Bon jour ▁le ▁monde ▁!", "fra_Latn"],
        dict(),
    ),
    (
        "t5-small",
        "▁translate ▁English ▁to ▁German : ▁The ▁house ▁is ▁wonderful . </s>",
        "",
        "▁Das ▁Haus ▁ist ▁wunderbar .",
        dict(),
    ),
    (
        "ml6team/mt5-small-german-query-generation",
        "▁Das ▁Lama ▁( L ama ▁glam a ) ▁ist ▁eine ▁Art ▁der ▁Kam ele . "
        "▁Es ▁ist ▁in ▁den ▁süd amerikanische n ▁And en ▁ver breite t ▁und "
        "▁eine ▁vom ▁Guan ako ▁ab sta mmende ▁ Haustier form . </s>",
        "",
        "▁Was ▁ist ▁Lama ▁glam a ?",
        dict(),
    ),
]
@test_utils.only_on_linux
@pytest.mark.parametrize(
    "model,source_tokens,target_tokens,expected_tokens,kwargs",
    _TRANSFORMERS_TRANSLATION_TESTS,
    ids=[args[0] for args in _TRANSFORMERS_TRANSLATION_TESTS],
)
def test_transformers_translation(
    clear_transformers_cache,
    tmp_dir,
    model,
    source_tokens,
    target_tokens,
    expected_tokens,
    kwargs,
):
    """Convert a Transformers translation model and check its output tokens."""
    converter = ctranslate2.converters.TransformersConverter(model)
    output_dir = converter.convert(str(tmp_dir.join("ctranslate2_model")))
    # Normalize single examples into batches of one.
    if not isinstance(expected_tokens, list):
        expected_tokens = [expected_tokens]
    if not isinstance(source_tokens, list):
        source_tokens = [source_tokens]
    if target_tokens and not isinstance(target_tokens, list):
        target_tokens = [target_tokens]
    source = [line.split() for line in source_tokens]
    target = [line.split() for line in target_tokens] if target_tokens else None
    translator = ctranslate2.Translator(output_dir)
    results = translator.translate_batch(source, target, **kwargs)
    assert [" ".join(result.hypotheses[0]) for result in results] == expected_tokens
# Generation test cases. Each entry is a tuple of:
#   (HF model name, prompt token line, max_length, expected output token line).
_TRANSFORMERS_GENERATION_TESTS = [
    (
        "bigcode/tiny_starcoder_py",
        (
            "<fim_prefix> def Ġprint _ one _ two _ three (): ĊĠĠĠ Ġprint (' one ') "
            "ĊĠĠĠĠ <fim_suffix> ĊĠĠĠ Ġprint (' three ') <fim_middle>"
        ),
        26,
        (
            "<fim_prefix> def Ġprint _ one _ two _ three (): ĊĠĠĠ Ġprint (' one ') "
            "ĊĠĠĠĠ <fim_suffix> ĊĠĠĠ Ġprint (' three ') <fim_middle>"
            " print (' two ')"
        ),
    ),
    (
        "Salesforce/codegen-350M-mono",
        "def Ġhello _ name ( name ):",
        25,
        "def Ġhello _ name ( name ):"
        ' Ċ print ( f " Hello Ġ{ name } ") Ċ Ċ hello _ name (" John ")',
    ),
    (
        "gpt2",
        "<|endoftext|>",
        10,
        "Ċ The Ġfirst Ġtime ĠI Ġsaw Ġthe Ġnew Ġversion Ġof",
    ),
    (
        "facebook/opt-350m",
        "</s>",
        10,
        "Ċ The Ġfollowing Ġis Ġa Ġlist Ġof Ġthe Ġmost Ġpopular",
    ),
    (
        "microsoft/DialoGPT-medium",
        "Hello <|endoftext|>",
        100,
        "Hello <|endoftext|> Hello Ġ! Ġ: D",
    ),
    (
        "bigscience/bloom-560m",
        "Hello , ĠI Ġam",
        20,
        "Hello , ĠI Ġam Ġa Ġnew bie Ġin Ġthe Ġworld Ġof Ġweb Ġdesign Ġand ĠI Ġam "
        "Ġlooking Ġfor Ġa Ġweb Ġdeveloper",
    ),
]
@test_utils.only_on_linux
@pytest.mark.parametrize(
    "model,start_tokens,max_length,expected_tokens",
    _TRANSFORMERS_GENERATION_TESTS,
    ids=[args[0] for args in _TRANSFORMERS_GENERATION_TESTS],
)
def test_transformers_generation(
    clear_transformers_cache,
    tmp_dir,
    model,
    start_tokens,
    max_length,
    expected_tokens,
):
    """Convert a causal LM and check generation from a prompt."""
    converter = ctranslate2.converters.TransformersConverter(model)
    output_dir = converter.convert(str(tmp_dir.join("ctranslate2_model")))
    generator = ctranslate2.Generator(output_dir)
    results = generator.generate_batch([start_tokens.split()], max_length=max_length)
    assert " ".join(results[0].sequences[0]) == expected_tokens
    # An empty batch yields an empty result list...
    assert generator.generate_batch([]) == []
    # ...but an empty prompt inside a batch is rejected.
    with pytest.raises(ValueError, match="start token"):
        generator.generate_batch([[]])
@test_utils.only_on_linux
def test_transformers_marianmt_vocabulary(clear_transformers_cache, tmp_dir):
    """The trailing MarianMT <pad> token is not kept in the saved vocabulary."""
    converter = ctranslate2.converters.TransformersConverter(
        "Helsinki-NLP/opus-mt-en-de"
    )
    output_dir = converter.convert(str(tmp_dir.join("ctranslate2_model")))
    vocabulary_path = os.path.join(output_dir, "shared_vocabulary.json")
    with open(vocabulary_path, encoding="utf-8") as vocabulary_file:
        vocab = json.load(vocabulary_file)
    assert vocab[-1] != "<pad>"
@test_utils.only_on_linux
@pytest.mark.parametrize("beam_size", [1, 2])
def test_transformers_marianmt_disable_unk(
    clear_transformers_cache, tmp_dir, beam_size
):
    """disable_unk prevents <unk> from appearing in any hypothesis."""
    converter = ctranslate2.converters.TransformersConverter(
        "Helsinki-NLP/opus-mt-en-roa"
    )
    output_dir = converter.convert(str(tmp_dir.join("ctranslate2_model")))
    translator = ctranslate2.Translator(output_dir)
    source = ">>ind<< ▁The ▁Prime <unk> ▁is ▁coming ▁back ▁tomorrow . </s>".split()
    output = translator.translate_batch([source], beam_size=beam_size, disable_unk=True)
    assert "<unk>" not in output[0].hypotheses[0]
@test_utils.only_on_linux
@test_utils.on_available_devices
def test_transformers_bert(clear_transformers_cache, tmp_dir, device):
    """The converted BERT encoder matches the reference Transformers outputs."""
    import torch
    import transformers
    text = ["Hello world!", "Hello, my dog is cute"]
    model_name = "bert-base-uncased"
    # Reference forward pass with the original Transformers model.
    model = transformers.BertModel.from_pretrained(model_name)
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    inputs.to(device)
    model.to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    # Padding mask used below to zero out positions past each sequence end.
    mask = inputs.attention_mask.unsqueeze(-1).cpu().numpy()
    ref_last_hidden_state = outputs.last_hidden_state.cpu().numpy()
    ref_pooler_output = outputs.pooler_output.cpu().numpy()
    # Convert and run the same inputs through the CTranslate2 encoder.
    converter = ctranslate2.converters.TransformersConverter(model_name)
    output_dir = str(tmp_dir.join("ctranslate2_model"))
    output_dir = converter.convert(output_dir)
    encoder = ctranslate2.Encoder(output_dir, device=device)
    ids = [tokenizer(t).input_ids for t in text]
    outputs = encoder.forward_batch(ids)
    last_hidden_state = _to_numpy(outputs.last_hidden_state, device)
    assert last_hidden_state.shape == ref_last_hidden_state.shape
    # Compare only the non-padded positions (mask applied after shape check).
    last_hidden_state *= mask
    ref_last_hidden_state *= mask
    np.testing.assert_array_almost_equal(
        last_hidden_state, ref_last_hidden_state, decimal=5
    )
    pooler_output = _to_numpy(outputs.pooler_output, device)
    assert pooler_output.shape == ref_pooler_output.shape
    np.testing.assert_array_almost_equal(pooler_output, ref_pooler_output, decimal=5)
def _to_numpy(storage, device):
import torch
return (
np.array(storage)
if device == "cpu"
else torch.as_tensor(storage, device=device).cpu().numpy()
)
@test_utils.only_on_linux
def test_transformers_gptbigcode(clear_transformers_cache, tmp_dir):
    """GPTBigCode logits must match the reference Transformers implementation."""
    import transformers

    model_name = "hf-internal-testing/tiny-random-GPTBigCodeForCausalLM"
    _check_generator_logits(
        tmp_dir,
        model_name,
        transformers.GPTBigCodeForCausalLM,
        transformers.AutoTokenizer,
        "hello",
    )
def _check_generator_logits(
    tmp_dir, model_name, hf_model_class, hf_tokenizer_class, input_text
):
    """Compare CTranslate2 forward logits against the reference HF model.

    Runs ``input_text`` through the original Transformers model, converts the
    model with the TransformersConverter, and asserts the CTranslate2
    ``forward_batch`` logits match shape and values.
    """
    import torch
    model = hf_model_class.from_pretrained(model_name)
    tokenizer = hf_tokenizer_class.from_pretrained(model_name)
    inputs = tokenizer(input_text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs, labels=inputs["input_ids"])
    ref_logits = outputs.logits.numpy()
    converter = ctranslate2.converters.TransformersConverter(model_name)
    output_dir = str(tmp_dir.join("ctranslate2_model"))
    output_dir = converter.convert(output_dir)
    generator = ctranslate2.Generator(output_dir)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(input_text))
    logits = generator.forward_batch([tokens])
    logits = np.array(logits)
    assert logits.shape == ref_logits.shape
    np.testing.assert_array_almost_equal(logits, ref_logits)
class TestGeneration:
    """End-to-end tests of ctranslate2.Generator against converted GPT-2."""
    @classmethod
    def teardown_class(cls):
        # Drop downloaded checkpoints once all tests in the class have run.
        clear_transformers_cache_in_ci()
    @test_utils.only_on_linux
    def test_transformers_lm_scoring(self, tmp_dir):
        """score_batch returns per-token log probs and skips the first token."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        tokens = "Ċ The Ġfirst Ġtime ĠI Ġsaw Ġthe Ġnew Ġversion Ġof".split()
        output = generator.score_batch([tokens])[0]
        assert output.tokens == tokens[1:]
        assert len(output.log_probs) == len(output.tokens)
        # Test empty inputs.
        assert generator.score_batch([]) == []
        output = generator.score_batch([[], tokens])[0]
        assert not output.tokens
        assert not output.log_probs
        output = generator.score_batch([["<|endoftext|>"]])[0]
        assert not output.tokens
        assert not output.log_probs
    @test_utils.only_on_linux
    @test_utils.on_available_devices
    @pytest.mark.parametrize("return_log_probs", [True, False])
    @pytest.mark.parametrize("tensor_input", [True, False])
    def test_transformers_lm_forward(
        self, tmp_dir, device, return_log_probs, tensor_input
    ):
        """forward_batch matches HF logits for both list and tensor inputs."""
        import torch
        import transformers
        model_name = "gpt2"
        model = transformers.GPT2LMHeadModel.from_pretrained(model_name)
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
        converter = ctranslate2.converters.TransformersConverter(model_name)
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir, device=device)
        text = ["Hello world!"]
        # Reference logits (optionally log-softmaxed) from the HF model.
        with torch.no_grad():
            inputs = tokenizer(text, return_tensors="pt")
            inputs.to(device)
            model.to(device)
            output = model(**inputs)
            ref_output = output.logits
            if return_log_probs:
                ref_output = torch.nn.functional.log_softmax(ref_output, dim=-1)
            ref_output = ref_output.cpu().numpy()
        kwargs = dict(return_log_probs=return_log_probs)
        if tensor_input:
            # StorageView input path: ids + lengths must both be provided.
            inputs = tokenizer(text, return_length=True, return_tensors="pt")
            inputs.to(device)
            ids = inputs.input_ids.to(torch.int32)
            lengths = inputs.length.to(torch.int32)
            if device == "cpu":
                ids = ids.numpy()
                lengths = lengths.numpy()
            ids = ctranslate2.StorageView.from_array(ids)
            lengths = ctranslate2.StorageView.from_array(lengths)
            with pytest.raises(ValueError, match="lengths"):
                generator.forward_batch(ids, **kwargs)
            output = generator.forward_batch(ids, lengths, **kwargs)
        else:
            # Plain list-of-ids input path.
            ids = tokenizer(text).input_ids
            output = generator.forward_batch(ids, **kwargs)
        if device == "cpu":
            output = np.array(output)
        else:
            output = torch.as_tensor(output, device=device).cpu().numpy()
        assert output.shape == ref_output.shape
        np.testing.assert_allclose(output, ref_output, rtol=1e-2)
    @test_utils.only_on_linux
    def test_transformers_generator_on_iterables(self, tmp_dir):
        """generate_iterable/score_iterable mirror their batch counterparts."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        start_tokens = ["<|endoftext|>"]
        tokens = "Ċ The Ġfirst Ġtime ĠI Ġsaw Ġthe Ġnew Ġversion Ġof".split()
        output = next(generator.generate_iterable(iter([start_tokens]), max_length=10))
        assert output.sequences[0] == tokens
        output = next(generator.score_iterable(iter([tokens])))
        assert output.tokens == tokens[1:]
        assert len(output.log_probs) == len(output.tokens)
        # Test empty iterables.
        with pytest.raises(StopIteration):
            next(generator.score_iterable(iter([])))
        with pytest.raises(StopIteration):
            next(generator.generate_iterable(iter([])))
    @test_utils.only_on_linux
    def test_transformers_generator_suppress_sequences(self, tmp_dir):
        """suppress_sequences forbids a token sequence and changes the output."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        output = generator.generate_batch(
            [["<|endoftext|>"]],
            max_length=10,
            suppress_sequences=[["Ġfirst", "Ġtime"]],
        )
        expected_tokens = "Ċ The Ġfirst Ġof Ġthe Ġthree Ġnew Ġseries Ġof Ġthe".split()
        assert output[0].sequences[0] == expected_tokens
    @test_utils.only_on_linux
    @pytest.mark.parametrize("beam_size", [1, 2])
    def test_transformers_generator_ignore_prompt(self, tmp_dir, beam_size):
        """include_prompt_in_result=False yields the same continuation/score."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        max_length = 20
        tokens = "Ċ The Ġfirst Ġtime ĠI".split()
        result_wo_prompt = generator.generate_batch(
            [tokens],
            beam_size=beam_size,
            max_length=max_length - len(tokens),
            return_scores=True,
            include_prompt_in_result=False,
        )[0]
        result_w_prompt = generator.generate_batch(
            [tokens],
            beam_size=beam_size,
            max_length=max_length - 1,
            return_scores=True,
        )[0]
        assert len(result_w_prompt.sequences[0]) == max_length
        assert tokens + result_wo_prompt.sequences[0] == result_w_prompt.sequences[0]
        # Scores are length-normalized; un-normalize to compare cumulative
        # values (the with-prompt score excludes the first token).
        cum_score_wo_prompt = result_wo_prompt.scores[0] * (
            len(result_wo_prompt.sequences[0])
        )
        cum_score_w_prompt = result_w_prompt.scores[0] * (
            len(result_w_prompt.sequences[0]) - 1
        )
        assert cum_score_wo_prompt == pytest.approx(cum_score_w_prompt, abs=1e-4)
    @test_utils.only_on_linux
    @pytest.mark.parametrize("beam_size", [1, 2])
    def test_transformers_generator_ignore_prompt_batch(self, tmp_dir, beam_size):
        """With prompts of different lengths, each result has only new tokens."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        new_tokens = 2
        prompt = [
            "Ċ The Ġfirst Ġtime ĠI".split(),
            "Ċ The Ġfirst".split(),
        ]
        results = generator.generate_batch(
            prompt,
            beam_size=beam_size,
            min_length=new_tokens,
            max_length=new_tokens,
            include_prompt_in_result=False,
        )
        # NOTE(review): `tokens` is unused in this loop body.
        for tokens, result in zip(prompt, results):
            assert len(result.sequences[0]) == new_tokens
    @test_utils.only_on_linux
    def test_transformers_generator_static_prompt(self, tmp_dir):
        """static_prompt reproduces the same continuation as an inline prompt."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        max_length = 20
        prompt = "Ċ The Ġfirst Ġtime ĠI".split()
        expected_result = generator.generate_batch(
            [prompt],
            max_length=max_length,
            include_prompt_in_result=False,
        )[0]
        result = generator.generate_batch(
            [[expected_result.sequences[0][0]]],
            max_length=max_length - 1,
            static_prompt=prompt,
        )[0]
        assert result.sequences[0] == expected_result.sequences[0]
        result = generator.generate_batch(
            [expected_result.sequences[0][:2]],
            max_length=max_length - 2,
            static_prompt=prompt,
            include_prompt_in_result=False,
        )[0]
        assert (
            expected_result.sequences[0][:2] + result.sequences[0]
            == expected_result.sequences[0]
        )
        # The static prompt is shared across all examples of a batch.
        batch_results = generator.generate_batch(
            [[expected_result.sequences[0][0]], [expected_result.sequences[0][0]]],
            max_length=max_length - 1,
            static_prompt=prompt,
        )
        assert batch_results[0].sequences[0] == expected_result.sequences[0]
        assert batch_results[1].sequences[0] == expected_result.sequences[0]
    @test_utils.only_on_linux
    @pytest.mark.parametrize("return_log_prob", [True, False])
    def test_transformers_generator_token_streaming(self, tmp_dir, return_log_prob):
        """generate_tokens streams steps matching generate_batch output/score."""
        converter = ctranslate2.converters.TransformersConverter("gpt2")
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        generator = ctranslate2.Generator(output_dir)
        max_length = 20
        prompt = "Ċ The Ġfirst Ġtime ĠI".split()
        expected_result = generator.generate_batch(
            [prompt],
            max_length=max_length,
            return_scores=True,
            include_prompt_in_result=False,
        )[0]
        step_results = generator.generate_tokens(
            prompt, max_length=max_length, return_log_prob=return_log_prob
        )
        assert inspect.isgenerator(step_results)
        tokens = []
        ids = []
        cum_log_probs = 0
        for step_result in step_results:
            assert isinstance(step_result, ctranslate2.GenerationStepResult)
            tokens.append(step_result.token)
            ids.append(step_result.token_id)
            if return_log_prob:
                cum_log_probs += step_result.log_prob
            else:
                assert step_result.log_prob is None
        assert tokens == expected_result.sequences[0]
        assert ids == expected_result.sequences_ids[0]
        # The batch score is the mean log prob over generated tokens.
        if return_log_prob:
            assert cum_log_probs / len(ids) == pytest.approx(
                expected_result.scores[0], abs=1e-5
            )
class TestWhisper:
    """End-to-end tests of ctranslate2.models.Whisper on converted checkpoints."""
    @classmethod
    def teardown_class(cls):
        # Drop downloaded checkpoints once all tests in the class have run.
        clear_transformers_cache_in_ci()
    @test_utils.only_on_linux
    @test_utils.on_available_devices
    # Each case: model, per-example prompt token lists, the expected
    # transcriptions, and the expected no-speech probabilities.
    @pytest.mark.parametrize(
        "model_name,prompts,expected_transcriptions,expected_no_speech_probs",
        [
            (
                "openai/whisper-tiny",
                [
                    [
                        "<|startoftranscript|>",
                        "<|en|>",
                        "<|transcribe|>",
                        "<|notimestamps|>",
                    ],
                    [
                        "<|startoftranscript|>",
                        "<|en|>",
                        "<|transcribe|>",
                        "<|notimestamps|>",
                        "ĠAnd",
                        "Ġthus",
                        "Ġmy",
                    ],
                ],
                [
                    " Mr. Quilter is the apostle of the middle classes and we are glad"
                    " to welcome his gospel.",
                    " And thus my fellow Americans ask not what your country can do for you,"
                    " ask what you can do for your country.",
                ],
                [
                    pytest.approx(0.0022832120303064585, abs=1e-4),
                    pytest.approx(0.06885894387960434, abs=1e-3),
                ],
            ),
            (
                "openai/whisper-tiny",
                [
                    ["<|startoftranscript|>", "<|en|>", "<|transcribe|>"],
                    ["<|startoftranscript|>", "<|en|>", "<|transcribe|>"],
                ],
                [
                    " Mr. Quilter is the apostle of the middle classes and we are glad"
                    " to welcome his gospel.",
                    " And so, my fellow Americans, ask not what your country can do for you,"
                    " ask what you can do for your country.",
                ],
                [
                    pytest.approx(0.0022832120303064585, abs=1e-4),
                    pytest.approx(0.06885894387960434, abs=1e-3),
                ],
            ),
            (
                "openai/whisper-tiny.en",
                [["<|startoftranscript|>"], ["<|startoftranscript|>"]],
                [
                    " Mr. Quilter is the apostle of the middle classes, and we are glad"
                    " to welcome his gospel.",
                    " And so, my fellow Americans ask not what your country can do for you"
                    " ask what you can do for your country.",
                ],
                [
                    pytest.approx(0.02644546702504158, abs=1e-4),
                    pytest.approx(0.062380101531744, abs=1e-3),
                ],
            ),
        ],
    )
    def test_transformers_whisper(
        self,
        tmp_dir,
        device,
        model_name,
        prompts,
        expected_transcriptions,
        expected_no_speech_probs,
    ):
        """Language detection, generation and timestamp rules on two samples."""
        import transformers
        converter = ctranslate2.converters.TransformersConverter(model_name)
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        audio_paths = [
            os.path.join(test_utils.get_data_dir(), "audio", "mr_quilter.npy"),
            os.path.join(test_utils.get_data_dir(), "audio", "jfk.npy"),
        ]
        audio = list(map(np.load, audio_paths))
        processor = transformers.WhisperProcessor.from_pretrained(model_name)
        def _get_features(audio):
            # Pad after computing the log-Mel spectrogram to match the openai/whisper behavior.
            inputs = processor(audio, padding=False, sampling_rate=16000)
            features = inputs.input_features[0]
            features = np.pad(features, [(0, 0), (0, 3000 - features.shape[-1])])
            return features
        features = np.stack(list(map(_get_features, audio)))
        features = ctranslate2.StorageView.from_array(features)
        model = ctranslate2.models.Whisper(output_dir, device=device)
        # ".en" checkpoints are monolingual: detect_language must be rejected.
        assert model.is_multilingual == (not model_name.endswith(".en"))
        if model.is_multilingual:
            for result in model.detect_language(features):
                best_lang, best_prob = result[0]
                assert best_lang == "<|en|>"
                assert best_prob > 0.9
        else:
            with pytest.raises(RuntimeError, match="multilingual"):
                model.detect_language(features)
        results = model.generate(
            features,
            prompts,
            beam_size=2,
            num_hypotheses=2,
            return_no_speech_prob=True,
        )
        # All token ids >= timestamp_begin are timestamp tokens.
        timestamp_begin = (
            processor.tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1
        )
        for prompt, result, expected_transcription, expected_no_speech_prob in zip(
            prompts, results, expected_transcriptions, expected_no_speech_probs
        ):
            assert len(result.sequences_ids) == 2
            assert result.no_speech_prob == expected_no_speech_prob
            for tokens in result.sequences_ids:
                if "<|notimestamps|>" in prompt:
                    # Timestamps disabled: no timestamp token may be produced.
                    assert all(token < timestamp_begin for token in tokens)
                else:
                    # Timestamps enabled: output starts and ends with one,
                    # and the end timestamp is after the start.
                    assert tokens[0] >= timestamp_begin
                    assert tokens[-1] >= timestamp_begin
                    assert tokens[-1] > tokens[0]
            token_ids = list(
                filter(lambda token: token < timestamp_begin, result.sequences_ids[0])
            )
            transcription = processor.decode(token_ids)
            assert transcription == expected_transcription
    @test_utils.only_on_linux
    @test_utils.on_available_devices
    @pytest.mark.parametrize(
        "test_names", [["jfk"], ["jfk", "jfk"], ["mr_quilter", "jfk"]]
    )
    def test_transformers_whisper_align(self, tmp_dir, device, test_names):
        """align() reproduces the reference token/frame alignments."""
        import transformers
        test_cases = []
        audio = []
        test_dir = os.path.join(test_utils.get_data_dir(), "audio")
        for name in test_names:
            audio_path = os.path.join(test_dir, "%s.npy" % name)
            audio.append(np.load(audio_path))
            test_case_path = os.path.join(test_dir, "%s_alignments.json" % name)
            with open(test_case_path) as test_case_file:
                test_cases.append(json.load(test_case_file))
        model_name = "openai/whisper-tiny.en"
        converter = ctranslate2.converters.TransformersConverter(model_name)
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        processor = transformers.WhisperProcessor.from_pretrained(model_name)
        inputs = processor(audio, return_tensors="np", sampling_rate=16000)
        features = ctranslate2.StorageView.from_array(inputs.input_features)
        model = ctranslate2.models.Whisper(output_dir, device=device)
        results = model.align(
            features,
            [50257],
            [test_case["text_tokens"] for test_case in test_cases],
            [test_case["num_frames"] for test_case in test_cases],
        )
        for result, test_case in zip(results, test_cases):
            assert np.sum(result.text_token_probs) == pytest.approx(
                test_case["expected_text_token_probs_sum"], abs=1e-3
            )
            assert result.alignments == [
                tuple(pair) for pair in test_case["expected_alignments"]
            ]
    @test_utils.only_on_linux
    @test_utils.on_available_devices
    def test_transformers_whisper_encode(self, tmp_dir, device):
        """generate() accepts pre-encoded features from encode()."""
        import transformers
        model_name = "openai/whisper-tiny.en"
        converter = ctranslate2.converters.TransformersConverter(model_name)
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        audio_path = os.path.join(test_utils.get_data_dir(), "audio", "jfk.npy")
        audio = np.load(audio_path)
        processor = transformers.WhisperProcessor.from_pretrained(model_name)
        inputs = processor(audio, sampling_rate=16000)
        features = inputs.input_features[0]
        features = np.expand_dims(features, 0)
        features = ctranslate2.StorageView.from_array(features)
        model = ctranslate2.models.Whisper(output_dir, device=device)
        encoded = model.encode(features)
        prompts = [["<|startoftranscript|>", "<|notimestamps|>"]]
        result = model.generate(encoded, prompts)[0]
        transcription = processor.decode(result.sequences_ids[0])
        assert transcription == (
            " And so my fellow Americans ask not what your country can do for you, "
            "ask what you can do for your country."
        )
    @test_utils.only_on_linux
    def test_transformers_whisper_invalid_shape(self, tmp_dir):
        """Feature tensors not padded to 3000 frames produce a clear error."""
        import transformers
        model_name = "openai/whisper-tiny"
        converter = ctranslate2.converters.TransformersConverter(model_name)
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        audio_path = os.path.join(test_utils.get_data_dir(), "audio", "jfk.npy")
        audio = np.load(audio_path)
        processor = transformers.WhisperProcessor.from_pretrained(model_name)
        inputs = processor(
            audio, padding=False, return_tensors="np", sampling_rate=16000
        )
        features = ctranslate2.StorageView.from_array(inputs.input_features)
        model = ctranslate2.models.Whisper(output_dir)
        with pytest.raises(ValueError) as exception_info:
            model.detect_language(features)
        # The message reports both the expected and the actual shape.
        error_message = str(exception_info.value)
        assert "(1, 80, 3000)" in error_message
        assert "(1, 80, 1100)" in error_message
    @test_utils.only_on_linux
    def test_transformers_whisper_include_tokenizer_json(self, tmp_dir):
        """copy_files copies extra files next to the converted model."""
        model_name = "openai/whisper-tiny"
        converter = ctranslate2.converters.TransformersConverter(
            model_name, copy_files=["tokenizer.json"]
        )
        output_dir = str(tmp_dir.join("ctranslate2_model"))
        output_dir = converter.convert(output_dir)
        assert os.path.isfile(os.path.join(output_dir, "tokenizer.json"))
| 31,550 | 33.747797 | 95 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/main.py | # @Author : bamtercelboo
# @Datetime : 2018/1/30 19:50
# @File : main_hyperparams.py.py
# @Last Modify Time : 2018/1/30 19:50
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : main_hyperparams.py.py
FUNCTION : main
"""
import pdb
import argparse
import datetime
import Config.config as configurable
from DataUtils.mainHelp import *
from DataUtils.Alphabet import *
from test import load_test_data
from test import T_Inference
from trainer import Train
import random
# solve default encoding problem
from imp import reload
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
# random seed
torch.manual_seed(seed_num)
random.seed(seed_num)
def start_train(train_iter, dev_iter, test_iter, model, config):
    """
    Run the full training loop on the given model.

    :param train_iter: train batch data iterator
    :param dev_iter: dev batch data iterator
    :param test_iter: test batch data iterator
    :param model: nn model
    :param config: config
    :return: None
    """
    trainer = Train(train_iter=train_iter, dev_iter=dev_iter, test_iter=test_iter,
                    model=model, config=config)
    trainer.train()
    print("Finish Train.")
def start_test(train_iter, dev_iter, test_iter, model, alphabet, config):
    """
    Run inference over the configured data split and write predictions to file.

    :param train_iter: train batch data iterator
    :param dev_iter: dev batch data iterator
    :param test_iter: test batch data iterator
    :param model: nn model
    :param alphabet: alphabet dict
    :param config: config
    :return: None
    """
    print("\nTesting Start......")
    data, src_path, out_path = load_test_data(train_iter, dev_iter, test_iter, config)
    inference = T_Inference(model=model, data=data, path_source=src_path,
                            path_result=out_path, alphabet=alphabet,
                            use_crf=config.use_crf, config=config)
    inference.infer2file()
    print("Finished Test.")
def main():
    """
    Entry point: create a timestamped run directory, build data/model,
    then dispatch to training or testing based on the global config.

    :return: None
    """
    # timestamped directory so each run keeps its own artifacts
    config.mulu = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    config.save_dir = os.path.join(config.save_direction, config.mulu)
    if not os.path.isdir(config.save_dir):
        os.makedirs(config.save_dir)
    # data, iterators and alphabet
    train_iter, dev_iter, test_iter, alphabet = load_data(config=config)
    # derive model hyper-parameters from the alphabet
    get_params(config=config, alphabet=alphabet)
    # persist the vocabularies
    save_dictionary(config=config)
    model = load_model(config)
    if config.train is True:
        start_train(train_iter, dev_iter, test_iter, model, config)
        exit()
    elif config.test is True:
        start_test(train_iter, dev_iter, test_iter, model, alphabet, config)
        exit()
def parse_argument():
    """
    Build the command-line parser and fold the parsed flags into the
    Configurable object loaded from the config file.

    :return: config object with CLI overrides applied
    """
    parser = argparse.ArgumentParser(description="NER & POS")
    parser.add_argument("-c", "--config", dest="config_file", type=str, default="./Config/config.cfg",help="config path")
    parser.add_argument("-device", "--device", dest="device", type=str, default="cuda:0", help="device[‘cpu’,‘cuda:0’,‘cuda:1’,......]")
    parser.add_argument("--train", dest="train", action="store_true", default=True, help="train model")
    parser.add_argument("-p", "--process", dest="process", action="store_true", default=False, help="data process")
    parser.add_argument("-t", "--test", dest="test", action="store_true", default=False, help="test model")
    parser.add_argument("--t_model", dest="t_model", type=str, default=None, help="model for test")
    parser.add_argument("--t_data", dest="t_data", type=str, default=None, help="data[train, dev, test, None] for test model")
    parser.add_argument("--predict", dest="predict", action="store_true", default=False, help="predict model")
    args = parser.parse_args()
    config = configurable.Configurable(config_file=args.config_file)
    # mirror every CLI flag onto the config object
    for name in ("device", "train", "process", "test", "t_model", "t_data", "predict"):
        setattr(config, name, getattr(args, name))
    # test mode implies no training
    if config.test is True:
        config.train = False
    if config.t_data not in [None, "train", "dev", "test"]:
        print("\nUsage")
        parser.print_help()
        print("t_data : {}, not in [None, 'train', 'dev', 'test']".format(config.t_data))
        exit()
    print("***************************************")
    print("Device : {}".format(config.device))
    print("Data Process : {}".format(config.process))
    print("Train model : {}".format(config.train))
    print("Test model : {}".format(config.test))
    print("t_model : {}".format(config.t_model))
    print("t_data : {}".format(config.t_data))
    print("predict : {}".format(config.predict))
    print("***************************************")
    return config
if __name__ == "__main__":
    # Log process ids so runs can be matched with system monitoring tools.
    print("Process ID {}, Process Parent ID {}".format(os.getpid(), os.getppid()))
    config = parse_argument()
    if config.device != cpu_device:
        print("Using GPU To Train......")
        #device_number = config.device[-1]
        # Parse the ordinal from strings like "cuda:0" (handles multi-digit ids).
        device_number = config.device.split(":")[-1]
        torch.cuda.set_device(int(device_number))
        print("Current Cuda Device {}".format(torch.cuda.current_device()))
        # torch.backends.cudnn.enabled = True
        # torch.backends.cudnn.deterministic = True
        # Seed CUDA RNGs for reproducibility (seed_num comes from DataUtils.Common).
        torch.cuda.manual_seed(seed_num)
        torch.cuda.manual_seed_all(seed_num)
        print("torch.cuda.initial_seed", torch.cuda.initial_seed())
    main()
| 5,673 | 34.685535 | 136 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/test.py | # @Author : bamtercelboo
# @Datetime : 2018/8/24 15:27
# @File : test.py
# @Last Modify Time : 2018/8/24 15:27
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : test.py
FUNCTION : None
"""
import os
import sys
import torch
from DataUtils.utils import *
from DataUtils.Common import *
def load_test_model(model, config):
    """
    Restore trained weights into `model` for testing.

    Uses config.t_model when given, otherwise falls back to the best-model
    checkpoint saved during training.

    :param model: initial model
    :param config: config
    :return: loaded model
    """
    if config.t_model is None:
        model_path = os.path.join(config.save_best_model_dir,
                                  "{}.pt".format(config.model_name))
        print("load default model from {}".format(model_path))
    else:
        model_path = config.t_model
        print("load user model from {}".format(model_path))
    model.load_state_dict(torch.load(model_path))
    return model
def load_test_data(train_iter=None, dev_iter=None, test_iter=None, config=None):
    """
    Select the data split to run inference on, plus its source and output paths.

    :param train_iter: train data
    :param dev_iter: dev data
    :param test_iter: test data
    :param config: config
    :return: (data, path_source, path_result) for the requested split
    """
    # t_data -> (iterator, source file, log message); None falls back to test
    choices = {
        None: (test_iter, config.test_file, "default[test] for model test."),
        "train": (train_iter, config.train_file, "train data for model test."),
        "dev": (dev_iter, config.dev_file, "dev data for model test."),
        "test": (test_iter, config.test_file, "test data for model test."),
    }
    if config.t_data not in choices:
        print("Error value --- t_data = {}, must in [None, 'train', 'dev', 'test'].".format(config.t_data))
        exit()
    data, data_file, message = choices[config.t_data]
    print(message)
    # `shuffle` is the suffix constant from DataUtils.Common
    path_source = ".".join([data_file, shuffle])
    path_result = "{}.out".format(path_source)
    return data, path_source, path_result
class T_Inference(object):
    """
    Test Inference: runs a trained model over a data split and writes the
    predicted label as an extra column next to each token of the source file.
    """
    def __init__(self, model, data, path_source, path_result, alphabet, use_crf, config):
        """
        :param model: nn model
        :param data: infer data (batch iterator)
        :param path_source: source data path
        :param path_result: result data path
        :param alphabet: alphabet (provides label id <-> string mapping)
        :param use_crf: whether the model decodes through a CRF layer
        :param config: config
        """
        print("Initialize T_Inference")
        self.model = model
        self.data = data
        self.path_source = path_source
        self.path_result = path_result
        self.alphabet = alphabet
        self.config = config
        self.use_crf = use_crf

    def infer2file(self):
        """
        Predict a label for every token in every batch and write the results
        to `self.path_result`.

        :return: None
        """
        print("infer.....")
        self.model.eval()
        # NOTE(review): predict_labels is never used below; predict_label
        # accumulates labels for ALL sentences in source order.
        predict_labels = []
        predict_label = []
        all_count = len(self.data)
        now_count = 0
        for data in self.data:
            now_count += 1
            sys.stdout.write("\rinfer with batch number {}/{} .".format(now_count, all_count))
            word, char, mask, sentence_length, tags = self._get_model_args(data)
            logit = self.model(word, char, sentence_length, train=False)
            if self.use_crf is False:
                # greedy decoding: per-token argmax over label scores
                predict_ids = torch_max(logit)
                for id_batch in range(data.batch_length):
                    inst = data.inst[id_batch]
                    label_ids = predict_ids[id_batch]
                    # maxId_batch = getMaxindex_batch(logit[id_batch])
                    for id_word in range(inst.words_size):
                        predict_label.append(self.alphabet.label_alphabet.from_id(label_ids[id_word]))
            else:
                # Viterbi decoding through the CRF layer
                path_score, best_paths = self.model.crf_layer(logit, mask)
                for id_batch in range(data.batch_length):
                    inst = data.inst[id_batch]
                    # truncate padded positions to the real sentence length
                    label_ids = best_paths[id_batch].cpu().data.numpy()[:inst.words_size]
                    for i in label_ids:
                        predict_label.append(self.alphabet.label_alphabet.from_id(i))
        print("\ninfer finished.")
        self.write2file(result=predict_label, path_source=self.path_source, path_result=self.path_result)

    @staticmethod
    def write2file(result, path_source, path_result):
        """
        Rewrite the source file with the predicted label appended to every
        non-empty line; blank lines (sentence separators) are preserved.

        :param result: flat list of predicted label strings, one per token
        :param path_source: source data path (one token per line)
        :param path_result: output path (overwritten if present)
        :return: None
        """
        print("write result to file {}".format(path_result))
        if os.path.exists(path_source) is False:
            print("source data path[path_source] is not exist.")
        if os.path.exists(path_result):
            os.remove(path_result)
        file_out = open(path_result, encoding="UTF-8", mode="w")
        with open(path_source, encoding="UTF-8") as file:
            # NOTE(review): `id` shadows the builtin; it indexes into `result`
            id = 0
            for line in file.readlines():
                sys.stdout.write("\rwrite with {}/{} .".format(id+1, len(result)))
                if line == "\n":
                    file_out.write("\n")
                    continue
                line = line.strip().split()
                line.append(result[id])
                id += 1
                file_out.write(" ".join(line) + "\n")
                if id >= len(result):
                    break
        file_out.close()
        print("\nfinished.")

    @staticmethod
    def _get_model_args(batch_features):
        """
        Unpack a batch into the tensors the model forward expects.

        :param batch_features: Batch Instance
        :return: (word, char, mask, sentence_length, tags)
        """
        word = batch_features.word_features
        char = batch_features.char_features
        # non-padding positions (word id 0 is assumed padding — TODO confirm)
        mask = word > 0
        sentence_length = batch_features.sentence_length
        # desorted_indices = batch_features.desorted_indices
        tags = batch_features.label_features
        return word, char, mask, sentence_length, tags
| 6,069 | 34.086705 | 107 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/trainer.py | # @Author : bamtercelboo
# @Datetime : 2018/8/26 8:30
# @File : trainer.py
# @Last Modify Time : 2018/8/26 8:30
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : trainer.py
FUNCTION : None
"""
import os, pdb
import seqeval.metrics
from seqeval.scheme import IOB2
import sys
import time
import numpy as np
import random
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.utils as utils
from DataUtils.Optim import Optimizer
from DataUtils.utils import *
from DataUtils.eval_bio import entity_evalPRF_exact, entity_evalPRF_propor, entity_evalPRF_binary
from DataUtils.eval import Eval, EvalPRF
from DataUtils.Common import *
torch.manual_seed(seed_num)
random.seed(seed_num)
class Train(object):
    """
    Train: drives the full training loop (forward/backward/step), runs dev and
    test evaluation each epoch, saves the best checkpoint and applies early
    stopping.
    """
    def __init__(self, **kwargs):
        """
        :param kwargs:
        Args of data:
            train_iter : train batch data iterator
            dev_iter : dev batch data iterator
            test_iter : test batch data iterator
        Args of train:
            model : nn model
            config : config
        """
        print("Training Start......")
        # for k, v in kwargs.items():
        #     self.__setattr__(k, v)
        self.train_iter = kwargs["train_iter"]
        self.dev_iter = kwargs["dev_iter"]
        self.test_iter = kwargs["test_iter"]
        self.model = kwargs["model"]
        self.config = kwargs["config"]
        self.use_crf = self.config.use_crf
        self.average_batch = self.config.average_batch
        self.early_max_patience = self.config.early_max_patience
        self.optimizer = Optimizer(name=self.config.learning_algorithm, model=self.model,
                                   lr=self.config.learning_rate,
                                   weight_decay=self.config.weight_decay,
                                   grad_clip=self.config.clip_max_norm,
                                   momentum=self.config.momentum)
        self.loss_function = self._loss(learning_algorithm=self.config.learning_algorithm,
                                        label_paddingId=self.config.label_paddingId, use_crf=self.use_crf)
        print(self.optimizer)
        print(self.loss_function)
        self.best_score = Best_Result()
        self.train_eval, self.dev_eval, self.test_eval = Eval(), Eval(), Eval()
        self.train_iter_len = len(self.train_iter)

    def _loss(self, learning_algorithm, label_paddingId, use_crf=False):
        """
        Select the loss function: CRF negative log-likelihood when use_crf,
        otherwise cross-entropy (sum-reduced for SGD, mean-reduced otherwise).

        :param learning_algorithm: optimizer name ("SGD", "Adam", ...)
        :param label_paddingId: label id ignored by cross-entropy
        :param use_crf: whether the model has a CRF layer
        :return: callable loss function
        """
        if use_crf:
            loss_function = self.model.crf_layer.neg_log_likelihood_loss
            return loss_function
        elif learning_algorithm == "SGD":
            loss_function = nn.CrossEntropyLoss(ignore_index=label_paddingId, reduction="sum")
            return loss_function
        else:
            loss_function = nn.CrossEntropyLoss(ignore_index=label_paddingId, reduction="mean")
            return loss_function

    def _clip_model_norm(self, clip_max_norm_use, clip_max_norm):
        """
        :param clip_max_norm_use: whether to use clip max norm for nn model
        :param clip_max_norm: clip max norm max values [float or None]
        :return:
        """
        if clip_max_norm_use is True:
            # clip_max_norm may arrive as the string "None" from the config file
            gclip = None if clip_max_norm == "None" else float(clip_max_norm)
            assert isinstance(gclip, float)
            utils.clip_grad_norm_(self.model.parameters(), max_norm=gclip)

    def _dynamic_lr(self, config, epoch, new_lr):
        """
        Patience-based LR decay (currently unused; see _decay_learning_rate).

        :param config: config
        :param epoch: epoch
        :param new_lr: learning rate
        :return: possibly-decayed learning rate
        """
        if config.use_lr_decay is True and epoch > config.max_patience and (
                epoch - 1) % config.max_patience == 0 and new_lr > config.min_lrate:
            new_lr = max(new_lr * config.lr_rate_decay, config.min_lrate)
            set_lrate(self.optimizer, new_lr)
        return new_lr

    def _decay_learning_rate(self, epoch, init_lr):
        """lr decay: lr = init_lr / (1 + decay * epoch)

        Args:
            epoch: int, epoch
            init_lr: initial lr
        """
        lr = init_lr / (1 + self.config.lr_rate_decay * epoch)
        # assumes the Optimizer wrapper exposes torch-style param_groups — TODO confirm
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        return self.optimizer

    def _optimizer_batch_step(self, config, backward_count):
        """
        Step the optimizer every `backward_batch_size` backwards (gradient
        accumulation), and always on the last batch of the epoch.

        :param config:
        :param backward_count:
        :return:
        """
        if backward_count % config.backward_batch_size == 0 or backward_count == self.train_iter_len:
            self.optimizer.step()
            self.optimizer.zero_grad()

    def _early_stop(self, epoch):
        """
        Stop training when dev score has not improved for
        `early_max_patience` consecutive epochs.

        :param epoch:
        :return:
        """
        best_epoch = self.best_score.best_epoch
        if epoch > best_epoch:
            self.best_score.early_current_patience += 1
            print("Dev Has Not Promote {} / {}".format(self.best_score.early_current_patience, self.early_max_patience))
            if self.best_score.early_current_patience >= self.early_max_patience:
                print("Early Stop Train. Best Score Locate on {} Epoch.".format(self.best_score.best_epoch))
                exit()

    @staticmethod
    def _get_model_args(batch_features):
        """
        Unpack a batch into the tensors the model forward expects.

        :param batch_features: Batch Instance
        :return: (word, char, mask, sentence_length, tags)
        """
        word = batch_features.word_features
        char = batch_features.char_features
        # non-padding positions (word id 0 is assumed padding — TODO confirm)
        mask = word > 0
        sentence_length = batch_features.sentence_length
        tags = batch_features.label_features
        return word, char, mask, sentence_length, tags

    def _calculate_loss(self, feats, mask, tags):
        """
        Compute the batch loss with either cross-entropy or CRF NLL.

        Args:
            feats: size = (batch_size, seq_len, tag_size)
            mask: size = (batch_size, seq_len)
            tags: size = (batch_size, seq_len)
        """
        if not self.use_crf:
            batch_size, max_len = feats.size(0), feats.size(1)
            # flatten to (batch*seq_len, tag_size) for CrossEntropyLoss
            lstm_feats = feats.view(batch_size * max_len, -1)
            tags = tags.view(-1)
            return self.loss_function(lstm_feats, tags)
        else:
            loss_value = self.loss_function(feats, mask, tags)
            if self.average_batch:
                batch_size = feats.size(0)
                loss_value /= float(batch_size)
            return loss_value

    def train(self):
        """
        Main training loop over epochs: decay lr, shuffle batches, run
        forward/backward with gradient accumulation, then evaluate, save
        and check early stopping each epoch.

        :return:
        """
        epochs = self.config.epochs
        clip_max_norm_use = self.config.clip_max_norm_use
        clip_max_norm = self.config.clip_max_norm
        new_lr = self.config.learning_rate
        for epoch in range(1, epochs + 1):
            print("\n## The {} Epoch, All {} Epochs ! ##".format(epoch, epochs))
            # new_lr = self._dynamic_lr(config=self.config, epoch=epoch, new_lr=new_lr)
            self.optimizer = self._decay_learning_rate(epoch=epoch - 1, init_lr=self.config.learning_rate)
            print("now lr is {}".format(self.optimizer.param_groups[0].get("lr")), end="")
            start_time = time.time()
            random.shuffle(self.train_iter)
            self.model.train()
            steps = 1
            backward_count = 0
            self.optimizer.zero_grad()
            for batch_count, batch_features in enumerate(self.train_iter):
                backward_count += 1
                # self.optimizer.zero_grad()
                word, char, mask, sentence_length, tags = self._get_model_args(batch_features)
                logit = self.model(word, char, sentence_length, train=True)
                loss = self._calculate_loss(logit, mask, tags)
                loss.backward()
                self._clip_model_norm(clip_max_norm_use, clip_max_norm)
                self._optimizer_batch_step(config=self.config, backward_count=backward_count)
                # self.optimizer.step()
                steps += 1
                if (steps - 1) % self.config.log_interval == 0:
                    self.getAcc(self.train_eval, batch_features, logit, self.config)
                    sys.stdout.write(
                        "\nbatch_count = [{}] , loss is {:.6f}, [TAG-ACC is {:.6f}%]".format(batch_count + 1, loss.item(), self.train_eval.acc()))
            end_time = time.time()
            print("\nTrain Time {:.3f}".format(end_time - start_time), end="")
            self.eval(model=self.model, epoch=epoch, config=self.config)
            self._model2file(model=self.model, config=self.config, epoch=epoch)
            self._early_stop(epoch=epoch)

    def eval(self, model, epoch, config):
        """
        Evaluate on dev and test sets for the current epoch.

        :param model: nn model
        :param epoch: epoch
        :param config: config
        :return:
        """
        self.dev_eval.clear_PRF()
        eval_start_time = time.time()
        self.eval_batch(self.dev_iter, model, self.dev_eval, self.best_score, epoch, config, test=False)
        eval_end_time = time.time()
        print("Dev Time {:.3f}".format(eval_end_time - eval_start_time))
        self.test_eval.clear_PRF()
        eval_start_time = time.time()
        self.eval_batch(self.test_iter, model, self.test_eval, self.best_score, epoch, config, test=True)
        eval_end_time = time.time()
        print("Test Time {:.3f}".format(eval_end_time - eval_start_time))

    def _model2file(self, model, config, epoch):
        """
        Save checkpoints: either every epoch or only the best dev model,
        depending on the config flags.

        :param model: nn model
        :param config: config
        :param epoch: epoch
        :return:
        """
        if config.save_model and config.save_all_model:
            save_model_all(model, config.save_dir, config.model_name, epoch)
        elif config.save_model and config.save_best_model:
            save_best_model(model, config.save_best_model_path, config.model_name, self.best_score)
        else:
            print()

    def eval_batch(self, data_iter, model, eval_instance, best_score, epoch, config, test=False):
        """
        Decode one split (greedy or CRF), score it with both the project
        evaluator and seqeval (strict mode), and update the best-score state.

        :param data_iter: eval batch data iterator
        :param model: eval model
        :param eval_instance:
        :param best_score:
        :param epoch:
        :param config: config
        :param test: whether to test
        :return: None
        """
        model.eval()
        # eval time
        eval_acc = Eval()
        eval_PRF = EvalPRF()
        gold_labels = []
        predict_labels = []
        for batch_features in data_iter:
            word, char, mask, sentence_length, tags = self._get_model_args(batch_features)
            logit = model(word, char, sentence_length, train=False)
            if self.use_crf is False:
                # greedy decoding: per-token argmax
                predict_ids = torch_max(logit)
                for id_batch in range(batch_features.batch_length):
                    inst = batch_features.inst[id_batch]
                    label_ids = predict_ids[id_batch]
                    predict_label = []
                    for id_word in range(inst.words_size):
                        predict_label.append(config.create_alphabet.label_alphabet.from_id(label_ids[id_word]))
                    gold_labels.append(inst.labels)
                    predict_labels.append(predict_label)
            else:
                # Viterbi decoding through the CRF layer
                path_score, best_paths = model.crf_layer(logit, mask)
                for id_batch in range(batch_features.batch_length):
                    inst = batch_features.inst[id_batch]
                    gold_labels.append(inst.labels)
                    # truncate padded positions to the real sentence length
                    label_ids = best_paths[id_batch].cpu().data.numpy()[:inst.words_size]
                    label = []
                    for i in label_ids:
                        # print("\n", i)
                        label.append(config.create_alphabet.label_alphabet.from_id(int(i)))
                    predict_labels.append(label)
        for p_label, g_label in zip(predict_labels, gold_labels):
            eval_PRF.evalPRF(predict_labels=p_label, gold_labels=g_label, eval=eval_instance)
        # avoid division by zero in accuracy when nothing was scored
        if eval_acc.gold_num == 0:
            eval_acc.gold_num = 1
        p, r, f = eval_instance.getFscore()
        # independent scoring with seqeval (strict entity matching)
        f_seq = seqeval.metrics.f1_score(gold_labels, predict_labels, mode='strict')
        p_seq = seqeval.metrics.precision_score(gold_labels, predict_labels, mode='strict')
        r_seq = seqeval.metrics.recall_score(gold_labels, predict_labels, mode='strict')
        # p, r, f = entity_evalPRF_exact(gold_labels=gold_labels, predict_labels=predict_labels)
        # p, r, f = entity_evalPRF_propor(gold_labels=gold_labels, predict_labels=predict_labels)
        # p, r, f = entity_evalPRF_binary(gold_labels=gold_labels, predict_labels=predict_labels)
        test_flag = "Test"
        if test is False:
            # dev pass: update best-dev bookkeeping
            print()
            test_flag = "Dev"
            best_score.current_dev_score = f
            if f >= best_score.best_dev_score:
                best_score.best_dev_score = f
                best_score.best_epoch = epoch
                # signal the subsequent test pass to record its scores as best
                best_score.best_test = True
                #seqeval computed scores
                best_score.best_dev_f_seq = f_seq
                best_score.best_dev_p_seq = p_seq
                best_score.best_dev_r_seq = r_seq
        if test is True and best_score.best_test is True:
            best_score.p = p
            best_score.r = r
            best_score.f = f
            #seqeval computed scores
            best_score.best_test_f_seq = f_seq
            best_score.best_test_p_seq = p_seq
            best_score.best_test_r_seq = r_seq
        print("{} eval: precision = {:.6f}%  recall = {:.6f}% , f-score = {:.6f}%,  [TAG-ACC = {:.6f}%]".format(
            test_flag, p, r, f, 0.0000))
        print("*** {} seqeval: precision = {:.6f}%  recall = {:.6f}% , f-score = {:.6f}%,  [TAG-ACC = {:.6f}%]".format(
            test_flag, p_seq, r_seq, f_seq, 0.0000))
        if test is True:
            print("The Current Best Dev F-score: {:.6f}, Locate on {} Epoch.".format(best_score.best_dev_score,
                                                                                     best_score.best_epoch))
            print("The Current Best Test Result: precision = {:.6f}%  recall = {:.6f}% , f-score = {:.6f}%".format(
                best_score.p, best_score.r, best_score.f))
            print("*** The Current Best Dev seqeval Result: precision = {:.6f}%  recall = {:.6f}% , f-score = {:.6f}%".format(
                best_score.best_dev_p_seq, best_score.best_dev_r_seq, best_score.best_dev_f_seq ))
            print("*** The Current Best Test seqeval Result: precision = {:.6f}%  recall = {:.6f}% , f-score = {:.6f}%".format(
                best_score.best_test_p_seq, best_score.best_test_r_seq, best_score.best_test_f_seq ))
        if test is True:
            # reset so the next dev pass must re-earn the flag
            best_score.best_test = False

    @staticmethod
    def getAcc(eval_acc, batch_features, logit, config):
        """
        Compute token-level accuracy on one training batch (greedy argmax,
        no CRF) and accumulate it into `eval_acc`.

        :param eval_acc: eval instance
        :param batch_features: batch data feature
        :param logit: model output
        :param config: config
        :return:
        """
        eval_acc.clear_PRF()
        predict_ids = torch_max(logit)
        for id_batch in range(batch_features.batch_length):
            inst = batch_features.inst[id_batch]
            label_ids = predict_ids[id_batch]
            predict_label = []
            gold_lable = inst.labels
            for id_word in range(inst.words_size):
                predict_label.append(config.create_alphabet.label_alphabet.from_id(label_ids[id_word]))
            assert len(predict_label) == len(gold_lable)
            cor = 0
            for p_lable, g_lable in zip(predict_label, gold_lable):
                if p_lable == g_lable:
                    cor += 1
            eval_acc.correct_num += cor
            eval_acc.gold_num += len(gold_lable)
| 15,711 | 41.579946 | 146 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/mainHelp.py | # @Author : bamtercelboo
# @Datetime : 2018/9/3 10:50
# @File : mainHelp.py
# @Last Modify Time : 2018/9/3 10:50
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : mainHelp.py
FUNCTION : None
"""
import shutil, pdb
import time
from DataUtils.Alphabet import *
from DataUtils.Batch_Iterator import *
from DataUtils.Pickle import pcl
from DataUtils.Embed import Embed
from Dataloader.DataLoader_NER import DataLoader
# from models.BiLSTM_Context import *
# from models.BiLSTM import BiLSTM
from models.Sequence_Label import Sequence_Label
from test import load_test_model
# solve default encoding problem
from imp import reload
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
# random seed
torch.manual_seed(seed_num)
random.seed(seed_num)
def get_learning_algorithm(config):
    """
    Map the optimizer flags on the config to an algorithm name.

    :param config: config
    :return: "Adam", "SGD", or None when neither flag is set
    """
    if config.adam is True:
        algorithm = "Adam"
    elif config.sgd is True:
        algorithm = "SGD"
    else:
        algorithm = None
    print("learning algorithm is {}.".format(algorithm))
    return algorithm
def get_params(config, alphabet):
    """
    Derive model hyper-parameters (vocab sizes, padding ids) from the
    alphabet and attach them to the config.

    :param config: config
    :param alphabet: alphabet dict
    :return: None
    """
    # resolve the optimizer name from the flags
    config.learning_algorithm = get_learning_algorithm(config)
    # when training, start with a clean best-model directory
    config.save_best_model_path = config.save_best_model_dir
    if config.test is False and os.path.exists(config.save_best_model_path):
        shutil.rmtree(config.save_best_model_path)
    # copy vocabulary sizes and padding ids onto the config
    config.embed_num = alphabet.word_alphabet.vocab_size
    config.char_embed_num = alphabet.char_alphabet.vocab_size
    config.class_num = alphabet.label_alphabet.vocab_size
    config.paddingId = alphabet.word_paddingId
    config.char_paddingId = alphabet.char_paddingId
    config.label_paddingId = alphabet.label_paddingId
    config.create_alphabet = alphabet
    print("embed_num : {}, char_embed_num: {}, class_num : {}".format(config.embed_num, config.char_embed_num,
                                                                      config.class_num))
    print("PaddingID {}".format(config.paddingId))
    print("char PaddingID {}".format(config.char_paddingId))
def save_dict2file(dict, path):
    """
    Persist a vocabulary mapping to a plain-text file, one "token<TAB>id"
    pair per line.

    :param dict: mapping token -> index (NOTE: the name shadows the builtin
                 `dict`; kept for interface compatibility with existing callers)
    :param path: destination file path (truncated if it already exists)
    :return: None
    """
    print("Saving dictionary")
    if os.path.exists(path):
        # opening with mode "w" below truncates the old file, so no explicit
        # delete is needed
        print("path {} is exist, deleted.".format(path))
    # FIX: use a context manager so the handle is closed even if a write fails
    # (the original opened the file manually and leaked it on error)
    with open(path, encoding="UTF-8", mode="w") as file:
        for word, index in dict.items():
            file.write(str(word) + "\t" + str(index) + "\n")
    print("Save dictionary finished.")
def save_dictionary(config):
    """
    Dump the word and label alphabets to text files and copy the dictionary
    directory into the current run directory.

    :param config: config
    :return: None
    """
    # guard clause: only persist when explicitly enabled
    if config.save_dict is not True:
        return
    if os.path.exists(config.dict_directory):
        shutil.rmtree(config.dict_directory)
    if not os.path.isdir(config.dict_directory):
        os.makedirs(config.dict_directory)
    config.word_dict_path = "/".join([config.dict_directory, config.word_dict])
    config.label_dict_path = "/".join([config.dict_directory, config.label_dict])
    print("word_dict_path : {}".format(config.word_dict_path))
    print("label_dict_path : {}".format(config.label_dict_path))
    save_dict2file(config.create_alphabet.word_alphabet.words2id, config.word_dict_path)
    save_dict2file(config.create_alphabet.label_alphabet.words2id, config.label_dict_path)
    # copy to mulu
    print("copy dictionary to {}".format(config.save_dir))
    shutil.copytree(config.dict_directory, "/".join([config.save_dir, config.dict_directory]))
# load data / create alphabet / create iterator
def preprocessing(config):
    """
    Load the raw data files, build the alphabet and the batch iterators,
    optionally caching every intermediate artifact as a pkl.

    :param config: config
    :return: (train_iter, dev_iter, test_iter, alphabet)
    """
    print("Processing Data......")
    # read file
    loader = DataLoader(path=[config.train_file, config.dev_file, config.test_file], shuffle=True, config=config)
    train_data, dev_data, test_data = loader.dataLoader()
    print("train sentence {}, dev sentence {}, test sentence {}.".format(len(train_data), len(dev_data), len(test_data)))
    if config.save_pkl:
        torch.save(obj={"train_data": train_data, "dev_data": dev_data, "test_data": test_data},
                   f=os.path.join(config.pkl_directory, config.pkl_data))
    # create the alphabet; both branches of the original were identical, so
    # the vocab is built the same way for any strictly-boolean embed_finetune
    alphabet = None
    if (config.embed_finetune is False) or (config.embed_finetune is True):
        alphabet = CreateAlphabet(min_freq=config.min_freq, train_data=train_data, dev_data=dev_data,
                                  test_data=test_data, config=config)
        alphabet.build_vocab()
    if config.save_pkl:
        torch.save(obj={"alphabet": alphabet}, f=os.path.join(config.pkl_directory, config.pkl_alphabet))
    # create iterator
    batch_builder = Iterators(batch_size=[config.batch_size, config.dev_batch_size, config.test_batch_size],
                              data=[train_data, dev_data, test_data], operator=alphabet, device=config.device,
                              config=config)
    train_iter, dev_iter, test_iter = batch_builder.createIterator()
    if config.save_pkl:
        torch.save(obj={"train_iter": train_iter, "dev_iter": dev_iter, "test_iter": test_iter},
                   f=os.path.join(config.pkl_directory, config.pkl_iter))
    return train_iter, dev_iter, test_iter, alphabet
def pre_embed(config, alphabet):
    """
    Build the pre-trained embedding table for the word alphabet and cache it.

    :param config: config
    :param alphabet: alphabet dict
    :return: pre-trained embedding table, or None when disabled
    """
    print("***************************************")
    pretrain_embed = None
    # pick the OOV-initialisation strategy; the first matching flag wins,
    # mirroring the original elif chain
    embed_types = ""
    if config.pretrained_embed:
        for flag, name in (("zeros", "zero"), ("avg", "avg"), ("uniform", "uniform"), ("nnembed", "nn")):
            if getattr(config, flag):
                embed_types = name
                break
    if config.pretrained_embed is True:
        p = Embed(path=config.pretrained_embed_file, words_dict=alphabet.word_alphabet.id2words,
                  embed_type=embed_types, pad=paddingkey)
        pretrain_embed = p.get_embed()
        embed_dict = {"pretrain_embed": pretrain_embed}
        torch.save(obj=embed_dict, f=os.path.join(config.pkl_directory, config.pkl_embed))
    return pretrain_embed
def load_model(config):
    """
    Build the Sequence_Label model, archive the model source into the run
    directory, move it to GPU when requested, and in test mode restore the
    trained weights.

    :param config: config
    :return: nn model
    """
    print("***************************************")
    net = Sequence_Label(config)
    print("Copy models to {}".format(config.save_dir))
    shutil.copytree("models", "/".join([config.save_dir, "models"]))
    if config.device != cpu_device:
        net = net.cuda()
    if config.test is True:
        net = load_test_model(net, config)
    print(net)
    return net
def load_data(config):
    """
    Build (train+process mode) or restore from pkl (train-only / test mode)
    the batch iterators, alphabet and pre-trained embeddings.

    :param config: config
    :return: (train_iter, dev_iter, test_iter, alphabet)
    """
    print("load data for process or pkl data.")
    train_iter, dev_iter, test_iter = None, None, None
    alphabet = None
    start_time = time.time()
    if (config.train is True) and (config.process is True):
        print("process data")
        # rebuild everything from the raw text files into a fresh pkl dir
        if os.path.exists(config.pkl_directory): shutil.rmtree(config.pkl_directory)
        if not os.path.isdir(config.pkl_directory): os.makedirs(config.pkl_directory)
        train_iter, dev_iter, test_iter, alphabet = preprocessing(config)
        config.pretrained_weight = pre_embed(config=config, alphabet=alphabet)
    elif ((config.train is True) and (config.process is False)) or (config.test is True):
        print("load data from pkl file")
        # load alphabet from pkl
        alphabet_dict = torch.load(f=os.path.join(config.pkl_directory, config.pkl_alphabet))
        print(alphabet_dict.keys())
        alphabet = alphabet_dict["alphabet"]
        # load iter from pkl
        iter_dict = torch.load(f=os.path.join(config.pkl_directory, config.pkl_iter))
        print(iter_dict.keys())
        # FIX: look the iterators up by key instead of relying on dict
        # insertion order via iter_dict.values()
        train_iter = iter_dict["train_iter"]
        dev_iter = iter_dict["dev_iter"]
        test_iter = iter_dict["test_iter"]
        # load embed from pkl (optional: the file only exists when
        # pre-trained embeddings were configured)
        config.pretrained_weight = None
        if os.path.exists(os.path.join(config.pkl_directory, config.pkl_embed)):
            embed_dict = torch.load(f=os.path.join(config.pkl_directory, config.pkl_embed))
            print(embed_dict.keys())
            config.pretrained_weight = embed_dict["pretrain_embed"]
    end_time = time.time()
    print("All Data/Alphabet/Iterator Use Time {:.4f}".format(end_time - start_time))
    print("***************************************")
    return train_iter, dev_iter, test_iter, alphabet
| 9,548 | 37.817073 | 137 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/utils.py | # @Author : bamtercelboo
# @Datetime : 2018/8/24 9:58
# @File : utils.py
# @Last Modify Time : 2018/8/24 9:58
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : utils.py
FUNCTION : None
"""
import sys
import os
import torch
import numpy as np
class Best_Result:
    """
    Mutable record of the best dev/test scores seen so far during training,
    plus the early-stopping patience counter. All scores start at -1
    (i.e. "not evaluated yet").
    """
    def __init__(self):
        # dev-set tracking
        self.best_dev_score = -1
        self.current_dev_score = -1
        self.best_score = -1
        self.best_epoch = 1
        # flag telling the test pass to record its scores as the new best
        self.best_test = False
        # consecutive epochs without dev improvement
        self.early_current_patience = 0
        # best test precision / recall / f-score
        self.p = -1
        self.r = -1
        self.f = -1
def getMaxindex(model_out, label_size, args):
    """
    Return the index of the largest score among the first `label_size`
    entries of model_out.data. Ties keep the first occurrence, matching the
    original strict-'>' scan.

    :param model_out: model output (1-D tensor/sequence of label scores)
    :param label_size: number of labels to consider
    :param args: argument (unused; kept for interface compatibility)
    :return: max index for predict
    """
    scores = model_out.data
    # idiomatic argmax over the first label_size positions; max() returns
    # the first maximal index, preserving the original tie-breaking
    return max(range(label_size), key=lambda idx: scores[idx])
def getMaxindex_np(model_out):
    """Return the index of the maximum score in a 1-D model output.

    :param model_out: object exposing ``.data.tolist()`` (e.g. a 1-D tensor)
    :return: index of the (first) maximum value
    """
    scores = model_out.data.tolist()
    return scores.index(np.max(scores))
def getMaxindex_batch(model_out):
    """Return the per-row argmax indices for a 2-D model output.

    :param model_out: object exposing ``.data.tolist()`` yielding rows of scores
    :return: list with the (first) argmax index of each row
    """
    rows = model_out.data.tolist()
    return [row.index(np.max(row)) for row in rows]
def torch_max(output):
    """Return the per-token argmax labels for each sentence in a batch.

    :param output: tensor of shape (batch, seq_len, label_num)
    :return: list (length batch) of numpy arrays with the argmax label ids
    """
    _, argmax = torch.max(output, dim=2)
    return [argmax[row].cpu().data.numpy() for row in range(output.size(0))]
def save_model_all(model, save_dir, model_name, epoch):
    """Save the model state dict to ``<save_dir>/<model_name>_epoch_<epoch>.pt``.

    Creates *save_dir* if it does not exist.

    :param model: nn model whose ``state_dict()`` is serialized
    :param save_dir: target directory
    :param model_name: file-name prefix
    :param epoch: epoch number embedded in the file name
    :return: None
    """
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    save_prefix = os.path.join(save_dir, model_name)
    save_path = '{}_epoch_{}.pt'.format(save_prefix, epoch)
    print("save all model to {}".format(save_path))
    # context manager replaces the manual open/close pair, so the file is
    # closed even if torch.save raises
    with open(save_path, mode="wb") as output:
        torch.save(model.state_dict(), output)
def save_best_model(model, save_dir, model_name, best_eval):
    """Save the model when the current dev score ties or beats the best so far.

    Also resets ``best_eval.early_current_patience`` to 0 on a save, since the
    dev performance improved. Does nothing when the score did not improve.

    :param model: nn model whose ``state_dict()`` is serialized
    :param save_dir: target directory (created on demand)
    :param model_name: file name without the ``.pt`` suffix
    :param best_eval: tracker exposing ``current_dev_score``,
        ``best_dev_score`` and ``early_current_patience``
    :return: None
    """
    if best_eval.current_dev_score >= best_eval.best_dev_score:
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "{}.pt".format(model_name))
        print("save best model to {}".format(save_path))
        # context manager replaces the manual open/close pair, so the file is
        # closed even if torch.save raises
        with open(save_path, mode="wb") as output:
            torch.save(model.state_dict(), output)
        best_eval.early_current_patience = 0
# adjust lr
def get_lrate(optim):
    """Yield the learning rate of every parameter group in *optim*.

    :param optim: torch optimizer
    :return: generator over the ``'lr'`` value of each param group
    """
    for param_group in optim.param_groups:
        yield param_group['lr']
def set_lrate(optim, lr):
    """Set the learning rate of every parameter group in *optim* to *lr*.

    :param optim: torch optimizer (mutated in place)
    :param lr: new learning rate
    :return: None
    """
    for param_group in optim.param_groups:
        param_group['lr'] = lr
| 3,605 | 23.69863 | 63 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Load_Pretrained_Embed.py | # @Author : bamtercelboo
# @Datetime : 2018/08/27 09.59
# @File : Load_Pretrained_Embed.py
# @Last Modify Time : 2018/08/27 09.59
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Load_Pretrained_Embed.py
FUNCTION : loading pretrained word embedding
"""
import torch
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
import numpy as np
import tqdm
from DataUtils.Common import *
torch.manual_seed(seed_num)
np.random.seed(seed_num)
def load_pretrained_emb_zeros(path, text_field_words_dict, pad=None, set_padding=False):
    """Load pretrained word embeddings; OOV rows stay zero-initialized.

    :param path: embedding file with one "word v1 v2 ..." line per word; an
        optional 1- or 2-column header line is skipped
    :param text_field_words_dict: word -> id mapping, or an ordered word list
    :param pad: optional padding token; looked up to fail fast if it is missing
    :param set_padding: unused, kept for interface compatibility
    :return: FloatTensor of shape (vocab_size, embedding_dim)
    """
    print("loading pre_train embedding by zeros......")
    if not isinstance(text_field_words_dict, dict):
        # fixed: convert_list2dict returns (dict, lowered_list); the old
        # single-name assignment left a tuple in text_field_words_dict
        text_field_words_dict, _ = convert_list2dict(text_field_words_dict)
    if pad is not None:
        padID = text_field_words_dict[pad]  # fail fast if the pad token is missing
    embedding_dim = -1
    # peek at the first line only to infer the embedding dimension
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
            else:
                embedding_dim = len(line_split) - 1
            break
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}'.format(str(word_count),
                                                                                         str(embedding_dim)))
    embeddings = np.zeros((int(word_count), int(embedding_dim)))
    iv_num = 0
    with open(path, encoding='utf-8') as f:
        for line in f.readlines():
            values = line.strip().split(' ')
            if len(values) == 1 or len(values) == 2:
                continue  # header line
            index = text_field_words_dict.get(values[0])  # digit or None
            # fixed: `if index:` silently skipped the word with id 0
            if index is not None:
                iv_num += 1
                embeddings[index] = np.array([float(i) for i in values[1:]], dtype='float32')
    oov_num = word_count - iv_num
    print("iv_num {} oov_num {} oov_radio {:.4f}%".format(iv_num, oov_num, round((oov_num / word_count) * 100, 4)))
    return torch.from_numpy(embeddings).float()
def load_pretrained_emb_Embedding(path, text_field_words_dict, pad=None, set_padding=False):
    """Load pretrained embeddings on top of an xavier-uniform nn.Embedding init.

    Words with no exact match fall back to a case-insensitive ("fuzzy")
    lookup; words with no match at all keep their xavier-uniform row.

    :param path: embedding file with one "word v1 v2 ..." line per word; an
        optional 1- or 2-column header line is skipped
    :param text_field_words_dict: word -> id mapping, or an ordered word list
    :param pad: optional padding token; looked up to fail fast if it is missing
    :param set_padding: unused, kept for interface compatibility
    :return: FloatTensor of shape (vocab_size, embedding_dim)
    """
    print("loading pre_train embedding by nn.Embedding......")
    if not isinstance(text_field_words_dict, dict):
        text_field_words_dict, text_field_words_list = convert_list2dict(text_field_words_dict)
    else:
        # fixed: the lower-cased list used for fuzzy matching was undefined
        # (NameError) whenever a dict was passed in
        text_field_words_list = [w.lower() for w in text_field_words_dict]
    if pad is not None:
        padID = text_field_words_dict[pad]  # fail fast if the pad token is missing
    embedding_dim = -1
    # peek at the first line only to infer the embedding dimension
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
            else:
                embedding_dim = len(line_split) - 1
            break
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}'.format(str(word_count),
                                                                                         str(embedding_dim)))
    embed = nn.Embedding(int(word_count), int(embedding_dim))
    # in-place initializer; the non-underscore xavier_uniform is deprecated
    init.xavier_uniform_(embed.weight.data)
    embeddings = np.array(embed.weight.data)
    iv_num = 0
    fuzzy_num = 0
    with open(path, encoding='utf-8') as f:
        lines = tqdm.tqdm(f.readlines())
        for line in lines:
            values = line.strip().split(' ')
            if len(values) == 1 or len(values) == 2:
                continue  # header line
            word = values[0]
            index = text_field_words_dict.get(word, None)  # digit or None
            if index is None and word.lower() in text_field_words_list:
                fuzzy_num += 1
                index = text_field_words_list.index(word.lower())
            # fixed: `if index:` silently skipped the word with id 0
            if index is not None:
                iv_num += 1
                embeddings[index] = np.array([float(i) for i in values[1:]], dtype='float32')
    oov_num = word_count - iv_num
    print("iv_num {}(fuzzy_num = {}) oov_num {} oov_radio {:.4f}%".format(iv_num, fuzzy_num, oov_num, round((oov_num / word_count) * 100, 4)))
    return torch.from_numpy(embeddings).float()
def load_pretrained_emb_avg(path, text_field_words_dict, pad=None, set_padding=False):
    """Load pretrained embeddings; OOV rows get the average of all loaded vectors.

    :param path: embedding file with one "word v1 v2 ..." line per word; an
        optional 1- or 2-column header line is skipped
    :param text_field_words_dict: word -> id mapping, or an ordered word list
    :param pad: padding token (required); its row is left as zeros
    :param set_padding: unused, kept for interface compatibility
    :return: FloatTensor of shape (vocab_size, embedding_dim)
    """
    print("loading pre_train embedding by avg......")
    if not isinstance(text_field_words_dict, dict):
        # fixed: convert_list2dict returns (dict, lowered_list); the old
        # single-name assignment left a tuple in text_field_words_dict
        text_field_words_dict, _ = convert_list2dict(text_field_words_dict)
    assert pad is not None, "pad not allow with None"
    padID = text_field_words_dict[pad]
    embedding_dim = -1
    # peek at the first line only to infer the embedding dimension
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
            else:
                embedding_dim = len(line_split) - 1
            break
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}\n'.format(str(word_count),
                                                                                           str(embedding_dim)))
    embeddings = np.zeros((int(word_count), int(embedding_dim)))
    inword_list = {}  # ids that received a pretrained vector
    with open(path, encoding='utf-8') as f:
        lines = tqdm.tqdm(f.readlines())
        for line in lines:
            lines.set_description("Processing")
            values = line.strip().split(" ")
            if len(values) == 1 or len(values) == 2:
                continue  # header line
            index = text_field_words_dict.get(values[0])  # digit or None
            # fixed: `if index:` silently skipped the word with id 0
            if index is not None:
                embeddings[index] = np.array([float(i) for i in values[1:]], dtype='float32')
                inword_list[index] = 1
    print("oov words initial by avg embedding, maybe take a while......")
    # average over the in-vocabulary rows only; still raises
    # ZeroDivisionError when no word matched (unchanged behaviour)
    sum_col = np.sum(embeddings, axis=0) / len(inword_list)
    for i in range(len(text_field_words_dict)):
        if i not in inword_list and i != padID:
            embeddings[i] = sum_col
    OOVWords = word_count - len(inword_list)
    oov_radio = np.round(OOVWords / word_count, 6)
    print("All Words = {}, InWords = {}, OOVWords = {}, OOV Radio={}".format(
        word_count, len(inword_list), OOVWords, oov_radio))
    return torch.from_numpy(embeddings).float()
def load_pretrained_emb_uniform(path, text_field_words_dict, pad=None, set_padding=False):
    """Load pretrained embeddings; OOV rows share one uniform(-0.25, 0.25) vector.

    :param path: embedding file with one "word v1 v2 ..." line per word; an
        optional 1- or 2-column header line is skipped
    :param text_field_words_dict: word -> id mapping, or an ordered word list
    :param pad: padding token (required); its row is left as zeros
    :param set_padding: unused, kept for interface compatibility
    :return: FloatTensor of shape (vocab_size, embedding_dim)
    """
    print("loading pre_train embedding by uniform......")
    if not isinstance(text_field_words_dict, dict):
        # fixed: convert_list2dict returns (dict, lowered_list); the old
        # single-name assignment left a tuple in text_field_words_dict
        text_field_words_dict, _ = convert_list2dict(text_field_words_dict)
    assert pad is not None, "pad not allow with None"
    padID = text_field_words_dict[pad]
    embedding_dim = -1
    # peek at the first line only to infer the embedding dimension
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
            else:
                embedding_dim = len(line_split) - 1
            break
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}\n'.format(str(word_count),
                                                                                           str(embedding_dim)))
    embeddings = np.zeros((int(word_count), int(embedding_dim)))
    inword_list = {}  # ids that received a pretrained vector
    with open(path, encoding='utf-8') as f:
        lines = tqdm.tqdm(f.readlines())
        for line in lines:
            lines.set_description("Processing")
            values = line.strip().split(" ")
            if len(values) == 1 or len(values) == 2:
                continue  # header line
            index = text_field_words_dict.get(values[0])  # digit or None
            # fixed: `if index:` silently skipped the word with id 0
            if index is not None:
                embeddings[index] = np.array([float(i) for i in values[1:]], dtype='float32')
                inword_list[index] = 1
    print("oov words initial by uniform embedding, maybe take a while......")
    # one shared uniform vector for every OOV row (except the padding row)
    uniform_col = np.random.uniform(-0.25, 0.25, int(embedding_dim)).round(6)
    for i in range(len(text_field_words_dict)):
        if i not in inword_list and i != padID:
            embeddings[i] = uniform_col
    OOVWords = word_count - len(inword_list)
    oov_radio = np.round(OOVWords / word_count, 6)
    print("All Words = {}, InWords = {}, OOVWords = {}, OOV Radio={}".format(
        word_count, len(inword_list), OOVWords, oov_radio))
    return torch.from_numpy(embeddings).float()
def convert_list2dict(convert_list):
    """Build a word -> index mapping from an ordered word list.

    :param convert_list: ordered list of words
    :return: tuple ``(OrderedDict word -> index, list of lower-cased words)``;
        the assert fails when the input contains duplicate words
    """
    list_lower = [token.lower() for token in convert_list]
    list_dict = OrderedDict((token, idx) for idx, token in enumerate(convert_list))
    assert len(list_lower) == len(list_dict)
    return list_dict, list_lower
| 9,941 | 37.988235 | 142 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Embed_From_Pretrained.py | # @Author : bamtercelboo
# @Datetime : 2018/2/3 14:03
# @File : Embed_From_Pretrained.py
# @Last Modify Time : 2018/2/3 14:03
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Embed_From_Pretrained.py
FUNCTION : None
"""
import os
import sys
import torch
import torch.nn.init as init
import numpy as np
import random
import torch.nn as nn
import hyperparams as hy
torch.manual_seed(hy.seed_num)
random.seed(hy.seed_num)
def Pretrain_Embed(file, vocab_size, words2id, unk, padding):
    """Build an ``nn.Embedding`` initialized from a pretrained vector file.

    The file is expected to contain one ``word v1 v2 ...`` line per word with
    NO header line (the dimension is inferred from the first line).  Every
    word occurring in the file must be present in *words2id*, otherwise a
    KeyError is raised.  The padding row is zero-filled and, unless its id is
    -1, the unk row is set to the average of all loaded vectors.

    :param file: path of the pretrained embedding file
    :param vocab_size: number of rows of the embedding table
    :param words2id: word -> id mapping covering every word in the file
    :param unk: unknown-word token (id -1 skips the average initialization)
    :param padding: padding token; its row is zero-filled
    :return: tuple ``(nn.Embedding, embedding_dim)``
    """
    with open(file, encoding="UTF-8") as f:
        allLines = f.readlines()
    indexs = set()
    # infer the dimension from the first vector line (no header expected)
    embDim = len(allLines[0].strip().split(' ')) - 1
    emb = nn.Embedding(vocab_size, embDim)
    # running sum of all loaded vectors, used to average-init the unk row
    oov_emb = torch.zeros(1, embDim).type(torch.FloatTensor)
    for now_line, line in enumerate(allLines, start=1):
        sys.stdout.write("\rHandling with the {} line.".format(now_line))
        info = line.split(' ')
        wordID = words2id[info[0]]
        if wordID >= 0:
            indexs.add(wordID)
            for idx in range(embDim):
                val = float(info[idx + 1])
                emb.weight.data[wordID][idx] = val
                oov_emb[0][idx] += val
    print("\nHandle Finished.")
    count = len(indexs) + 1
    for idx in range(embDim):
        oov_emb[0][idx] /= count
    unkID = words2id[unk]
    paddingID = words2id[padding]
    # the padding row must stay all-zero
    for idx in range(embDim):
        emb.weight.data[paddingID][idx] = 0
    if unkID != -1:
        # unk row gets the average of every loaded vector
        for idx in range(embDim):
            emb.weight.data[unkID][idx] = oov_emb[0][idx]
    print("Load Embedding file: ", file, ", size: ", embDim)
    oov = 0
    for idx in range(vocab_size):
        if idx not in indexs:
            oov += 1
    print("oov: ", oov, " total: ", vocab_size, "oov ratio: ", oov / vocab_size)
    print("oov ", unk, "use avg value initialize")
    return emb, embDim
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Batch_Iterator_torch.py | # @Author : bamtercelboo
# @Datetime : 2018/1/30 15:55
# @File : Batch_Iterator.py.py
# @Last Modify Time : 2018/1/30 15:55
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Batch_Iterator.py
FUNCTION : None
"""
import torch
from torch.autograd import Variable
import random
from DataUtils.Common import *
torch.manual_seed(seed_num)
random.seed(seed_num)
class Batch_Features:
    """Container for one padded batch of word/char/label features."""
    def __init__(self):
        # number of instances in this batch
        self.batch_length = 0
        # the raw instances the batch was built from
        self.inst = None
        # padded feature tensors (filled in by the iterator builder)
        self.word_features = 0
        self.char_features = 0
        self.label_features = 0
        # true (unpadded) sentence lengths
        self.sentence_length = []
        self.desorted_indices = None
    @staticmethod
    def cuda(features):
        """Move the batch tensors of *features* onto the GPU in place.

        :param features: a Batch_Features instance
        """
        features.word_features = features.word_features.cuda()
        features.label_features = features.label_features.cuda()
        features.char_features = features.char_features.cuda()
class Iterators:
    """
    Iterators: turn lists of instances into lists of padded Batch_Features.

    Each dataset in ``data`` is converted to id sequences in place and then
    grouped into batches; every batch is padded to its longest sentence.
    """
    def __init__(self, batch_size=None, data=None, operator=None, device=None, config=None):
        """
        :param batch_size: list of batch sizes, one per dataset (e.g. [16, 1, 1])
        :param data: list of datasets (e.g. [train, dev, test]), each a list of instances
        :param operator: alphabet holder providing word/char/label vocabularies
        :param device: target device; tensors are moved to CUDA when != cpu_device
        :param config: configuration object; only max_char_len is read here
        """
        self.config = config
        self.batch_size = batch_size
        self.data = data
        self.device = device
        self.operator = operator
        self.operator_static = None
        self.iterator = []
        self.batch = []
        self.features = []
        self.data_iter = []
        # every word is padded/truncated to this many characters
        self.max_char_len = config.max_char_len
    def createIterator(self):
        """
        Build one iterator (a list of Batch_Features) per dataset.
        :return: a tuple of 2 or 3 iterators matching len(self.data);
                 implicitly None for any other length
        """
        assert isinstance(self.data, list), "ERROR: data must be in list [train_data,dev_data]"
        assert isinstance(self.batch_size, list), "ERROR: batch_size must be in list [16,1,1]"
        for id_data in range(len(self.data)):
            print("***************** create {} iterator **************".format(id_data + 1))
            # mutates the instances in place, filling the *_index attributes
            self._convert_word2id(self.data[id_data], self.operator)
            self.features = self._Create_Each_Iterator(insts=self.data[id_data], batch_size=self.batch_size[id_data],
                                                       operator=self.operator, device=self.device)
            self.data_iter.append(self.features)
            self.features = []
        if len(self.data_iter) == 2:
            return self.data_iter[0], self.data_iter[1]
        if len(self.data_iter) == 3:
            return self.data_iter[0], self.data_iter[1], self.data_iter[2]
    @staticmethod
    def _convert_word2id(insts, operator):
        """
        Map every word/char/label of every instance to its alphabet id,
        appending to inst.words_index / inst.chars_index / inst.label_index.
        Unknown words/chars fall back to the unk id; labels are assumed in-vocab.
        :param insts: list of instances (mutated in place)
        :param operator: alphabet holder
        :return: None
        """
        # print(len(insts))
        # for index_inst, inst in enumerate(insts):
        for inst in insts:
            # copy with the word and pos
            for index in range(inst.words_size):
                word = inst.words[index]
                wordId = operator.word_alphabet.from_string(word)
                # if wordID is None:
                if wordId == -1:
                    wordId = operator.word_unkId
                inst.words_index.append(wordId)
                label = inst.labels[index]
                labelId = operator.label_alphabet.from_string(label)
                inst.label_index.append(labelId)
                char_index = []
                for char in inst.chars[index]:
                    charId = operator.char_alphabet.from_string(char)
                    if charId == -1:
                        charId = operator.char_unkId
                    char_index.append(charId)
                inst.chars_index.append(char_index)
    def _Create_Each_Iterator(self, insts, batch_size, operator, device):
        """
        Group *insts* into consecutive batches of *batch_size*
        (the last batch may be smaller) and build a Batch_Features for each.
        :param insts: list of id-converted instances
        :param batch_size: number of instances per batch
        :param operator: alphabet holder (for padding ids)
        :return: list of Batch_Features (accumulated in self.features)
        """
        batch = []
        count_inst = 0
        for index, inst in enumerate(insts):
            batch.append(inst)
            count_inst += 1
            # print(batch)
            # flush on a full batch or at the end of the data (partial batch)
            if len(batch) == batch_size or count_inst == len(insts):
                one_batch = self._Create_Each_Batch(insts=batch, batch_size=batch_size, operator=operator, device=device)
                self.features.append(one_batch)
                batch = []
        print("The all data has created iterator.")
        return self.features
    def _Create_Each_Batch(self, insts, batch_size, operator, device):
        """
        Build one padded Batch_Features from *insts*.

        Words/labels are padded to the longest sentence of the batch with the
        alphabet padding ids; characters are padded/truncated to
        self.max_char_len.  Label ids are stored flattened to a 1-D tensor of
        length batch * max_word_size.
        :param insts: instances of this batch
        :param batch_size: nominal batch size (unused; len(insts) is used)
        :param operator: alphabet holder providing the padding ids
        :return: a filled Batch_Features (moved to CUDA when device != cpu_device)
        """
        # print("create one batch......")
        batch_length = len(insts)
        # copy with the max length for padding
        max_word_size = -1
        max_label_size = -1
        sentence_length = []
        for inst in insts:
            sentence_length.append(inst.words_size)
            word_size = inst.words_size
            if word_size > max_word_size:
                max_word_size = word_size
            if len(inst.labels) > max_label_size:
                max_label_size = len(inst.labels)
        assert max_word_size == max_label_size
        # create with the Tensor/Variable
        # word features
        # NOTE(review): .long() returns a new integer tensor, so requires_grad
        # here has no lasting effect on the resulting tensors — confirm intent
        batch_word_features = torch.zeros(batch_length, max_word_size, device=cpu_device, requires_grad=True).long()
        batch_char_features = torch.zeros(batch_length, max_word_size, self.max_char_len, device=cpu_device, requires_grad=True).long()
        batch_label_features = torch.zeros(batch_length * max_word_size, device=cpu_device, requires_grad=True).long()
        for id_inst in range(batch_length):
            inst = insts[id_inst]
            # copy with the word features
            for id_word_index in range(max_word_size):
                if id_word_index < inst.words_size:
                    batch_word_features.data[id_inst][id_word_index] = inst.words_index[id_word_index]
                else:
                    batch_word_features.data[id_inst][id_word_index] = operator.word_paddingId
                if id_word_index < len(inst.label_index):
                    batch_label_features.data[id_inst * max_word_size + id_word_index] = inst.label_index[id_word_index]
                else:
                    batch_label_features.data[id_inst * max_word_size + id_word_index] = operator.label_paddingId
                    # batch_label_features.data[id_inst * max_word_size + id_word_index] = 0
                # char: pad (or truncate) each word to max_char_len characters
                max_char_size = len(inst.chars_index[id_word_index]) if id_word_index < inst.words_size else 0
                for id_word_c in range(self.max_char_len):
                    if id_word_c < max_char_size:
                        batch_char_features.data[id_inst][id_word_index][id_word_c] = inst.chars_index[id_word_index][id_word_c]
                    else:
                        batch_char_features.data[id_inst][id_word_index][id_word_c] = operator.char_paddingId
        # batch
        features = Batch_Features()
        features.batch_length = batch_length
        features.inst = insts
        features.word_features = batch_word_features
        features.char_features = batch_char_features
        features.label_features = batch_label_features
        features.sentence_length = sentence_length
        features.desorted_indices = None
        if device != cpu_device:
            # cuda is a staticmethod that moves the tensors in place
            features.cuda(features)
        return features
    @staticmethod
    def _prepare_pack_padded_sequence(inputs_words, seq_lengths, descending=True):
        """
        Sort a batch by sentence length (descending by default), as required
        by pack_padded_sequence.
        :param inputs_words: batch tensor indexed on dim 0 by instance
        :param seq_lengths: list of true sentence lengths
        :param descending: sort order
        :return: (sorted inputs, sorted lengths as numpy array,
                  indices that restore the original order)
        """
        sorted_seq_lengths, indices = torch.sort(torch.LongTensor(seq_lengths), descending=descending)
        _, desorted_indices = torch.sort(indices, descending=False)
        sorted_inputs_words = inputs_words[indices]
        return sorted_inputs_words, sorted_seq_lengths.numpy(), desorted_indices
| 7,948 | 35.631336 | 135 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Alphabet.py | # @Author : bamtercelboo
# @Datetime : 2018/1/30 15:54
# @File : Alphabet.py
# @Last Modify Time : 2018/1/30 15:54
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Alphabet.py
FUNCTION : None
"""
import os
import sys
import torch
import random
import collections
from DataUtils.Common import seed_num, unkkey, paddingkey
torch.manual_seed(seed_num)
random.seed(seed_num)
class CreateAlphabet:
    """
    Class: Create_Alphabet
    Function: build word/char/label Alphabets (vocabularies) from the
    train/dev/test instance lists.
    Notice: The Class Need To Change So That Complete All Kinds Of Tasks

    NOTE(review): insertion order into the OrderedDict counters below fixes
    the ids handed out by the Alphabet instances, so the order of these
    statements (unk/pad seeded first) must not be changed.
    """
    def __init__(self, min_freq=1, train_data=None, dev_data=None, test_data=None, config=None):
        """
        :param min_freq: minimum frequency for a word/char to enter the vocab
        :param train_data: list of instances exposing .words/.chars/.labels
        :param dev_data: optional list of instances
        :param test_data: optional list of instances
        :param config: configuration object (stored, not read here)
        """
        # minimum vocab size
        self.min_freq = min_freq
        self.config = config
        self.train_data = train_data
        self.dev_data = dev_data
        self.test_data = test_data
        # frequency counters for words, labels and characters; insertion
        # order determines the final alphabet ids
        self.word_state = collections.OrderedDict()
        self.label_state = collections.OrderedDict()
        self.char_state = collections.OrderedDict()
        # self.word_state = {}
        # self.label_state = {}
        # seed unk and pad with min_freq so they always survive the cutoff
        self.word_state[unkkey] = self.min_freq
        self.word_state[paddingkey] = self.min_freq
        self.char_state[unkkey] = self.min_freq
        self.char_state[paddingkey] = self.min_freq
        # self.label_state[unkkey] = 1
        self.label_state[paddingkey] = 1
        # word and label Alphabet
        self.word_alphabet = Alphabet(min_freq=self.min_freq)
        self.char_alphabet = Alphabet(min_freq=self.min_freq)
        self.label_alphabet = Alphabet()
        self.pretrained_alphabet = Alphabet(min_freq=self.min_freq)
        self.pretrained_alphabet_source = Alphabet(min_freq=self.min_freq)
        # ids of the unk tokens (filled in build_vocab)
        self.word_unkId = 0
        self.char_unkId = 0
        self.label_unkId = 0
        # ids of the padding tokens (filled in build_vocab)
        self.word_paddingId = 0
        self.char_paddingId = 0
        self.label_paddingId = 0
    @staticmethod
    def _build_data(train_data=None, dev_data=None, test_data=None):
        """
        Concatenate the (non-None) datasets into one list used to build the
        shared vocabulary.
        :param train_data: required list of instances
        :param dev_data: optional list of instances
        :param test_data: optional list of instances
        :return: merged list of instances
        """
        # handle the data whether to fine_tune
        """
        :param train data:
        :param dev data:
        :param test data:
        :return: merged data
        """
        assert train_data is not None, "The Train Data Is Not Allow Empty."
        datasets = []
        datasets.extend(train_data)
        print("the length of train data {}".format(len(datasets)))
        if dev_data is not None:
            print("the length of dev data {}".format(len(dev_data)))
            datasets.extend(dev_data)
        if test_data is not None:
            print("the length of test data {}".format(len(test_data)))
            datasets.extend(test_data)
        print("the length of data that create Alphabet {}".format(len(datasets)))
        return datasets
    def build_vocab(self):
        """
        Count word/char/label frequencies over all datasets, initialize the
        Alphabets from the counters, cache the unk/padding ids and freeze
        the vocabularies.
        :return: None
        """
        train_data = self.train_data
        dev_data = self.dev_data
        test_data = self.test_data
        print("Build Vocab Start...... ")
        datasets = self._build_data(train_data=train_data, dev_data=dev_data, test_data=test_data)
        # create the word Alphabet
        for index, data in enumerate(datasets):
            # word
            for word in data.words:
                if word not in self.word_state:
                    self.word_state[word] = 1
                else:
                    self.word_state[word] += 1
            # char: non-alphanumeric characters are skipped entirely
            for char in data.chars:
                # print(char)
                for c in char:
                    if c.isalnum() is False:
                        continue
                    if c not in self.char_state:
                        self.char_state[c] = 1
                    else:
                        self.char_state[c] += 1
            # label
            for label in data.labels:
                if label not in self.label_state:
                    self.label_state[label] = 1
                else:
                    self.label_state[label] += 1
        # print(self.char_state)
        # exit()
        # self.label_state[unkkey] = 1
        # Create id2words and words2id by the Alphabet Class
        self.word_alphabet.initial(self.word_state)
        self.char_alphabet.initial(self.char_state)
        self.label_alphabet.initial(self.label_state)
        # cache the unk and padding ids for fast lookup during batching
        self.word_unkId = self.word_alphabet.from_string(unkkey)
        self.char_unkId = self.char_alphabet.from_string(unkkey)
        # self.label_unkId = self.label_alphabet.loadWord2idAndId2Word(unkkey)
        self.word_paddingId = self.word_alphabet.from_string(paddingkey)
        self.char_paddingId = self.char_alphabet.from_string(paddingkey)
        self.label_paddingId = self.label_alphabet.from_string(paddingkey)
        # freeze the vocabularies: unknown strings now map to -1
        self.word_alphabet.set_fixed_flag(True)
        self.label_alphabet.set_fixed_flag(True)
        self.char_alphabet.set_fixed_flag(True)
class Alphabet:
    """
    Class: Alphabet
    Function: bidirectional mapping between words and integer ids.
    Params:
        ****** id2words: list, position is the id
        ****** words2id: OrderedDict, word -> id
        ****** vocab_size: number of entries
        ****** min_freq: minimum frequency for a word to enter the vocab
        ****** fixed_vocab: once True, unknown words map to -1
        ****** max_cap: hard upper bound on the vocab size
    """
    def __init__(self, min_freq=1):
        self.id2words = []
        self.words2id = collections.OrderedDict()
        self.vocab_size = 0
        self.min_freq = min_freq
        self.max_cap = 1e8
        self.fixed_vocab = False
    def initial(self, data):
        """Populate the vocab from a {word: count} mapping, then freeze it.

        :param data: mapping of word -> frequency
        """
        for word in data:
            if data[word] >= self.min_freq:
                self.from_string(word)
        self.set_fixed_flag(True)
    def set_fixed_flag(self, bfixed):
        """Freeze/unfreeze the vocab; auto-freeze once max_cap is reached.

        :param bfixed: desired frozen state
        """
        self.fixed_vocab = bfixed
        if not self.fixed_vocab and self.vocab_size >= self.max_cap:
            self.fixed_vocab = True
    def from_string(self, string):
        """Return the id of *string*, adding it first unless the vocab is frozen.

        :param string: word to look up
        :return: the word's id, or -1 for unknown words once the vocab is fixed
        """
        existing = self.words2id.get(string)
        if existing is not None:
            return existing
        if self.fixed_vocab:
            return -1
        new_id = self.vocab_size
        self.id2words.append(string)
        self.words2id[string] = new_id
        self.vocab_size += 1
        if self.vocab_size >= self.max_cap:
            self.fixed_vocab = True
        return new_id
    def from_id(self, qid, defineStr=""):
        """Return the word for *qid*, or *defineStr* when out of range.

        :param qid: id to look up
        :param defineStr: fallback for out-of-range ids
        :return: the word, or *defineStr*
        """
        if int(qid) < 0 or qid >= self.vocab_size:
            return defineStr
        return self.id2words[qid]
    def initial_from_pretrain(self, pretrain_file, unk, padding):
        """Build the vocab from the words of a pretrained-embedding file.

        The unk and padding tokens are registered first, so they get ids 0 and 1.

        :param pretrain_file: path of the embedding file
        :param unk: unknown-word token
        :param padding: padding token
        """
        print("initial alphabet from {}".format(pretrain_file))
        self.from_string(unk)
        self.from_string(padding)
        now_line = 0
        with open(pretrain_file, encoding="UTF-8") as f:
            for line in f.readlines():
                now_line += 1
                sys.stdout.write("\rhandling with {} line".format(now_line))
                self.from_string(line.split(" ")[0])
        print("\nHandle Finished.")
| 7,881 | 30.277778 | 98 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Batch_Iterator.py | # @Author : bamtercelboo
# @Datetime : 2018/1/30 15:55
# @File : Batch_Iterator.py.py
# @Last Modify Time : 2018/1/30 15:55
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Batch_Iterator.py
FUNCTION : None
"""
import torch
from torch.autograd import Variable
import random
import numpy as np
from DataUtils.Common import *
torch.manual_seed(seed_num)
random.seed(seed_num)
class Batch_Features:
    """Container for one padded batch of word/char/label features."""
    def __init__(self):
        # number of instances in this batch
        self.batch_length = 0
        # the raw instances the batch was built from
        self.inst = None
        # padded feature tensors (filled in by the iterator builder)
        self.word_features = 0
        self.char_features = 0
        self.label_features = 0
        # true (unpadded) sentence lengths
        self.sentence_length = []
        self.desorted_indices = None
    @staticmethod
    def cuda(features):
        """Move the batch tensors of *features* onto the GPU in place.

        :param features: a Batch_Features instance
        """
        features.word_features = features.word_features.cuda()
        features.label_features = features.label_features.cuda()
        features.char_features = features.char_features.cuda()
class Iterators:
    """
    Iterators: turn lists of instances into lists of padded Batch_Features.

    This variant builds the padded batches as numpy arrays first and converts
    them to long tensors with torch.from_numpy at the end.
    """
    def __init__(self, batch_size=None, data=None, operator=None, device=None, config=None):
        """
        :param batch_size: list of batch sizes, one per dataset (e.g. [16, 1, 1])
        :param data: list of datasets (e.g. [train, dev, test]), each a list of instances
        :param operator: alphabet holder providing word/char/label vocabularies
        :param device: target device; tensors are moved to CUDA when != cpu_device
        :param config: configuration object; only max_char_len is read here
        """
        self.config = config
        self.batch_size = batch_size
        self.data = data
        self.device = device
        self.operator = operator
        self.operator_static = None
        self.iterator = []
        self.batch = []
        self.features = []
        self.data_iter = []
        # every word is padded/truncated to this many characters
        self.max_char_len = config.max_char_len
    def createIterator(self):
        """
        Build one iterator (a list of Batch_Features) per dataset.
        :return: a tuple of 2 or 3 iterators matching len(self.data);
                 implicitly None for any other length
        """
        assert isinstance(self.data, list), "ERROR: data must be in list [train_data,dev_data]"
        assert isinstance(self.batch_size, list), "ERROR: batch_size must be in list [16,1,1]"
        for id_data in range(len(self.data)):
            print("***************** create {} iterator **************".format(id_data + 1))
            # mutates the instances in place, filling the *_index attributes
            self._convert_word2id(self.data[id_data], self.operator)
            self.features = self._Create_Each_Iterator(insts=self.data[id_data], batch_size=self.batch_size[id_data],
                                                       operator=self.operator, device=self.device)
            self.data_iter.append(self.features)
            self.features = []
        if len(self.data_iter) == 2:
            return self.data_iter[0], self.data_iter[1]
        if len(self.data_iter) == 3:
            return self.data_iter[0], self.data_iter[1], self.data_iter[2]
    @staticmethod
    def _convert_word2id(insts, operator):
        """
        Map every word/char/label of every instance to its alphabet id,
        appending to inst.words_index / inst.chars_index / inst.label_index.
        Unknown words/chars fall back to the unk id; labels are assumed in-vocab.
        :param insts: list of instances (mutated in place)
        :param operator: alphabet holder
        :return: None
        """
        # print(len(insts))
        # for index_inst, inst in enumerate(insts):
        for inst in insts:
            # copy with the word and pos
            for index in range(inst.words_size):
                word = inst.words[index]
                wordId = operator.word_alphabet.from_string(word)
                # if wordID is None:
                if wordId == -1:
                    wordId = operator.word_unkId
                inst.words_index.append(wordId)
                label = inst.labels[index]
                labelId = operator.label_alphabet.from_string(label)
                inst.label_index.append(labelId)
                char_index = []
                for char in inst.chars[index]:
                    charId = operator.char_alphabet.from_string(char)
                    if charId == -1:
                        charId = operator.char_unkId
                    char_index.append(charId)
                inst.chars_index.append(char_index)
    def _Create_Each_Iterator(self, insts, batch_size, operator, device):
        """
        Group *insts* into consecutive batches of *batch_size*
        (the last batch may be smaller) and build a Batch_Features for each.
        :param insts: list of id-converted instances
        :param batch_size: number of instances per batch
        :param operator: alphabet holder (for padding ids)
        :return: list of Batch_Features (accumulated in self.features)
        """
        batch = []
        count_inst = 0
        for index, inst in enumerate(insts):
            batch.append(inst)
            count_inst += 1
            # print(batch)
            # flush on a full batch or at the end of the data (partial batch)
            if len(batch) == batch_size or count_inst == len(insts):
                one_batch = self._Create_Each_Batch(insts=batch, batch_size=batch_size, operator=operator, device=device)
                self.features.append(one_batch)
                batch = []
        print("The all data has created iterator.")
        return self.features
    def _Create_Each_Batch(self, insts, batch_size, operator, device):
        """
        Build one padded Batch_Features from *insts* via numpy arrays.

        Words/labels are padded to the longest sentence of the batch with the
        alphabet padding ids; characters are padded/truncated to
        self.max_char_len.  Label ids are stored flattened to a 1-D tensor of
        length batch * max_word_size.
        :param insts: instances of this batch
        :param batch_size: nominal batch size (unused; len(insts) is used)
        :param operator: alphabet holder providing the padding ids
        :return: a filled Batch_Features (moved to CUDA when device != cpu_device)
        """
        # print("create one batch......")
        batch_length = len(insts)
        # copy with the max length for padding
        max_word_size = -1
        max_label_size = -1
        sentence_length = []
        for inst in insts:
            sentence_length.append(inst.words_size)
            word_size = inst.words_size
            if word_size > max_word_size:
                max_word_size = word_size
            if len(inst.labels) > max_label_size:
                max_label_size = len(inst.labels)
        assert max_word_size == max_label_size
        # create with the Tensor/Variable
        # word features
        # batch_word_features = torch.zeros(batch_length, max_word_size, device=cpu_device, requires_grad=True).long()
        # batch_char_features = torch.zeros(batch_length, max_word_size, self.max_char_len, device=cpu_device, requires_grad=True).long()
        # batch_label_features = torch.zeros(batch_length * max_word_size, device=cpu_device, requires_grad=True).long()
        batch_word_features = np.zeros((batch_length, max_word_size))
        batch_char_features = np.zeros((batch_length, max_word_size, self.max_char_len))
        batch_label_features = np.zeros((batch_length * max_word_size))
        for id_inst in range(batch_length):
            inst = insts[id_inst]
            # copy with the word features
            for id_word_index in range(max_word_size):
                if id_word_index < inst.words_size:
                    batch_word_features[id_inst][id_word_index] = inst.words_index[id_word_index]
                else:
                    batch_word_features[id_inst][id_word_index] = operator.word_paddingId
                if id_word_index < len(inst.label_index):
                    batch_label_features[id_inst * max_word_size + id_word_index] = inst.label_index[id_word_index]
                else:
                    batch_label_features[id_inst * max_word_size + id_word_index] = operator.label_paddingId
                # char: pad (or truncate) each word to max_char_len characters
                max_char_size = len(inst.chars_index[id_word_index]) if id_word_index < inst.words_size else 0
                for id_word_c in range(self.max_char_len):
                    if id_word_c < max_char_size:
                        batch_char_features[id_inst][id_word_index][id_word_c] = inst.chars_index[id_word_index][id_word_c]
                    else:
                        batch_char_features[id_inst][id_word_index][id_word_c] = operator.char_paddingId
        # convert the filled numpy buffers to long tensors
        batch_word_features = torch.from_numpy(batch_word_features).long()
        batch_char_features = torch.from_numpy(batch_char_features).long()
        batch_label_features = torch.from_numpy(batch_label_features).long()
        # batch
        features = Batch_Features()
        features.batch_length = batch_length
        features.inst = insts
        features.word_features = batch_word_features
        features.char_features = batch_char_features
        features.label_features = batch_label_features
        features.sentence_length = sentence_length
        features.desorted_indices = None
        if device != cpu_device:
            # cuda is a staticmethod that moves the tensors in place
            features.cuda(features)
        return features
    @staticmethod
    def _prepare_pack_padded_sequence(inputs_words, seq_lengths, descending=True):
        """
        Sort a batch by sentence length (descending by default), as required
        by pack_padded_sequence.
        :param inputs_words: batch tensor indexed on dim 0 by instance
        :param seq_lengths: list of true sentence lengths
        :param descending: sort order
        :return: (sorted inputs, sorted lengths as numpy array,
                  indices that restore the original order)
        """
        sorted_seq_lengths, indices = torch.sort(torch.LongTensor(seq_lengths), descending=descending)
        _, desorted_indices = torch.sort(indices, descending=False)
        sorted_inputs_words = inputs_words[indices]
        return sorted_inputs_words, sorted_seq_lengths.numpy(), desorted_indices
| 8,317 | 36.133929 | 137 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Embed.py | # @Author : bamtercelboo
# @Datetime : 2018/8/27 15:34
# @File : Embed.py
# @Last Modify Time : 2018/8/27 15:34
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Embed.py
FUNCTION : None
"""
import os
import sys
import time
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
from DataUtils.Common import *
torch.manual_seed(seed_num)
np.random.seed(seed_num)
class Embed(object):
    """
    Embed
    Builds an embedding matrix aligned with ``words_dict`` from a
    pre-trained word-vector text file.  Rows for out-of-vocabulary words
    are filled according to ``embed_type``: "zero", "avg", "uniform" or "nn".
    """
    def __init__(self, path, words_dict, embed_type, pad):
        # path: pre-trained vector file, one "word v1 v2 ..." entry per line
        # words_dict: word->index dict, or a plain word list (converted below)
        # embed_type: OOV initialization strategy, one of embed_type_enum
        # pad: padding token; its row index is remembered as self.padID
        self.embed_type_enum = ["zero", "avg", "uniform", "nn"]
        self.path = path
        self.words_dict = words_dict
        self.embed_type = embed_type
        self.pad = pad
        # print(self.words_dict)
        if not isinstance(self.words_dict, dict):
            self.words_dict, self.words_list = self._list2dict(self.words_dict)
        if pad is not None: self.padID = self.words_dict[pad]
        # print(self.words_dict)
        # _get_dim may return the dim as a string (header line); users of
        # self.dim below always wrap it with int().
        self.dim, self.words_count = self._get_dim(path=self.path), len(self.words_dict)
        # match counters reported by info(): exact / lowercased / missing
        self.exact_count, self.fuzzy_count, self.oov_count = 0, 0, 0
    def get_embed(self):
        """
        Read the vector file and build the embedding matrix.
        :return: torch.FloatTensor of shape (words_count, dim)
        """
        embed_dict = None
        if self.embed_type in self.embed_type_enum:
            embed_dict = self._read_file(path=self.path)
        else:
            print("embed_type illegal, must be in {}".format(self.embed_type_enum))
            exit()
        # print(embed_dict)
        embed = None
        # dispatch on the requested OOV strategy
        if self.embed_type == "nn":
            embed = self._nn_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        elif self.embed_type == "zero":
            embed = self._zeros_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        elif self.embed_type == "uniform":
            embed = self._uniform_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        elif self.embed_type == "avg":
            embed = self._avg_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        # print(embed)
        self.info()
        return embed
    def _zeros_embed(self, embed_dict, words_dict):
        """
        Fill known words from embed_dict; OOV rows stay all-zero.
        :param embed_dict: word -> list of vector components (strings)
        :param words_dict: word -> row index
        """
        print("loading pre_train embedding by zeros for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                self.exact_count += 1
            elif word.lower() in embed_dict:
                # fuzzy match: fall back to the lowercased form
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    def _nn_embed(self, embed_dict, words_dict):
        """
        Fill known words from embed_dict; OOV rows keep a random
        xavier-uniform initialization (nn.Embedding default scheme).
        :param embed_dict: word -> list of vector components (strings)
        :param words_dict: word -> row index
        """
        print("loading pre_train embedding by nn.Embedding for out of vocabulary.")
        embed = nn.Embedding(int(self.words_count), int(self.dim))
        init.xavier_uniform_(embed.weight.data)
        embeddings = np.array(embed.weight.data)
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        # padding row is forced to zeros
        embeddings[self.padID] = 0
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    def _uniform_embed(self, embed_dict, words_dict):
        """
        Fill known words from embed_dict; OOV rows (except padding) all
        share one uniform(-0.25, 0.25) sample.
        :param embed_dict: word -> list of vector components (strings)
        :param words_dict: word -> row index
        """
        print("loading pre_train embedding by uniform for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6)  # uniform
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = uniform_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    def _avg_embed(self, embed_dict, words_dict):
        """
        Fill known words from embed_dict; OOV rows (except padding) get the
        average of all in-vocabulary vectors.
        :param embed_dict: word -> list of vector components (strings)
        :param words_dict: word -> row index
        """
        print("loading pre_train embedding by avg for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        sum_col = np.sum(embeddings, axis=0) / len(inword_list)  # avg
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = sum_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    @staticmethod
    def _read_file(path):
        """
        Parse the vector file into a dict; lines with 1-3 fields (e.g.
        word2vec count/dim headers) are skipped.
        :param path: embed file path
        :return: dict word -> list of vector components (strings)
        """
        embed_dict = {}
        with open(path, encoding='utf-8') as f:
            lines = f.readlines()
            lines = tqdm.tqdm(lines)
            for line in lines:
                values = line.strip().split(' ')
                if len(values) == 1 or len(values) == 2 or len(values) == 3:
                    continue
                w, v = values[0], values[1:]
                embed_dict[w] = v
        return embed_dict
    def info(self):
        """
        Print coverage statistics accumulated by the _*_embed methods.
        """
        total_count = self.exact_count + self.fuzzy_count
        print("Words count {}, Embed dim {}.".format(self.words_count, self.dim))
        print("Exact count {} / {}".format(self.exact_count, self.words_count))
        print("Fuzzy count {} / {}".format(self.fuzzy_count, self.words_count))
        print(" INV count {} / {}".format(total_count, self.words_count))
        print(" OOV count {} / {}".format(self.oov_count, self.words_count))
        print(" OOV radio ===> {}%".format(np.round((self.oov_count / self.words_count) * 100, 2)))
        print(40 * "*")
    @staticmethod
    def _get_dim(path):
        """
        Infer the embedding dimensionality from the file's first line:
        a 1- or 2-field header carries the dim as its last field (returned
        as a string), otherwise dim = number of fields - 1.
        :param path: embed file path
        :return: embedding dimension (int, or str for header lines)
        """
        embedding_dim = -1
        with open(path, encoding='utf-8') as f:
            for line in f:
                line_split = line.strip().split(' ')
                if len(line_split) == 1:
                    embedding_dim = line_split[0]
                    break
                elif len(line_split) == 2:
                    embedding_dim = line_split[1]
                    break
                else:
                    embedding_dim = len(line_split) - 1
                    break
        return embedding_dim
    @staticmethod
    def _list2dict(convert_list):
        """
        Convert a word list into an OrderedDict word -> position, also
        returning the lowercased word list.
        :param convert_list: list of words
        :return: (OrderedDict word->index, list of lowercased words)
        """
        list_dict = OrderedDict()
        list_lower = []
        for index, word in enumerate(convert_list):
            list_lower.append(word.lower())
            list_dict[word] = index
        assert len(list_lower) == len(list_dict)
        return list_dict, list_lower
| 8,503 | 36.462555 | 118 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/DataUtils/Optim.py | # -*- coding: utf-8 -*-
import torch.optim
from torch.nn.utils.clip_grad import clip_grad_norm_
# Setup optimizer (should always come after model.cuda())
# iterable of dicts for per-param options where each dict
# is {'params' : [p1, p2, p3...]}.update(generic optimizer args)
# Example:
# optim.SGD([
# {'params': model.base.parameters()},
# {'params': model.classifier.parameters(), 'lr': 1e-3}
# ], lr=1e-2, momentum=0.9)
def decay_learning_rate(optimizer, epoch, init_lr, lr_decay):
    """Apply inverse-time learning-rate decay to every param group.

    Args:
        optimizer: torch optimizer whose ``param_groups`` are updated in place.
        epoch: int, current epoch count.
        init_lr: initial learning rate.
        lr_decay: decay coefficient.
    Returns:
        The same optimizer, with each group's lr set to
        ``init_lr / (1 + lr_decay * epoch)``.
    """
    new_lr = init_lr / (1 + lr_decay * epoch)
    print('learning rate: {0}'.format(new_lr))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return optimizer
class Optimizer(object):
    """Thin wrapper around ``torch.optim`` optimizers.

    Adds optional gradient-norm clipping, per-group weight decay (biases
    excluded from decay), and helpers to inspect / rescale / set the
    learning rate.
    """
    # Class dict to map optimizer identifiers to actual torch classes
    methods = {
        'Adadelta': torch.optim.Adadelta,
        'Adagrad': torch.optim.Adagrad,
        'Adam': torch.optim.Adam,
        'SGD': torch.optim.SGD,
        'ASGD': torch.optim.ASGD,
        'Rprop': torch.optim.Rprop,
        'RMSprop': torch.optim.RMSprop,
    }
    @staticmethod
    def get_params(model):
        """Returns all name, parameter pairs with requires_grad=True."""
        return list(
            filter(lambda p: p[1].requires_grad, model.named_parameters()))
    def __init__(self,
                 name,
                 model,
                 lr=0,
                 weight_decay=0,
                 grad_clip=None,
                 optim_args=None,
                 momentum=None,
                 **kwargs):
        """
        :param name: key into ``Optimizer.methods`` (e.g. "SGD", "Adam").
        :param model: nn.Module whose trainable parameters are optimized.
        :param lr: initial learning rate; 0 keeps the torch default.
        :param weight_decay: L2 decay applied to non-bias parameters only.
        :param grad_clip: max gradient norm; ``None`` or the string "None"
            (as read from config files) disables clipping.
        :param optim_args: extra kwargs forwarded to the torch optimizer.
        :param momentum: momentum value (used for SGD only).
        """
        self.name = name
        self.model = model
        self.init_lr = lr
        self.weight_decay = weight_decay
        self.momentum = momentum
        # BUGFIX: the old code only special-cased the string "None" and
        # crashed on float(None) when grad_clip kept its default of None.
        self.gclip = None if grad_clip in (None, "None") else float(grad_clip)
        self._count = 0
        # TODO: pass external optimizer configs
        if optim_args is None:
            optim_args = {}
        self.optim_args = optim_args
        # If an explicit lr given, pass it to torch optimizer
        if self.init_lr > 0:
            self.optim_args['lr'] = self.init_lr
        if self.name == "SGD" and self.momentum is not None:
            self.optim_args['momentum'] = self.momentum
        # Get all parameters that require grads
        self.named_params = self.get_params(self.model)
        # Keep a flat list of params for gradient clipping
        self.params = [param for (name, param) in self.named_params]
        if self.weight_decay > 0:
            # Decay weights only; biases go into their own decay-free group.
            weight_group = {
                'params': [p for n, p in self.named_params if 'bias' not in n],
                'weight_decay': self.weight_decay,
            }
            bias_group = {
                'params': [p for n, p in self.named_params if 'bias' in n],
            }
            self.param_groups = [weight_group, bias_group]
        else:
            self.param_groups = [{'params': self.params}]
        # Safety check: every trainable param must be in exactly one group.
        n_params = len(self.params)
        for group in self.param_groups:
            n_params -= len(group['params'])
        assert n_params == 0, "Not all params are passed to the optimizer."
        # Create the actual optimizer
        self.optim = self.methods[self.name](self.param_groups,
                                             **self.optim_args)
        # Assign shortcuts (these instance attributes shadow the methods below)
        self.zero_grad = self.optim.zero_grad
        # Skip the useless clip check if gradient clipping was not requested
        if self.gclip == 0 or self.gclip is None:
            self.step = self.optim.step
    def zero_grad(self):
        # Shadowed by the instance attribute set in __init__; kept for API clarity.
        self.optim.zero_grad()
    def step(self, closure=None):
        """Gradient clipping aware step()."""
        if self.gclip is not None and self.gclip > 0:
            clip_grad_norm_(self.params, self.gclip)
        self.optim.step(closure)
    def rescale_lrate(self, scale, min_lrate=-1.0):
        """Multiply each group's lr by ``scale`` (scalar or per-group list),
        never going below ``min_lrate``."""
        if isinstance(scale, list):
            for scale_, group in zip(scale, self.optim.param_groups):
                group['lr'] = max(group['lr'] * scale_, min_lrate)
        else:
            for group in self.optim.param_groups:
                group['lr'] = max(group['lr'] * scale, min_lrate)
    def get_lrate(self):
        """Yield the current learning rate of each param group."""
        for group in self.optim.param_groups:
            yield group['lr']
    def set_lrate(self, lr):
        """Set every group's lr (scalar) or each group's lr (list)."""
        if isinstance(lr, list):
            for lr_, group in zip(lr, self.optim.param_groups):
                group['lr'] = lr_
        else:
            for group in self.optim.param_groups:
                group['lr'] = lr
    def __repr__(self):
        s = "Optimizer => {} (lr: {}, weight_decay: {}, g_clip: {})".format(
            self.name, self.init_lr, self.weight_decay, self.gclip)
        return s
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/models/BiLSTM.py | # @Author : bamtercelboo
# @Datetime : 2018/8/17 16:06
# @File : BiLSTM.py
# @Last Modify Time : 2018/8/17 16:06
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : BiLSTM.py
FUNCTION : None
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import random
from DataUtils.Common import *
from models.initialize import *
from models.modelHelp import prepare_pack_padded_sequence
torch.manual_seed(seed_num)
random.seed(seed_num)
class BiLSTM(nn.Module):
    """
    BiLSTM
    Word-level encoder: embedding -> packed BiLSTM -> dropout -> tanh -> linear.
    """
    def __init__(self, **kwargs):
        super(BiLSTM, self).__init__()
        # Copy every config entry onto the instance (embed_num, embed_dim, ...).
        for key, value in kwargs.items():
            setattr(self, key, value)
        vocab_size, embed_size = self.embed_num, self.embed_dim
        n_labels = self.label_num
        self.embed = nn.Embedding(vocab_size, embed_size, padding_idx=self.paddingId)
        # Either load pre-trained vectors or fall back to random init.
        if self.pretrained_embed:
            self.embed.weight.data.copy_(self.pretrained_weight)
        else:
            init_embedding(self.embed.weight)
        self.dropout_embed = nn.Dropout(self.dropout_emb)
        self.dropout = nn.Dropout(self.dropout)
        self.bilstm = nn.LSTM(input_size=embed_size, hidden_size=self.lstm_hiddens,
                              num_layers=self.lstm_layers, bidirectional=True,
                              batch_first=True, bias=True)
        self.linear = nn.Linear(in_features=self.lstm_hiddens * 2,
                                out_features=n_labels, bias=True)
        init_linear(self.linear)
    def forward(self, word, sentence_length):
        """
        :param word: (batch, max_len) word-id tensor.
        :param sentence_length: lengths of the sentences in the batch.
        :return: (batch, max_len, label_num) emission scores.
        """
        # Sort by length for packing; remember the permutation that undoes it.
        word, sentence_length, desorted_indices = prepare_pack_padded_sequence(
            word, sentence_length, device=self.device)
        embeds = self.dropout_embed(self.embed(word))  # (N, W, D)
        packed = pack_padded_sequence(embeds, sentence_length, batch_first=True)
        lstm_out, _ = self.bilstm(packed)
        lstm_out, _ = pad_packed_sequence(lstm_out, batch_first=True)
        # Restore the original batch order before scoring.
        lstm_out = lstm_out[desorted_indices]
        return self.linear(torch.tanh(self.dropout(lstm_out)))
| 2,184 | 28.931507 | 121 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/models/CRF.py | # @Author : bamtercelboo
# @Datetime : 2018/9/14 9:51
# @File : CRF.py
# @Last Modify Time : 2018/9/14 9:51
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : CRF.py
FUNCTION : None
REFERENCE : https://github.com/jiesutd/NCRFpp/blob/master/model/crf.py
"""
import torch
from torch.autograd.variable import Variable
import torch.nn as nn
def log_sum_exp(vec, m_size):
    """Numerically stable log-sum-exp over dimension 1.

    Args:
        vec: size=(batch_size, vanishing_dim, hidden_dim)
        m_size: hidden_dim
    Returns:
        size=(batch_size, hidden_dim)
    """
    # Shift by the per-column maximum before exponentiating for stability.
    _, argmax = torch.max(vec, 1)  # B * M
    max_score = torch.gather(vec, 1, argmax.view(-1, 1, m_size)).view(-1, 1, m_size)
    shifted = torch.exp(vec - max_score.expand_as(vec))
    summed = torch.sum(shifted, 1)
    return max_score.view(-1, m_size) + torch.log(summed).view(-1, m_size)
class CRF(nn.Module):
    """
    CRF
    Linear-chain CRF layer with batched forward algorithm and Viterbi
    decoding.  Two virtual states (START/STOP) are appended after the real
    labels, so all tensors use tag_size = target_size + 2.
    """
    def __init__(self, **kwargs):
        """
        kwargs:
            target_size: int, number of real labels (START/STOP are added here)
            device: str, device the transition matrix lives on
        """
        super(CRF, self).__init__()
        for k in kwargs:
            self.__setattr__(k, kwargs[k])
        device = self.device
        # init transitions; transitions[from, to] is the transition score
        self.START_TAG, self.STOP_TAG = -2, -1
        init_transitions = torch.zeros(self.target_size + 2, self.target_size + 2, device=device)
        # forbid transitions into START and out of STOP
        init_transitions[:, self.START_TAG] = -10000.0
        init_transitions[self.STOP_TAG, :] = -10000.0
        self.transitions = nn.Parameter(init_transitions)
    def _forward_alg(self, feats, mask):
        """
        Do the forward algorithm to compute the partition function (batched).
        Args:
            feats: size=(batch_size, seq_len, self.target_size+2)
            mask: size=(batch_size, seq_len)
        Returns:
            (sum of log partition functions over the batch,
             scores tensor of size (seq_len, batch_size, tag_size, tag_size))
        """
        batch_size = feats.size(0)
        seq_len = feats.size(1)
        tag_size = feats.size(2)
        mask = mask.transpose(1, 0).contiguous()
        ins_num = seq_len * batch_size
        """ be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1) """
        feats = feats.transpose(1,0).contiguous().view(ins_num,1, tag_size).expand(ins_num, tag_size, tag_size)
        """ need to consider start """
        scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
        scores = scores.view(seq_len, batch_size, tag_size, tag_size)
        # build iter
        seq_iter = enumerate(scores)
        _, inivalues = next(seq_iter)  # bat_size * from_target_size * to_target_size
        """ only need start from start_tag """
        partition = inivalues[:, self.START_TAG, :].clone().view(batch_size, tag_size, 1)  # bat_size * to_target_size
        """
        add start score (from start to all tag, duplicate to batch_size)
        partition = partition + self.transitions[START_TAG,:].view(1, tag_size, 1).expand(batch_size, tag_size, 1)
        iter over last scores
        """
        for idx, cur_values in seq_iter:
            """
            previous to_target is current from_target
            partition: previous results log(exp(from_target)), #(batch_size * from_target)
            cur_values: bat_size * from_target * to_target
            """
            cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
            cur_partition = log_sum_exp(cur_values, tag_size)
            mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)
            """ effective updated partition part, only keep the partition value of mask value = 1 """
            masked_cur_partition = cur_partition.masked_select(mask_idx)
            """ let mask_idx broadcastable, to disable warning """
            mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
            """ replace the partition where the maskvalue=1, other partition value keeps the same """
            partition.masked_scatter_(mask_idx, masked_cur_partition)
        """
        until the last state, add transition score for all partition (and do log_sum_exp)
        then select the value in STOP_TAG
        """
        cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
        cur_partition = log_sum_exp(cur_values, tag_size)
        final_partition = cur_partition[:, self.STOP_TAG]
        return final_partition.sum(), scores
    def _viterbi_decode(self, feats, mask):
        """
        Batched Viterbi decoding: find the best-scoring label sequence.
        input:
            feats: (batch, seq_len, self.tag_size+2)
            mask: (batch, seq_len)
        output:
            decode_idx: (batch, seq_len) decoded sequence
            path_score: (batch, 1) corresponding score for each sequence (to be implementated)
        """
        # print(feats.size())
        batch_size = feats.size(0)
        seq_len = feats.size(1)
        tag_size = feats.size(2)
        # assert(tag_size == self.tagset_size+2)
        """ calculate sentence length for each sentence """
        length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
        """ mask to (seq_len, batch_size) """
        mask = mask.transpose(1, 0).contiguous()
        ins_num = seq_len * batch_size
        """ be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1) """
        feats = feats.transpose(1,0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
        """ need to consider start """
        scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
        scores = scores.view(seq_len, batch_size, tag_size, tag_size)
        # build iter
        seq_iter = enumerate(scores)
        # record the position of best score
        back_points = list()
        partition_history = list()
        ## reverse mask (bug for mask = 1- mask, use this as alternative choice)
        # mask = 1 + (-1)*mask
        mask = (1 - mask.long()).byte()
        _, inivalues = next(seq_iter)  # bat_size * from_target_size * to_target_size
        """ only need start from start_tag """
        partition = inivalues[:, self.START_TAG, :].clone().view(batch_size, tag_size)  # bat_size * to_target_size
        partition_history.append(partition)
        # iter over last scores
        for idx, cur_values in seq_iter:
            """
            previous to_target is current from_target
            partition: previous results log(exp(from_target)), #(batch_size * from_target)
            cur_values: batch_size * from_target * to_target
            """
            cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
            """ forscores, cur_bp = torch.max(cur_values[:,:-2,:], 1) # do not consider START_TAG/STOP_TAG """
            partition, cur_bp = torch.max(cur_values, 1)
            partition_history.append(partition)
            """
            cur_bp: (batch_size, tag_size) max source score position in current tag
            set padded label as 0, which will be filtered in post processing
            """
            cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size).bool(), 0)
            back_points.append(cur_bp)
        """ add score to final STOP_TAG """
        partition_history = torch.cat(partition_history, 0).view(seq_len, batch_size, -1).transpose(1, 0).contiguous()  ## (batch_size, seq_len, tag_size)
        """ get the last position for each setences, and select the last partitions using gather() """
        last_position = length_mask.view(batch_size,1,1).expand(batch_size, 1, tag_size) -1
        last_partition = torch.gather(partition_history, 1, last_position).view(batch_size,tag_size,1)
        """ calculate the score from last partition to end state (and then select the STOP_TAG from it) """
        last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1,tag_size, tag_size).expand(batch_size, tag_size, tag_size)
        _, last_bp = torch.max(last_values, 1)
        pad_zero = torch.zeros(batch_size, tag_size, device=self.device, requires_grad=True).long()
        back_points.append(pad_zero)
        back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)
        """ elect end ids in STOP_TAG """
        pointer = last_bp[:, self.STOP_TAG]
        insert_last = pointer.contiguous().view(batch_size,1,1).expand(batch_size,1, tag_size)
        back_points = back_points.transpose(1,0).contiguous()
        """move the end ids(expand to tag_size) to the corresponding position of back_points to replace the 0 values """
        back_points.scatter_(1, last_position, insert_last)
        back_points = back_points.transpose(1,0).contiguous()
        """ decode from the end, padded position ids are 0, which will be filtered if following evaluation """
        # decode_idx = Variable(torch.LongTensor(seq_len, batch_size))
        decode_idx = torch.empty(seq_len, batch_size, device=self.device, requires_grad=True).long()
        decode_idx[-1] = pointer.detach()
        # walk the back-pointers from the last step to the first
        for idx in range(len(back_points)-2, -1, -1):
            pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))
            decode_idx[idx] = pointer.detach().view(batch_size)
        path_score = None
        decode_idx = decode_idx.transpose(1, 0)
        return path_score, decode_idx
    def forward(self, feats, mask):
        """
        Decode the best label sequence for a batch of emission scores.
        :param feats: (batch, seq_len, tag_size) emission scores
        :param mask: (batch, seq_len) padding mask
        :return: (path_score, best_path) as produced by _viterbi_decode
        """
        path_score, best_path = self._viterbi_decode(feats, mask)
        return path_score, best_path
    def _score_sentence(self, scores, mask, tags):
        """
        Score the gold label sequences (numerator of the CRF likelihood).
        Args:
            scores: size=(seq_len, batch_size, tag_size, tag_size)
            mask: size=(batch_size, seq_len)
            tags: size=(batch_size, seq_len)
        Returns:
            score: summed gold-path score over the batch
        """
        # print(scores.size())
        batch_size = scores.size(1)
        seq_len = scores.size(0)
        tag_size = scores.size(-1)
        tags = tags.view(batch_size, seq_len)
        """ convert tag value into a new format, recorded label bigram information to index """
        # new_tags = Variable(torch.LongTensor(batch_size, seq_len))
        new_tags = torch.empty(batch_size, seq_len, device=self.device, requires_grad=True).long()
        for idx in range(seq_len):
            if idx == 0:
                new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0]
            else:
                new_tags[:, idx] = tags[:, idx-1] * tag_size + tags[:, idx]
        """ transition for label to STOP_TAG """
        end_transition = self.transitions[:, self.STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)
        """ length for batch, last word position = length - 1 """
        length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()
        """ index the label id of last word """
        end_ids = torch.gather(tags, 1, length_mask-1)
        """ index the transition score for end_id to STOP_TAG """
        end_energy = torch.gather(end_transition, 1, end_ids)
        """ convert tag as (seq_len, batch_size, 1) """
        new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)
        """ need convert tags id to search from 400 positions of scores """
        tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size)
        tg_energy = tg_energy.masked_select(mask.transpose(1, 0))
        """
        add all score together
        gold_score = start_energy.sum() + tg_energy.sum() + end_energy.sum()
        """
        gold_score = tg_energy.sum() + end_energy.sum()
        return gold_score
    def neg_log_likelihood_loss(self, feats, mask, tags):
        """
        Negative log-likelihood: log-partition minus gold-path score.
        Args:
            feats: size=(batch_size, seq_len, tag_size)
            mask: size=(batch_size, seq_len)
            tags: size=(batch_size, seq_len)
        """
        batch_size = feats.size(0)
        forward_score, scores = self._forward_alg(feats, mask)
        gold_score = self._score_sentence(scores, mask, tags)
        return forward_score - gold_score
| 12,355 | 45.104478 | 198 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/models/modelHelp.py | # @Author : bamtercelboo
# @Datetime : 2018/9/15 19:09
# @File : modelHelp.py
# @Last Modify Time : 2018/9/15 19:09
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : modelHelp.py
FUNCTION : None
"""
import torch
import random
from DataUtils.Common import *
torch.manual_seed(seed_num)
random.seed(seed_num)
def prepare_pack_padded_sequence(inputs_words, seq_lengths, device="cpu", descending=True):
"""
:param device:
:param inputs_words:
:param seq_lengths:
:param descending:
:return:
"""
sorted_seq_lengths, indices = torch.sort(torch.Tensor(seq_lengths).long(), descending=descending)
if device != cpu_device:
sorted_seq_lengths, indices = sorted_seq_lengths.cuda(), indices.cuda()
_, desorted_indices = torch.sort(indices, descending=False)
sorted_inputs_words = inputs_words[indices]
return sorted_inputs_words, sorted_seq_lengths.cpu().numpy(), desorted_indices
| 948 | 26.114286 | 101 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/models/initialize.py | # @Author : bamtercelboo
# @Datetime : 2018/8/25 9:15
# @File : initialize.py
# @Last Modify Time : 2018/8/25 9:15
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : initialize.py
FUNCTION : None
"""
import numpy as np
import torch
import torch.nn as nn
def init_cnn_weight(cnn_layer, seed=1337):
    """Initialize a conv layer in place: seeded normal weights, zero bias.

    Args:
        cnn_layer: weight.size() == [nb_filter, in_channels, [kernel_size]]
        seed: int, manual seed for reproducibility
    """
    n_filters = cnn_layer.weight.size(0)
    kernel_dims = cnn_layer.weight.size()[2:]
    bound = np.sqrt(2. / (n_filters * np.prod(kernel_dims)))
    torch.manual_seed(seed)
    # NOTE(review): normal_(weight, -bound, bound) passes (mean, std);
    # a uniform range may have been intended — behavior preserved as-is.
    nn.init.normal_(cnn_layer.weight, -bound, bound)
    cnn_layer.bias.data.zero_()
def init_cnn(cnn_layer, seed=1337):
    """Initialize a conv layer in place: xavier-normal weights, uniform bias.

    Args:
        cnn_layer: weight.size() == [nb_filter, in_channels, [kernel_size]]
        seed: int, manual seed for reproducibility
    """
    n_filters = cnn_layer.weight.size(0)
    kernel_dims = cnn_layer.weight.size()[2:]
    bound = np.sqrt(2. / (n_filters * np.prod(kernel_dims)))
    torch.manual_seed(seed)
    nn.init.xavier_normal_(cnn_layer.weight)
    cnn_layer.bias.data.uniform_(-bound, bound)
def init_lstm_weight(lstm, num_layer=1, seed=1337):
    """Initialize LSTM weights and biases in place.

    Weights get a seeded uniform init scaled by fan sizes; biases are
    zeroed except the forget-gate slice, which is set to 1.
    Args:
        lstm: torch.nn.LSTM
        num_layer: int, number of layers to initialize
        seed: int, manual seed for reproducibility
    """
    for layer in range(num_layer):
        hh = getattr(lstm, 'weight_hh_l{0}'.format(layer))
        bound = np.sqrt(6.0 / (hh.size(0) / 4. + hh.size(1)))
        torch.manual_seed(seed)
        nn.init.uniform_(getattr(lstm, 'weight_hh_l{0}'.format(layer)), -bound, bound)
        ih = getattr(lstm, 'weight_ih_l{0}'.format(layer))
        bound = np.sqrt(6.0 / (ih.size(0) / 4. + ih.size(1)))
        torch.manual_seed(seed)
        nn.init.uniform_(getattr(lstm, 'weight_ih_l{0}'.format(layer)), -bound, bound)
    if lstm.bias:
        for layer in range(num_layer):
            # hidden-hidden bias first, then input-hidden bias
            for name in ('bias_hh_l{0}', 'bias_ih_l{0}'):
                b = getattr(lstm, name.format(layer))
                b.data.zero_()
                # forget-gate slice gets bias 1
                b.data[lstm.hidden_size: 2 * lstm.hidden_size] = 1
def init_linear(input_linear, seed=1337):
    """Initialize a linear layer in place: seeded fan-scaled uniform
    weights, zero bias (if present)."""
    torch.manual_seed(seed)
    bound = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear.weight.size(1)))
    nn.init.uniform_(input_linear.weight, -bound, bound)
    if input_linear.bias is not None:
        input_linear.bias.data.zero_()
def init_linear_weight_bias(input_linear, seed=1337):
    """Initialize a linear layer in place: xavier-uniform weights and a
    uniform bias scaled by the output fan.

    :param input_linear: nn.Linear to initialize
    :param seed: int, manual seed for reproducibility
    """
    torch.manual_seed(seed)
    nn.init.xavier_uniform_(input_linear.weight)
    bound = np.sqrt(6.0 / (input_linear.weight.size(0) + 1))
    if input_linear.bias is not None:
        input_linear.bias.data.uniform_(-bound, bound)
def init_embedding(input_embedding, seed=666):
    """Seeded uniform init of an embedding weight tensor, in place,
    with range +/- sqrt(3 / embed_dim)."""
    torch.manual_seed(seed)
    bound = np.sqrt(3.0 / input_embedding.size(1))
    nn.init.uniform_(input_embedding, -bound, bound)
def init_embed(input_embedding, seed=656):
    """Seeded xavier-uniform init of an embedding weight tensor, in place."""
    torch.manual_seed(seed)
    nn.init.xavier_uniform_(input_embedding)
| 3,324 | 28.6875 | 86 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/models/Sequence_Label.py | # @Author : bamtercelboo
# @Datetime : 2018/9/14 8:43
# @File : Sequence_Label.py
# @Last Modify Time : 2018/9/14 8:43
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Sequence_Label.py
FUNCTION : None
"""
import torch
import torch.nn as nn
import random
import numpy as np
import time
from models.BiLSTM import BiLSTM
from models.BiLSTM_CNN import BiLSTM_CNN
from models.CRF import CRF
from DataUtils.Common import *
torch.manual_seed(seed_num)
random.seed(seed_num)
class Sequence_Label(nn.Module):
    """
    Sequence_Label
    Top-level sequence-labeling model: picks a BiLSTM or BiLSTM+char-CNN
    encoder from ``config`` and optionally adds a CRF layer on top.
    """
    def __init__(self, config):
        """
        :param config: configuration object carrying the embedding, LSTM,
            char-CNN and CRF hyper-parameters copied onto ``self`` below.
        """
        super(Sequence_Label, self).__init__()
        self.config = config
        # embed
        self.embed_num = config.embed_num
        self.embed_dim = config.embed_dim
        self.label_num = config.class_num
        self.paddingId = config.paddingId
        # dropout
        self.dropout_emb = config.dropout_emb
        self.dropout = config.dropout
        # lstm
        self.lstm_hiddens = config.lstm_hiddens
        self.lstm_layers = config.lstm_layers
        # pretrain
        self.pretrained_embed = config.pretrained_embed
        self.pretrained_weight = config.pretrained_weight
        # char
        self.use_char = config.use_char
        self.char_embed_num = config.char_embed_num
        self.char_paddingId = config.char_paddingId
        self.char_dim = config.char_dim
        self.conv_filter_sizes = self._conv_filter(config.conv_filter_sizes)
        self.conv_filter_nums = self._conv_filter(config.conv_filter_nums)
        assert len(self.conv_filter_sizes) == len(self.conv_filter_nums)
        # use crf
        self.use_crf = config.use_crf
        # cuda or cpu
        self.device = config.device
        # The CRF needs two extra emission columns for virtual START/STOP.
        self.target_size = self.label_num if self.use_crf is False else self.label_num + 2
        if self.use_char is True:
            self.encoder_model = BiLSTM_CNN(embed_num=self.embed_num, embed_dim=self.embed_dim, label_num=self.target_size,
                                            paddingId=self.paddingId, dropout_emb=self.dropout_emb, dropout=self.dropout,
                                            lstm_hiddens=self.lstm_hiddens, lstm_layers=self.lstm_layers,
                                            pretrained_embed=self.pretrained_embed, pretrained_weight=self.pretrained_weight,
                                            char_embed_num=self.char_embed_num, char_dim=self.char_dim,
                                            char_paddingId=self.char_paddingId, conv_filter_sizes=self.conv_filter_sizes,
                                            conv_filter_nums=self.conv_filter_nums, device=self.device)
        else:
            self.encoder_model = BiLSTM(embed_num=self.embed_num, embed_dim=self.embed_dim, label_num=self.target_size,
                                        paddingId=self.paddingId, dropout_emb=self.dropout_emb, dropout=self.dropout,
                                        lstm_hiddens=self.lstm_hiddens, lstm_layers=self.lstm_layers,
                                        pretrained_embed=self.pretrained_embed, pretrained_weight=self.pretrained_weight,
                                        device=self.device)
        if self.use_crf is True:
            args_crf = dict({'target_size': self.label_num, 'device': self.device})
            self.crf_layer = CRF(**args_crf)
    @staticmethod
    def _conv_filter(str_list):
        """Parse a comma-separated string of ints, e.g. "1,3,5" -> [1, 3, 5].

        :param str_list: str of comma-separated integers.
        :return: list of ints.
        """
        # Comprehension also avoids shadowing the builtin ``str`` as the
        # original loop variable did.
        return [int(piece) for piece in str_list.split(",")]
    def forward(self, word, char, sentence_length, train=False):
        """
        :param word: word-id tensor.
        :param char: char-id tensor (used only when ``use_char`` is True).
        :param sentence_length: lengths of the sentences in the batch.
        :param train: unused flag, kept for interface compatibility.
        :return: encoder emission scores.
        """
        if self.use_char is True:
            return self.encoder_model(word, char, sentence_length)
        return self.encoder_model(word, sentence_length)
| 4,205 | 36.553571 | 125 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/models/BiLSTM_CNN.py | # @Author : bamtercelboo
# @Datetime : 2018/8/17 16:06
# @File : BiLSTM.py
# @Last Modify Time : 2018/8/17 16:06
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : BiLSTM.py
FUNCTION : None
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import random
from models.initialize import *
from DataUtils.Common import *
from torch.nn import init
from models.modelHelp import prepare_pack_padded_sequence
torch.manual_seed(seed_num)
random.seed(seed_num)
class BiLSTM_CNN(nn.Module):
    """
    BiLSTM_CNN
    Word encoder that concatenates word embeddings with char-CNN features
    before a BiLSTM: [word embed ; char conv pools] -> BiLSTM -> linear.
    """
    def __init__(self, **kwargs):
        super(BiLSTM_CNN, self).__init__()
        # Copy every config entry onto the instance.
        for k in kwargs:
            self.__setattr__(k, kwargs[k])
        V = self.embed_num
        D = self.embed_dim
        C = self.label_num
        paddingId = self.paddingId
        char_paddingId = self.char_paddingId
        # word embedding layer
        self.embed = nn.Embedding(V, D, padding_idx=paddingId)
        if self.pretrained_embed:
            self.embed.weight.data.copy_(self.pretrained_weight)
        # char embedding layer
        self.char_embedding = nn.Embedding(self.char_embed_num, self.char_dim, padding_idx=char_paddingId)
        init_embed(self.char_embedding.weight)
        # dropout
        self.dropout_embed = nn.Dropout(self.dropout_emb)
        self.dropout = nn.Dropout(self.dropout)
        # cnn
        # BUGFIX: the char conv layers were stored in a plain Python list,
        # so their parameters were invisible to .parameters()/named_parameters()
        # (never optimized, never saved).  nn.ModuleList registers them.
        self.char_encoders = nn.ModuleList()
        for i, filter_size in enumerate(self.conv_filter_sizes):
            f = nn.Conv3d(in_channels=1, out_channels=self.conv_filter_nums[i],
                          kernel_size=(1, filter_size, self.char_dim))
            self.char_encoders.append(f)
        if self.device != cpu_device:
            self.char_encoders.cuda()
        # BiLSTM consumes word embedding + all char conv channels.
        lstm_input_dim = D + sum(self.conv_filter_nums)
        self.bilstm = nn.LSTM(input_size=lstm_input_dim, hidden_size=self.lstm_hiddens, num_layers=self.lstm_layers,
                              bidirectional=True, batch_first=True, bias=True)
        self.linear = nn.Linear(in_features=self.lstm_hiddens * 2, out_features=C, bias=True)
        init_linear_weight_bias(self.linear)
    def _char_forward(self, inputs):
        """
        Compute per-word character-CNN features.
        Args:
            inputs: 3D tensor, [bs, max_len, max_len_char]
        Returns:
            char_conv_outputs: 3D tensor, [bs, max_len, output_dim]
        """
        max_len, max_len_char = inputs.size(1), inputs.size(2)
        inputs = inputs.view(-1, max_len * max_len_char)  # [bs, -1]
        input_embed = self.char_embedding(inputs)  # [bs, ml*ml_c, feature_dim]
        # [bs, 1, max_len, max_len_char, feature_dim]
        input_embed = input_embed.view(-1, 1, max_len, max_len_char, self.char_dim)
        # one max-pooled feature map per filter size
        char_conv_outputs = []
        for char_encoder in self.char_encoders:
            conv_output = char_encoder(input_embed)
            pool_output = torch.squeeze(torch.max(conv_output, -2)[0], -1)
            char_conv_outputs.append(pool_output)
        char_conv_outputs = torch.cat(char_conv_outputs, dim=1)
        # -> [bs, max_len, sum(conv_filter_nums)]
        char_conv_outputs = char_conv_outputs.permute(0, 2, 1)
        return char_conv_outputs
    def forward(self, word, char, sentence_length):
        """
        :param word: (batch, max_len) word-id tensor.
        :param char: (batch, max_len, max_len_char) char-id tensor.
        :param sentence_length: lengths of the sentences (unused here).
        :return: (batch, max_len, label_num) emission scores.
        """
        char_conv = self._char_forward(char)
        char_conv = self.dropout(char_conv)
        word = self.embed(word)  # (N,W,D)
        x = torch.cat((word, char_conv), -1)
        x = self.dropout_embed(x)
        x, _ = self.bilstm(x)
        x = self.dropout(x)
        x = torch.tanh(x)
        logit = self.linear(x)
        return logit
| 3,897 | 33.192982 | 124 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/Dataloader/Instance.py | # coding=utf-8
# @Author : bamtercelboo
# @Datetime : 2018/1/30 15:56
# @File : Instance.py
# @Last Modify Time : 2018/1/30 15:56
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Instance.py
FUNCTION : Data Instance
"""
import torch
import random
from DataUtils.Common import *
# Seed torch and random with the shared seed_num (from DataUtils.Common) so runs are reproducible.
torch.manual_seed(seed_num)
random.seed(seed_num)
class Instance:
    """One sentence example: raw tokens plus their index-mapped forms.

    ``words``/``chars``/``labels`` hold the raw strings, the ``*_index``
    lists hold the corresponding integer ids (filled in later by the
    vocabulary-building code), and the ``*_size`` fields cache lengths.
    """
    def __init__(self):
        # raw surface forms
        self.words, self.chars, self.labels = [], [], []
        # cached lengths, set once the sentence is complete
        self.words_size = 0
        self.chars_size = 0
        # integer-id versions, populated after the alphabets are built
        self.words_index, self.chars_index, self.label_index = [], [], []
| 646 | 16.486486 | 46 | py |
KazNERD | KazNERD-main/BiLSTM_CNN_CRF/Dataloader/DataLoader_NER.py | # @Author : bamtercelboo
# @Datetime : 2018/1/30 15:58
# @File : DataConll2003_Loader.py
# @Last Modify Time : 2018/1/30 15:58
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE :
FUNCTION :
"""
import os, pdb
import re
import random
import torch
from Dataloader.Instance import Instance
from DataUtils.Common import *
# Seed torch and random with the shared seed_num (from DataUtils.Common) so runs are reproducible.
torch.manual_seed(seed_num)
random.seed(seed_num)
class DataLoaderHelp(object):
    """
    Static text-normalization and bookkeeping helpers shared by DataLoader.
    """
    @staticmethod
    def _clean_str(string):
        """
        Tokenization/string cleaning for all datasets except for SST.
        Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
        Returns the cleaned, lower-cased string.
        """
        string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
        string = re.sub(r"\'s", " \'s", string)
        string = re.sub(r"\'ve", " \'ve", string)
        string = re.sub(r"n\'t", " n\'t", string)
        string = re.sub(r"\'re", " \'re", string)
        string = re.sub(r"\'d", " \'d", string)
        string = re.sub(r"\'ll", " \'ll", string)
        string = re.sub(r",", " , ", string)
        string = re.sub(r"!", " ! ", string)
        # Bug fix: the old replacements were " \( ", " \) " and " \? ". In a
        # re replacement template an unrecognized escape like "\(" is kept
        # verbatim, so cleaned text contained literal backslashes. Escaping
        # is only needed in the *pattern*, not the replacement.
        string = re.sub(r"\(", " ( ", string)
        string = re.sub(r"\)", " ) ", string)
        string = re.sub(r"\?", " ? ", string)
        string = re.sub(r"\s{2,}", " ", string)
        return string.strip().lower()
    @staticmethod
    def _normalize_word(word):
        """Map every digit in *word* to '0' (e.g. "ab12" -> "ab00")."""
        return "".join('0' if char.isdigit() else char for char in word)
    @staticmethod
    def _sort(insts):
        """
        Return a new list with *insts* sorted by words_size, longest first.
        Ties keep their original relative order (sorted() is stable).
        """
        # Sort (index, length) pairs; avoids shadowing the builtin `dict`
        # as the original implementation did.
        order = sorted(((idx, inst.words_size) for idx, inst in enumerate(insts)),
                       key=lambda item: item[1], reverse=True)
        sorted_insts = [insts[idx] for idx, _ in order]
        print("Sort Finished.")
        return sorted_insts
    @staticmethod
    def _write_shuffle_inst_to_file(insts, path):
        """
        Dump shuffled instances to "<path>.<shuffle>": one "word label " line
        per token, sentences separated by a blank line.
        (`shuffle` is a module-level constant from DataUtils.Common.)
        """
        w_path = ".".join([path, shuffle])
        if os.path.exists(w_path):
            os.remove(w_path)
        # `with` guarantees the handle is closed; the original leaked it.
        with open(w_path, encoding="UTF-8", mode="w") as file:
            for inst in insts:
                for word, label in zip(inst.words, inst.labels):
                    file.write(" ".join([word, label, "\n"]))
                file.write("\n")
        print("write shuffle insts to file {}".format(w_path))
class DataLoader(DataLoaderHelp):
    """
    DataLoader: reads CoNLL-style "token ... label" files into Instance
    objects, shuffles each split, and writes the shuffled copy back to disk.
    """
    def __init__(self, path, shuffle, config):
        """
        :param path: data path list (e.g. [train, dev, test])
        :param shuffle: shuffle bool
        :param config: config object providing max_count and max_char_len
        """
        #
        print("Loading Data......")
        self.data_list = []
        self.max_count = config.max_count
        self.path = path
        self.shuffle = shuffle
        # char feature
        self.pad_char = [char_pad, char_pad]
        # self.pad_char = []
        self.max_char_len = config.max_char_len
    def dataLoader(self):
        """
        Load every file in self.path, shuffle each split in place, and write
        the shuffled copy next to the original file.
        :return: (train, dev, test) for 3 paths, (train, dev) for 2 paths.
            NOTE(review): any other number of paths falls through and
            implicitly returns None.
        """
        path = self.path
        shuffle = self.shuffle
        assert isinstance(path, list), "Path Must Be In List"
        print("Data Path {}".format(path))
        for id_data in range(len(path)):
            print("Loading Data Form {}".format(path[id_data]))
            insts = self._Load_Each_Data(path=path[id_data], shuffle=shuffle)
            random.shuffle(insts)
            self._write_shuffle_inst_to_file(insts, path=path[id_data])
            self.data_list.append(insts)
        # return train/dev/test data
        if len(self.data_list) == 3:
            return self.data_list[0], self.data_list[1], self.data_list[2]
        elif len(self.data_list) == 2:
            return self.data_list[0], self.data_list[1]
    def _Load_Each_Data(self, path=None, shuffle=False):
        """
        Parse one file: blank lines separate sentences; each non-blank line is
        space-separated with the token in the first field and the label in the
        last field. Stops once max_count sentences have been collected.
        :param path: file to read
        :param shuffle: unused here (shuffling happens in dataLoader)
        :return: list of Instance
        """
        assert path is not None, "The Data Path Is Not Allow Empty."
        insts = []
        with open(path, encoding="UTF-8") as f:
            inst = Instance()
            for line in f.readlines():
                line = line.strip()
                if line == "" and len(inst.words) != 0:
                    # blank line -> close the current sentence
                    inst.words_size = len(inst.words)
                    insts.append(inst)
                    inst = Instance()
                else:
                    line = line.strip().split(" ")
                    word = line[0]
                    char = self._add_char(word)
                    # digits are normalized to '0' in the word form only
                    word = self._normalize_word(word)
                    inst.chars.append(char)
                    inst.words.append(word)
                    inst.labels.append(line[-1])
                if len(insts) == self.max_count:
                    break
            if len(inst.words) != 0:
                # flush a trailing sentence when the file has no final blank line
                inst.words_size = len(inst.words)
                insts.append(inst)
        # print("\n")
        return insts
    def _add_char(self, word):
        """
        Build the fixed-length character sequence for *word*: short words are
        padded with char_pad up to max_char_len; long words keep the first and
        last halves of their characters.
        NOTE(review): the truncation branch returns a *str* (slice of word)
        while the padding branch returns a *list* of chars — confirm callers
        only iterate over the result.
        :param word: token string
        :return: char sequence of length max_char_len
        """
        char = []
        # char feature
        for i in range(len(word)):
            char.append(word[i])
        if len(char) > self.max_char_len:
            half = self.max_char_len // 2
            word_half = word[:half] + word[-(self.max_char_len - half):]
            char = word_half
        else:
            for i in range(self.max_char_len - len(char)):
                char.append(char_pad)
        return char
| 5,703 | 29.666667 | 95 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/test.py | import pickle
import sqlite3
from policy_gradients.agent import Trainer
import git
import numpy as np
import os
import copy
import random
import argparse
from policy_gradients import models
from policy_gradients.torch_utils import ZFilter
import sys
import json
import torch
import torch.optim as optim
from cox.store import Store, schema_from_dict
from run import main, add_common_parser_opts, override_json_params
from auto_LiRPA.eps_scheduler import LinearScheduler
import logging
# Suppress all log records at INFO level and below for the whole process.
logging.disable(logging.INFO)
def main(params):
    """Evaluate (or attack) a trained policy described by ``params``.

    Depending on flags this either:
      * trains a robust Sarsa value network (``sarsa_enable``),
      * collects trajectories and trains an imitation network (``imit_enable``),
      * or runs ``num_episodes`` test episodes (optionally under attack),
        saves trajectories/rewards to disk, and optionally records reward
        statistics in a sqlite database with early termination against
        previously stored attack results.
    """
    override_params = copy.deepcopy(params)
    excluded_params = ['config_path', 'out_dir_prefix', 'num_episodes', 'row_id', 'exp_id',
            'load_model', 'seed', 'deterministic', 'noise_factor', 'compute_kl_cert', 'use_full_backward', 'sqlite_path', 'early_terminate']
    sarsa_params = ['sarsa_enable', 'sarsa_steps', 'sarsa_eps', 'sarsa_reg', 'sarsa_model_path']
    imit_params = ['imit_enable', 'imit_epochs', 'imit_model_path', 'imit_lr']
    # original_params contains all flags in config files that are overridden via command.
    for k in list(override_params.keys()):
        if k in excluded_params:
            del override_params[k]
    if params['sqlite_path']:
        print(f"Will save results in sqlite database in {params['sqlite_path']}")
        connection = sqlite3.connect(params['sqlite_path'])
        cur = connection.cursor()
        # One row per attack run; created lazily on first use.
        cur.execute('''create table if not exists attack_results
              (method varchar(20),
               mean_reward real,
               std_reward real,
               min_reward real,
               max_reward real,
               sarsa_eps real,
               sarsa_reg real,
               sarsa_steps integer,
               deterministic bool,
               early_terminate bool)''')
        connection.commit()
    # We will set this flag to True we break early.
    early_terminate = False
    # Append a prefix for output path.
    if params['out_dir_prefix']:
        params['out_dir'] = os.path.join(params['out_dir_prefix'], params['out_dir'])
        print(f"setting output dir to {params['out_dir']}")
    if params['config_path']:
        # Load from a pretrained model using existing config.
        # First we need to create the model using the given config file.
        json_params = json.load(open(params['config_path']))
        params = override_json_params(params, json_params, excluded_params + sarsa_params + imit_params)
    if params['sarsa_enable']:
        assert params['attack_method'] == "none" or params['attack_method'] is None, \
                "--train-sarsa is only available when --attack-method=none, but got {}".format(params['attack_method'])
    if 'load_model' in params and params['load_model']:
        for k, v in zip(params.keys(), params.values()):
            assert v is not None, f"Value for {k} is None"
        # Create the agent from config file.
        p = Trainer.agent_from_params(params, store=None)
        print('Loading pretrained model', params['load_model'])
        pretrained_model = torch.load(params['load_model'])
        if 'policy_model' in pretrained_model:
            p.policy_model.load_state_dict(pretrained_model['policy_model'])
        if 'val_model' in pretrained_model:
            p.val_model.load_state_dict(pretrained_model['val_model'])
        if 'policy_opt' in pretrained_model:
            p.POLICY_ADAM.load_state_dict(pretrained_model['policy_opt'])
        if 'val_opt' in pretrained_model:
            p.val_opt.load_state_dict(pretrained_model['val_opt'])
        # Restore environment parameters, like mean and std.
        if 'envs' in pretrained_model:
            p.envs = pretrained_model['envs']
        for e in p.envs:
            # Freeze observation normalizers during evaluation.
            e.normalizer_read_only = True
            e.setup_visualization(params['show_env'], params['save_frames'], params['save_frames_path'])
    else:
        # Load from experiment directory. No need to use a config.
        base_directory = params['out_dir']
        store = Store(base_directory, params['exp_id'], mode='r')
        if params['row_id'] < 0:
            row = store['final_results'].df
        else:
            checkpoints = store['checkpoints'].df
            row_id = params['row_id']
            row = checkpoints.iloc[row_id:row_id+1]
        print("row to test: ", row)
        if params['cpu'] == None:
            cpu = False
        else:
            cpu = params['cpu']
        p, _ = Trainer.agent_from_data(store, row, cpu, extra_params=params, override_params=override_params, excluded_params=excluded_params)
        store.close()
    rewards = []
    print('Gaussian noise in policy:')
    print(torch.exp(p.policy_model.log_stdev))
    # Keep the unmodified stdev around; run_test needs it for attacks.
    original_stdev = p.policy_model.log_stdev.clone().detach()
    if params['noise_factor'] != 1.0:
        p.policy_model.log_stdev.data[:] += np.log(params['noise_factor'])
    if params['deterministic']:
        print('Policy runs in deterministic mode. Ignoring Gaussian noise.')
        # log-stdev of -100 makes the Gaussian effectively a point mass.
        p.policy_model.log_stdev.data[:] = -100
    print('Gaussian noise in policy (after adjustment):')
    print(torch.exp(p.policy_model.log_stdev))
    if params['sarsa_enable']:
        num_steps = params['sarsa_steps']
        # learning rate scheduler: linearly annealing learning rate after
        lr_decrease_point = num_steps * 2 / 3
        decreasing_steps = num_steps - lr_decrease_point
        lr_sch = lambda epoch: 1.0 if epoch < lr_decrease_point else (decreasing_steps - epoch + lr_decrease_point) / decreasing_steps
        # robust training scheduler. Currently using 1/3 epochs for warmup, 1/3 for schedule and 1/3 for final training.
        eps_start_point = int(num_steps * 1 / 3)
        robust_eps_scheduler = LinearScheduler(params['sarsa_eps'], f"start={eps_start_point},length={eps_start_point}")
        robust_beta_scheduler = LinearScheduler(1.0, f"start={eps_start_point},length={eps_start_point}")
        # reinitialize value model, and run value function learning steps.
        p.setup_sarsa(lr_schedule=lr_sch, eps_scheduler=robust_eps_scheduler, beta_scheduler=robust_beta_scheduler)
        # Run Sarsa training.
        for i in range(num_steps):
            print(f'Step {i+1} / {num_steps}, lr={p.sarsa_scheduler.get_last_lr()}')
            mean_reward = p.sarsa_step()
            rewards.append(mean_reward)
        # for w in p.val_model.parameters():
        #     print(f'{w.size()}, {torch.norm(w.view(-1), 2)}')
        # Save Sarsa model.
        saved_model = {
                'state_dict': p.sarsa_model.state_dict(),
                'metadata': params,
                }
        torch.save(saved_model, params['sarsa_model_path'])
    elif params['imit_enable']:
        num_epochs = params['imit_epochs']
        num_episodes = params['num_episodes']
        print('\n\n'+'Start collecting data\n'+'-'*80)
        # Roll out episodes and accumulate (state, action, not_done) tuples.
        for i in range(num_episodes):
            print('Collecting %d / %d episodes' % (i+1, num_episodes))
            ep_length, ep_reward, actions, action_means, states, kl_certificates = p.run_test(compute_bounds=params['compute_kl_cert'], use_full_backward=params['use_full_backward'], original_stdev=original_stdev)
            not_dones = np.ones(len(actions))
            not_dones[-1] = 0
            if i == 0:
                all_actions = actions.copy()
                all_states = states.copy()
                all_not_dones = not_dones.copy()
            else:
                all_actions = np.concatenate((all_actions, actions), axis=0)
                all_states = np.concatenate((all_states, states), axis=0)
                all_not_dones = np.concatenate((all_not_dones, not_dones))
        print('Collected actions shape:', all_actions.shape)
        print('Collected states shape:', all_states.shape)
        p.setup_imit(lr=params['imit_lr'])
        p.imit_steps(torch.from_numpy(all_actions), torch.from_numpy(all_states), torch.from_numpy(all_not_dones), num_epochs)
        saved_model = {
                'state_dict': p.imit_network.state_dict(),
                'metadata': params,
                }
        torch.save(saved_model, params['imit_model_path'])
    else:
        # Plain evaluation / attack: run episodes and collect statistics.
        num_episodes = params['num_episodes']
        all_rewards = []
        all_lens = []
        all_kl_certificates = []
        for i in range(num_episodes):
            print('Episode %d / %d' % (i+1, num_episodes))
            ep_length, ep_reward, actions, action_means, states, kl_certificates = p.run_test(compute_bounds=params['compute_kl_cert'], use_full_backward=params['use_full_backward'], original_stdev=original_stdev)
            if i == 0:
                all_actions = actions.copy()
                all_states = states.copy()
            else:
                all_actions = np.concatenate((all_actions, actions), axis=0)
                all_states = np.concatenate((all_states, states), axis=0)
            if params['compute_kl_cert']:
                print('Epoch KL certificates:', kl_certificates)
                all_kl_certificates.append(kl_certificates)
            all_rewards.append(ep_reward)
            all_lens.append(ep_length)
            # Current step mean, std, min and max
            mean_reward, std_reward, min_reward, max_reward = np.mean(all_rewards), np.std(all_rewards), np.min(all_rewards), np.max(all_rewards)
            if i > num_episodes // 5 and params['early_terminate'] and params['sqlite_path'] and params['attack_method'] != 'none':
                # Attempt to early terminiate if some other attacks have done with low reward.
                cur.execute("SELECT MIN(mean_reward) FROM attack_results WHERE deterministic=?;", (params['deterministic'], ))
                current_best_reward = cur.fetchone()[0]
                print(f'current best: {current_best_reward}, ours: {mean_reward} +/- {std_reward}, min: {min_reward}')
                # Terminiate if mean - 2*std is worse than best, or our min is worse than best.
                if current_best_reward is not None and ((current_best_reward < mean_reward - 2 * std_reward) or
                        (min_reward > current_best_reward)):
                    print('terminating early!')
                    early_terminate = True
                    break
        # Save trajectories and rewards under a per-attack subdirectory.
        attack_dir = 'attack-{}-eps-{}'.format(params['attack_method'], params['attack_eps'])
        if 'sarsa' in params['attack_method']:
            attack_dir += '-sarsa_steps-{}-sarsa_eps-{}-sarsa_reg-{}'.format(params['sarsa_steps'], params['sarsa_eps'], params['sarsa_reg'])
            if 'action' in params['attack_method']:
                attack_dir += '-attack_sarsa_action_ratio-{}'.format(params['attack_sarsa_action_ratio'])
        save_path = os.path.join(params['out_dir'], params['exp_id'], attack_dir)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        for name, value in [('actions',all_actions), ('states', all_states), ('rewards', all_rewards), ('length', all_lens)]:
            with open(os.path.join(save_path, '{}.pkl'.format(name)), 'wb') as f:
                pickle.dump(value, f)
        print(params)
        with open(os.path.join(save_path, 'params.json'), 'w') as f:
            json.dump(params, f, indent=4)
        mean_reward, std_reward, min_reward, max_reward = np.mean(all_rewards), np.std(all_rewards), np.min(all_rewards), np.max(all_rewards)
        if params['compute_kl_cert']:
            print('KL certificates stats: mean: {}, std: {}, min: {}, max: {}'.format(np.mean(all_kl_certificates), np.std(all_kl_certificates), np.min(all_kl_certificates), np.max(all_kl_certificates)))
        # write results to sqlite.
        if params['sqlite_path']:
            method = params['attack_method']
            if params['attack_method'] == "sarsa":
                # Load sarsa parameters from checkpoint
                sarsa_ckpt = torch.load(params['attack_sarsa_network'])
                sarsa_meta = sarsa_ckpt['metadata']
                sarsa_eps = sarsa_meta['sarsa_eps'] if 'sarsa_eps' in sarsa_meta else -1.0
                sarsa_reg = sarsa_meta['sarsa_reg'] if 'sarsa_reg' in sarsa_meta else -1.0
                sarsa_steps = sarsa_meta['sarsa_steps'] if 'sarsa_steps' in sarsa_meta else -1
            elif params['attack_method'] == "sarsa+action":
                sarsa_eps = -1.0
                sarsa_reg = params['attack_sarsa_action_ratio']
                sarsa_steps = -1
            else:
                # -1 sentinels mean "not applicable" for non-sarsa attacks.
                sarsa_eps = -1.0
                sarsa_reg = -1.0
                sarsa_steps = -1
            try:
                cur.execute("INSERT INTO attack_results VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);",
                        (method, mean_reward, std_reward, min_reward, max_reward, sarsa_eps, sarsa_reg, sarsa_steps, params['deterministic'], early_terminate))
                connection.commit()
            except sqlite3.OperationalError as e:
                import traceback
                traceback.print_exc()
                print('Cannot insert into the SQLite table. Give up.')
            else:
                print(f'results saved to database {params["sqlite_path"]}')
            connection.close()
        print('\n')
        print('all rewards:', all_rewards)
        print('rewards stats:\nmean: {}, std:{}, min:{}, max:{}'.format(mean_reward, std_reward, min_reward, max_reward))
def get_parser():
    """Build the command-line parser for evaluation, attack, Sarsa and imitation runs."""
    p = argparse.ArgumentParser(description='Generate experiments to be run.')
    # Where the model comes from, where results go, and how to evaluate it.
    p.add_argument('--config-path', type=str, default='', required=False,
                   help='json for this config')
    p.add_argument('--out-dir-prefix', type=str, default='', required=False,
                   help='prefix for output log path')
    p.add_argument('--exp-id', type=str, help='experiement id for testing', default='')
    p.add_argument('--row-id', type=int, help='which row of the table to use', default=-1)
    p.add_argument('--num-episodes', type=int, help='number of episodes for testing', default=50)
    p.add_argument('--compute-kl-cert', action='store_true', help='compute KL certificate')
    p.add_argument('--use-full-backward', action='store_true', help='Use full backward LiRPA bound for computing certificates')
    p.add_argument('--deterministic', action='store_true', help='disable Gaussian noise in action for evaluation')
    p.add_argument('--noise-factor', type=float, default=1.0, help='increase the noise (Gaussian std) by this factor.')
    p.add_argument('--load-model', type=str, help='load a pretrained model file', default='')
    p.add_argument('--seed', type=int, help='random seed', default=1234)
    # Sarsa training related options.
    p.add_argument('--sarsa-enable', action='store_true', help='train a sarsa attack model.')
    p.add_argument('--sarsa-steps', type=int, help='Sarsa training steps.', default=30)
    p.add_argument('--sarsa-model-path', type=str, help='path to save the sarsa value network.', default='sarsa.model')
    # Imitation-learning attack options.
    p.add_argument('--imit-enable', action='store_true', help='train a imit attack model.')
    p.add_argument('--imit-epochs', type=int, help='Imit training steps.', default=100)
    p.add_argument('--imit-model-path', type=str, help='path to save the imit policy network.', default='imit.model')
    p.add_argument('--imit-lr', type=float, help='lr for imitation learning training', default=1e-3)
    p.add_argument('--sarsa-eps', type=float, help='eps for actions for sarsa training.', default=0.02)
    p.add_argument('--sarsa-reg', type=float, help='regularization term for sarsa training.', default=0.1)
    # Other configs
    p.add_argument('--sqlite-path', type=str, help='save results to a sqlite database.', default='')
    p.add_argument('--early-terminate', action='store_true', help='terminate attack early if low attack reward detected in sqlite.')
    # Options shared with training (game, mode, PPO hyperparameters, ...).
    p = add_common_parser_opts(p)
    return p
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    # Sanity-check flag combinations before doing any work.
    if args.load_model:
        assert args.config_path, "Need to specificy a config file when loading a pretrained model."
    if args.early_terminate:
        assert args.sqlite_path != '', "Need to specify --sqlite-path to terminate early."
    if args.sarsa_enable:
        if args.sqlite_path != '':
            print("When --sarsa-enable is specified, --sqlite-path and --early-terminate will be ignored.")
    params = vars(args)
    # Seed every RNG (torch CPU/GPU, python, numpy) for reproducibility.
    seed = params['seed']
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    random.seed(seed)
    np.random.seed(seed)
    main(params)
| 16,747 | 51.501567 | 213 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/get_best_pickle.py | import os
import numpy as np
import argparse
import uuid
from cox.store import Store
import pickle
from policy_gradients.torch_utils import *
import torch as ch
# Avoid HDF5 error
os.environ['HDF5_USE_FILE_LOCKING']='FALSE'
def get_alg_name(name):
    """Infer the training algorithm from the parent directory name of *name*."""
    parent_dir = os.path.basename(os.path.dirname(name))
    # Checked in this order; the first substring match wins.
    for alg in ('trpo', 'robust_ppo', 'adv_sa_ppo', 'adv_ppo', 'ppo'):
        if alg in parent_dir:
            return alg
    return 'unknown'
def get_env_name(name):
    """Infer the gym environment from a substring of *name*."""
    # Checked in this order; the first substring match wins.
    for env in ('humanoid', 'halfcheetah', 'ant', 'hopper', 'walker'):
        if env in name:
            return env
    return 'unknown'
def main(args):
    """Scan a cox experiment directory, pick experiment id(s), and dump each
    selected experiment's model/optimizer/env state into a single torch file.

    Selection: --all_exp dumps every successfully scanned experiment; --best
    picks the highest final reward (lowest for --attack); --exp_id picks that
    id directly; otherwise the median-reward experiment is used.
    """
    base_directory = args.base_directory
    exp_id_list = os.listdir(base_directory)
    best_exp_id = None
    all_rew = []
    all_exp_id = []
    train_eps = []
    if args.exp_id == '':
        # Scan every experiment and collect its final reward; experiments
        # that fail to load are reported and skipped.
        for exp_id in exp_id_list:
            s = None
            try:
                s = Store(base_directory, exp_id)
                rew = s['final_results'].df['5_rewards'][0]
                # train_eps.append(s['metadata'].df['robust_ppo_eps'][0])
                all_rew.append(rew)
                print(f"rew={rew}")
                all_exp_id.append(exp_id)
                s.close()
            except Exception as e:
                print(f'Load result error for {exp_id}: {e}')
                if s is not None:
                    s.close()
                continue
        n_exps = len(all_rew)
        all_rew = np.array(all_rew)
        all_exp_id = np.array(all_exp_id)
        ind = np.argsort(all_rew)
        for i in range(len(train_eps)):
            if train_eps[i] == 0.075:
                print(all_exp_id[i])
        print(f'Read {n_exps} models. Avg reward is {all_rew.mean()}, median is {all_rew[ind[n_exps//2]]}')
    def dump_one_exp_id(best_exp_id):
        """Dump one experiment's checkpoint(s) to disk via torch.save."""
        print('\n\n>>>selected id', best_exp_id, 'args.best', args.best, '\n\n')
        if best_exp_id is not None:
            env_name = get_env_name(base_directory)
            alg_name = get_alg_name(base_directory)
            store = Store(base_directory, best_exp_id)
            # Prefer the final_results table; fall back to the last checkpoint
            # row when training did not finish (or --all_ckpts is set).
            if 'final_results' in store.tables and not args.all_ckpts:
                table_name = 'final_results'
                index_id = 0
            else:
                table_name = 'checkpoints'
                print(f'Warning: final_results table not found for expid {best_exp_id}, using last checkpoints')
                index_id = -1  # use last checkpoint
            ckpts = store[table_name]
            print('loading from exp id:', best_exp_id, ' reward: ', ckpts.df['5_rewards'].iloc[index_id] if '5_rewards' in ckpts.df else "training not finished")
            def dump_model(sel_ckpts, sel_index_id, sel_path):
                # Gather whichever model/optimizer states exist in this table.
                P = {}
                # mapper = ch.device('cuda:0')
                for name in ['val_model', 'policy_model', 'val_opt', 'policy_opt', 'adversary_policy_model', 'adversary_val_model', 'adversary_policy_opt', 'adversary_val_opt']:
                    if name in sel_ckpts.df:
                        print(f'Saving {name} out of {len(sel_ckpts.df[name])}')
                        P[name] = sel_ckpts.get_state_dict(sel_ckpts.df[name].iloc[sel_index_id])
                P['envs'] = sel_ckpts.get_pickle(sel_ckpts.df['envs'].iloc[sel_index_id])
                ch.save(P, sel_path)
                print('\n', sel_path, 'saved.\n')
            if not args.all_ckpts:
                if args.output is None:
                    path = f"best_model-{alg_name}-{env_name}.{best_exp_id[:8]}.model"
                else:
                    path = args.output
                dump_model(ckpts, index_id, path)
            else:
                # Dump every dump_step-th checkpoint of the training run.
                iters = ckpts.df['iteration']
                for i,it in enumerate(iters):
                    if i % args.dump_step != 0:
                        continue
                    path = f"best_model-{alg_name}-{env_name}.{best_exp_id[:8]}.iter{it}.model"
                    if args.output is not None:
                        if not os.path.exists(args.output):
                            os.makedirs(args.output)
                        path = os.path.join(args.output, path)
                    dump_model(ckpts, i, path)
            store.close()
        else:
            raise ValueError('no usable exp found! Cannot load.')
    if not args.all_exp:
        # NOTE(review): ind/n_exps only exist when args.exp_id == '' (the scan
        # above ran) — --best combined with --exp_id would raise NameError here.
        if args.best:
            if args.attack:
                sel_exp_id = all_exp_id[ind[0]]
            else:
                sel_exp_id = all_exp_id[ind[-1]]
        else:
            if args.exp_id:
                sel_exp_id = args.exp_id
            else:
                sel_exp_id = all_exp_id[ind[n_exps // 2]]
        dump_one_exp_id(sel_exp_id)
    else:
        for sel_exp_id in all_exp_id:
            dump_one_exp_id(sel_exp_id)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('base_directory', type=str, help='agent dir containing cox experiments')
    parser.add_argument('--output', type=str, default='', help='output model filename')
    parser.add_argument('--best', action='store_true', help='select best instead of median')
    parser.add_argument('--exp_id', default='', help='specify an exp id to extract')
    parser.add_argument('--all_ckpts', action='store_true', help='dump all checkpoints in training')
    parser.add_argument('--attack', action='store_true', help='this is an attack experiment, select min reward instead of max')
    parser.add_argument('--all_exp', action='store_true', help='dump all exp_id in training')
    parser.add_argument('--dump_step', default=1, type=int, help='training checkpoint to dump every dump_step indices')
    args = parser.parse_args()
    args.base_directory = args.base_directory.rstrip("/")
    # Convenience: if the path itself ends in an experiment uuid, split it
    # into the parent directory plus an implicit --exp_id.
    uuid_str = os.path.basename(args.base_directory)
    try:
        uuid.UUID(uuid_str)
    except ValueError:
        pass
    else:
        print('input is a path ending with uuid, directly setting --exp_id based on it')
        args.exp_id = uuid_str
        args.base_directory = os.path.dirname(args.base_directory)
    # Default output filename derived from the (possibly empty) exp id.
    if args.output == '':
        args.output = f'best_model.{args.exp_id[:8]}.model'
    main(args)
| 6,513 | 38.719512 | 177 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/run.py | from policy_gradients.agent import Trainer
import git
import pickle
import random
import numpy as np
import os
import argparse
import traceback
from policy_gradients import models
import sys
import json
import torch
from cox.store import Store, schema_from_dict
# Tee object allows for logging to both stdout and to file
class Tee(object):
    """Mirror a standard stream into a log file.

    Constructing a Tee opens *file_path* and installs the instance as
    ``sys.stdout`` or ``sys.stderr`` (per *stream_type*), so every write
    goes both to the log file and to the original stream.
    """
    def __init__(self, file_path, stream_type, mode='a'):
        assert stream_type in ['stdout', 'stderr']
        self.file = open(file_path, mode)
        self.stream_type = stream_type
        self.errors = 'chill'
        # Keep a handle to the real stream, then take its place on sys.
        self.stream = getattr(sys, stream_type)
        setattr(sys, stream_type, self)

    def write(self, data):
        # Fan each write out to the log file and the original stream.
        for sink in (self.file, self.stream):
            sink.write(data)

    def flush(self):
        for sink in (self.file, self.stream):
            sink.flush()
def main(params):
    """Train an agent according to ``params``.

    Sets up a cox Store with metadata/checkpoints/final_results tables,
    builds a Trainer, optionally restores a pretrained model, then runs
    ``train_steps`` training iterations, checkpointing every ``save_iters``
    steps. A final_results row is written even on interrupt or error.
    Returns 0 on success, 1 on KeyboardInterrupt, -1 on other exceptions.
    """
    for k, v in zip(params.keys(), params.values()):
        assert v is not None, f"Value for {k} is None"
    # #
    # Setup logging
    # #
    metadata_schema = schema_from_dict(params)
    base_directory = params['out_dir']
    store = Store(base_directory)
    # redirect stderr, stdout to file
    """
    def make_err_redirector(stream_name):
        tee = Tee(os.path.join(store.path, stream_name + '.txt'), stream_name)
        return tee

    stderr_tee = make_err_redirector('stderr')
    stdout_tee = make_err_redirector('stdout')
    """
    # Store the experiment path and the git commit for this experiment
    metadata_schema.update({
        'store_path': str,
        'git_commit': str
    })
    repo = git.Repo(path=os.path.dirname(os.path.realpath(__file__)),
                    search_parent_directories=True)
    metadata_table = store.add_table('metadata', metadata_schema)
    metadata_table.update_row(params)
    metadata_table.update_row({
        'store_path': store.path,
        'git_commit': repo.head.object.hexsha
    })
    metadata_table.flush_row()
    # Extra items in table when minimax training is enabled.
    if params['mode'] == "adv_ppo" or params['mode'] == 'adv_trpo' or params['mode'] == 'adv_sa_ppo':
        adversary_table_dict = {
            'adversary_policy_model': store.PYTORCH_STATE,
            'adversary_policy_opt': store.PYTORCH_STATE,
            'adversary_val_model': store.PYTORCH_STATE,
            'adversary_val_opt': store.PYTORCH_STATE,
        }
    else:
        adversary_table_dict = {}
    # Table for checkpointing models and envs
    if params['save_iters'] > 0:
        checkpoint_dict = {
            'val_model': store.PYTORCH_STATE,
            'policy_model': store.PYTORCH_STATE,
            'envs': store.PICKLE,
            'policy_opt': store.PYTORCH_STATE,
            'val_opt': store.PYTORCH_STATE,
            'iteration': int,
            '5_rewards': float,
        }
        checkpoint_dict.update(adversary_table_dict)
        store.add_table('checkpoints', checkpoint_dict)
    # The trainer object is in charge of sampling trajectories and
    # taking PPO/TRPO optimization steps
    p = Trainer.agent_from_params(params, store=store)
    if params['initial_std'] != 1.0:
        p.policy_model.log_stdev.data[:] = np.log(params['initial_std'])
    if 'load_model' in params and params['load_model']:
        print('Loading pretrained model', params['load_model'])
        pretrained_model = torch.load(params['load_model'])
        if 'policy_model' in pretrained_model:
            p.policy_model.load_state_dict(pretrained_model['policy_model'])
        if params['deterministic']:
            print('Policy runs in deterministic mode. Ignoring Gaussian noise.')
            # log-stdev of -100 makes the Gaussian effectively a point mass.
            p.policy_model.log_stdev.data[:] = -100
        else:
            print('Policy runs in non deterministic mode with Gaussian noise.')
        if 'val_model' in pretrained_model:
            p.val_model.load_state_dict(pretrained_model['val_model'])
        if 'policy_opt' in pretrained_model:
            p.POLICY_ADAM.load_state_dict(pretrained_model['policy_opt'])
        if 'val_opt' in pretrained_model:
            p.val_opt.load_state_dict(pretrained_model['val_opt'])
        # Load adversary models.
        if 'no_load_adv_policy' in params and params['no_load_adv_policy']:
            print('Skipping loading adversary models.')
        else:
            if 'adversary_policy_model' in pretrained_model and hasattr(p, 'adversary_policy_model'):
                p.adversary_policy_model.load_state_dict(pretrained_model['adversary_policy_model'])
            if 'adversary_val_model' in pretrained_model and hasattr(p, 'adversary_val_model'):
                p.adversary_val_model.load_state_dict(pretrained_model['adversary_val_model'])
            if 'adversary_policy_opt' in pretrained_model and hasattr(p, 'adversary_policy_opt'):
                p.adversary_policy_opt.load_state_dict(pretrained_model['adversary_policy_opt'])
            if 'adversary_val_opt' in pretrained_model and hasattr(p, 'adversary_val_opt'):
                p.adversary_val_opt.load_state_dict(pretrained_model['adversary_val_opt'])
        # Load optimizer states.
        # p.POLICY_ADAM.load_state_dict(pretrained_models['policy_opt'])
        # p.val_opt.load_state_dict(pretrained_models['val_opt'])
        # Restore environment parameters, like mean and std.
        if 'envs' in pretrained_model:
            p.envs = pretrained_model['envs']
        for e in p.envs:
            e.setup_visualization(params['show_env'], params['save_frames'], params['save_frames_path'])
    rewards = []
    # Table for final results
    final_dict = {
        'iteration': int,
        '5_rewards': float,
        'terminated_early': bool,
        'val_model': store.PYTORCH_STATE,
        'policy_model': store.PYTORCH_STATE,
        'envs': store.PICKLE,
        'policy_opt': store.PYTORCH_STATE,
        'val_opt': store.PYTORCH_STATE,
    }
    final_dict.update(adversary_table_dict)
    final_table = store.add_table('final_results', final_dict)
    def add_adversary_to_table(p, table_dict):
        # Only adversarial-training modes carry adversary model/optimizer state.
        if params['mode'] == "adv_ppo" or params['mode'] == 'adv_trpo' or params['mode'] == 'adv_sa_ppo':
            table_dict["adversary_policy_model"] = p.adversary_policy_model.state_dict()
            table_dict["adversary_policy_opt"] = p.ADV_POLICY_ADAM.state_dict()
            table_dict["adversary_val_model"] = p.adversary_val_model.state_dict()
            table_dict["adversary_val_opt"] = p.adversary_val_opt.state_dict()
        return table_dict
    def finalize_table(iteration, terminated_early, rewards):
        # Record the mean of the last 5 rewards plus all model states.
        final_5_rewards = np.array(rewards)[-5:].mean()
        final_dict = {
            'iteration': iteration,
            '5_rewards': final_5_rewards,
            'terminated_early': terminated_early,
            'val_model': p.val_model.state_dict(),
            'policy_model': p.policy_model.state_dict(),
            'policy_opt': p.POLICY_ADAM.state_dict(),
            'val_opt': p.val_opt.state_dict(),
            'envs': p.envs
        }
        final_dict = add_adversary_to_table(p, final_dict)
        final_table.append_row(final_dict)
    ret = 0
    # Try-except so that we save if the user interrupts the process
    try:
        for i in range(params['train_steps']):
            print('Step %d' % (i,))
            if params['save_iters'] > 0 and i % params['save_iters'] == 0 and i != 0:
                final_5_rewards = np.array(rewards)[-5:].mean()
                print(f'Saving checkpoints to {store.path} with reward {final_5_rewards:.5g}')
                checkpoint_dict = {
                    'iteration': i,
                    'val_model': p.val_model.state_dict(),
                    'policy_model': p.policy_model.state_dict(),
                    'policy_opt': p.POLICY_ADAM.state_dict(),
                    'val_opt': p.val_opt.state_dict(),
                    'envs': p.envs,
                    '5_rewards': final_5_rewards,
                }
                checkpoint_dict = add_adversary_to_table(p, checkpoint_dict)
                store['checkpoints'].append_row(checkpoint_dict)
            mean_reward = p.train_step()
            rewards.append(mean_reward)
            # For debugging and tuning, we can break in the middle.
            if i == params['force_stop_step']:
                print('Terminating early because --force-stop-step is set.')
                raise KeyboardInterrupt
        finalize_table(i, False, rewards)
    except KeyboardInterrupt:
        # NOTE(review): if the interrupt fires before the first loop
        # iteration completes, `i` is unbound here and this will raise.
        finalize_table(i, True, rewards)
        ret = 1
    except:
        # NOTE(review): bare except also catches SystemExit/GeneratorExit;
        # kept as-is so the cox store is always finalized before exiting.
        print("An error occurred during training:")
        traceback.print_exc()
        # Other errors, make sure to finalize the cox store before exiting.
        finalize_table(i, True, rewards)
        ret = -1
    print(f'Models saved to {store.path}')
    store.close()
    return ret
def str2bool(v):
    """Parse a command-line flag value into a bool.

    Accepts real bools (returned unchanged) and the usual string spellings
    ('yes'/'no', 'true'/'false', 't'/'f', 'y'/'n', '1'/'0', any case).

    Args:
        v: value supplied by argparse (bool or str).

    Returns:
        bool: the parsed value.

    Raises:
        argparse.ArgumentTypeError: if `v` is not a recognized boolean
            spelling. (Previously a non-string, non-bool input crashed with
            AttributeError on `.lower()`; now it raises the argparse error
            so the user gets a proper usage message.)
    """
    if isinstance(v, bool):
        return v
    if not isinstance(v, str):
        raise argparse.ArgumentTypeError('Boolean value expected.')
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def add_common_parser_opts(parser):
    """Register every hyperparameter flag shared across training modes.

    All options default to None so that `override_json_params` can tell
    which flags the user actually set on the command line (only non-None
    values override the JSON config).

    Args:
        parser (argparse.ArgumentParser): parser to extend in place.

    Returns:
        argparse.ArgumentParser: the same parser, for chaining.
    """
    # Basic setup
    parser.add_argument('--game', type=str, help='gym game')
    parser.add_argument('--mode', type=str, choices=['ppo', 'trpo', 'robust_ppo', 'adv_ppo', 'adv_trpo', 'adv_sa_ppo'],
                        help='pg alg')
    parser.add_argument('--out-dir', type=str,
                        help='out dir for store + logging')
    parser.add_argument('--advanced-logging', type=str2bool, const=True, nargs='?')
    parser.add_argument('--kl-approximation-iters', type=int,
                        help='how often to do kl approx exps')
    parser.add_argument('--log-every', type=int)
    parser.add_argument('--policy-net-type', type=str,
                        choices=models.POLICY_NETS.keys())
    parser.add_argument('--value-net-type', type=str,
                        choices=models.VALUE_NETS.keys())
    parser.add_argument('--train-steps', type=int,
                        help='num agent training steps')
    parser.add_argument('--cpu', type=str2bool, const=True, nargs='?')
    # Which value loss to use
    parser.add_argument('--value-calc', type=str,
                        help='which value calculation to use')
    parser.add_argument('--initialization', type=str)
    # General Policy Gradient parameters
    parser.add_argument('--num-actors', type=int, help='num actors (serial)',
                        choices=[1])
    parser.add_argument('--t', type=int,
                        help='num timesteps to run each actor for')
    parser.add_argument('--gamma', type=float, help='discount on reward')
    parser.add_argument('--lambda', type=float, help='GAE hyperparameter')
    parser.add_argument('--val-lr', type=float, help='value fn learning rate')
    parser.add_argument('--val-epochs', type=int, help='value fn epochs')
    parser.add_argument('--initial-std', type=float, help='initial value of std for Gaussian policy. Default is 1.')
    # PPO parameters
    parser.add_argument('--adam-eps', type=float, choices=[0, 1e-5], help='adam eps parameter')
    parser.add_argument('--num-minibatches', type=int,
                        help='num minibatches in ppo per epoch')
    parser.add_argument('--ppo-epochs', type=int)
    parser.add_argument('--ppo-lr', type=float,
                        help='if nonzero, use gradient descent w this lr')
    parser.add_argument('--ppo-lr-adam', type=float,
                        help='if nonzero, use adam with this lr')
    parser.add_argument('--anneal-lr', type=str2bool,
                        help='if we should anneal lr linearly from start to finish')
    parser.add_argument('--clip-eps', type=float, help='ppo clipping')
    parser.add_argument('--clip-val-eps', type=float, help='ppo clipping value')
    parser.add_argument('--entropy-coeff', type=float,
                        help='entropy weight hyperparam')
    parser.add_argument('--value-clipping', type=str2bool,
                        help='should clip values (w/ ppo eps)')
    parser.add_argument('--value-multiplier', type=float,
                        help='coeff for value loss in combined step ppo loss')
    parser.add_argument('--share-weights', type=str2bool,
                        help='share weights in valnet and polnet')
    parser.add_argument('--clip-grad-norm', type=float,
                        help='gradient norm clipping (-1 for no clipping)')
    parser.add_argument('--policy-activation', type=str,
                        help='activation function for countinous policy network')
    # TRPO parameters
    parser.add_argument('--max-kl', type=float, help='trpo max kl hparam')
    parser.add_argument('--max-kl-final', type=float, help='trpo max kl final')
    parser.add_argument('--fisher-frac-samples', type=float,
                        help='frac samples to use in fisher vp estimate')
    parser.add_argument('--cg-steps', type=int,
                        help='num cg steps in fisher vp estimate')
    parser.add_argument('--damping', type=float, help='damping to use in cg')
    parser.add_argument('--max-backtrack', type=int, help='max bt steps in fvp')
    parser.add_argument('--trpo-kl-reduce-func', type=str, help='reduce function for KL divergence used in line search. mean or max.')
    # Robust PPO parameters.
    parser.add_argument('--robust-ppo-eps', type=float, help='max eps for robust PPO training')
    parser.add_argument('--robust-ppo-method', type=str, choices=['convex-relax', 'sgld', 'pgd'], help='robustness regularization methods')
    parser.add_argument('--robust-ppo-pgd-steps', type=int, help='number of PGD optimization steps')
    parser.add_argument('--robust-ppo-detach-stdev', type=str2bool, help='detach gradient of standard deviation term')
    parser.add_argument('--robust-ppo-reg', type=float, help='robust PPO regularization')
    parser.add_argument('--robust-ppo-eps-scheduler-opts', type=str, help='options for epsilon scheduler for robust PPO training')
    parser.add_argument('--robust-ppo-beta', type=float, help='max beta (IBP mixing factor) for robust PPO training')
    parser.add_argument('--robust-ppo-beta-scheduler-opts', type=str, help='options for beta scheduler for robust PPO training')
    # Adversarial PPO parameters.
    parser.add_argument('--adv-ppo-lr-adam', type=float,
                        help='if nonzero, use adam for adversary policy with this lr')
    parser.add_argument('--adv-entropy-coeff', type=float,
                        help='entropy weight hyperparam for adversary policy')
    parser.add_argument('--adv-eps', type=float, help='adversary perturbation eps')
    parser.add_argument('--adv-clip-eps', type=float, help='ppo clipping for adversary policy')
    parser.add_argument('--adv-val-lr', type=float, help='value fn learning rate for adversary policy')
    parser.add_argument('--adv-policy-steps', type=float, help='number of policy steps before adversary steps')
    parser.add_argument('--adv-adversary-steps', type=float, help='number of adversary steps before adversary steps')
    parser.add_argument('--adv-adversary-ratio', type=float, help='percentage of frames to attack for the adversary')
    # Adversarial attack parameters.
    parser.add_argument('--attack-method', type=str, choices=["none", "critic", "random", "action", "sarsa", "sarsa+action", "advpolicy", "action+imit"], help='adversarial attack methods.')
    parser.add_argument('--attack-ratio', type=float, help='attack only a ratio of steps.')
    parser.add_argument('--attack-steps', type=int, help='number of PGD optimization steps.')
    parser.add_argument('--attack-eps', type=str, help='epsilon for attack. If set to "same", we will use value of robust-ppo-eps.')
    parser.add_argument('--attack-step-eps', type=str, help='step size for each iteration. If set to "auto", we will use attack-eps / attack-steps')
    parser.add_argument('--attack-sarsa-network', type=str, help='sarsa network to load for attack.')
    parser.add_argument('--attack-sarsa-action-ratio', type=float, help='When set to non-zero, enable sarsa-action attack.')
    parser.add_argument('--attack-advpolicy-network', type=str, help='adversarial policy network to load for attack.')
    parser.add_argument('--collect-perturbed-states', type=str2bool, help='collect perturbed states during training')
    # Normalization parameters
    parser.add_argument('--norm-rewards', type=str, help='type of rewards normalization',
                        choices=['rewards', 'returns', 'none'])
    parser.add_argument('--norm-states', type=str2bool, help='should norm states')
    parser.add_argument('--clip-rewards', type=float, help='clip rews eps')
    parser.add_argument('--clip-observations', type=float, help='clips obs eps')
    # Sequence training parameters
    parser.add_argument('--history-length', type=int, help='length of history to use for LSTM. If <= 1, we do not use LSTM.')
    parser.add_argument('--use-lstm-val', type=str2bool, help='use a lstm for value function')
    # Saving
    parser.add_argument('--save-iters', type=int, help='how often to save model (0 = no saving)')
    parser.add_argument('--force-stop-step', type=int, help='forcibly terminate after a given number of steps. Useful for debugging and tuning.')
    # Visualization
    parser.add_argument('--show-env', type=str2bool, help='Show environment visualization')
    parser.add_argument('--save-frames', type=str2bool, help='Save environment frames')
    parser.add_argument('--save-frames-path', type=str, help='Path to save environment frames')
    # For grid searches only
    # parser.add_argument('--cox-experiment-path', type=str, default='')
    return parser
def override_json_params(params, json_params, excluding_params):
    """Merge argparse values over a JSON config, validating key coverage.

    Every key in the JSON config must exist in `params`, and every key in
    `params` must appear either in the JSON config or in
    `excluding_params` (argparse-only options). Command-line values that
    are not None win over the JSON values.

    Args:
        params (dict): argparse namespace as a dict (None = not provided).
        json_params (dict): config loaded from the JSON file (mutated).
        excluding_params (list): keys allowed to be absent from the JSON.

    Returns:
        dict: `json_params`, updated in place.
    """
    # Override the JSON config with the argparse config
    missing_keys = [key for key in json_params if key not in params]
    assert not missing_keys, "Following keys not in args: " + str(missing_keys)
    missing_keys = [
        key for key in params
        if key not in json_params and key not in excluding_params
    ]
    assert not missing_keys, "Following keys not in JSON: " + str(missing_keys)
    # Only explicitly-provided command-line values override the config.
    overrides = {k: v for k, v in params.items() if v is not None}
    json_params.update(overrides)
    return json_params
if __name__ == '__main__':
    # Script entry point: parse CLI flags, merge them with the JSON config,
    # apply seeding / output-dir overrides, then run training via main().
    parser = argparse.ArgumentParser(description='Generate experiments to be run.')
    parser.add_argument('--config-path', type=str, required=True,
                        help='json for this config')
    parser.add_argument('--out-dir-prefix', type=str, default="", required=False,
                        help='prefix for output log path')
    parser.add_argument('--load-model', type=str, default=None, required=False, help='load pretrained model and optimizer states before training')
    parser.add_argument('--no-load-adv-policy', action='store_true', required=False, help='Do not load adversary policy and value network from pretrained model.')
    parser.add_argument('--adv-policy-only', action='store_true', required=False, help='Run adversary only, by setting main agent learning rate to 0')
    parser.add_argument('--deterministic', action='store_true', help='disable Gaussian noise in action for --adv-policy-only mode')
    parser.add_argument('--seed', type=int, help='random seed', default=-1)
    parser = add_common_parser_opts(parser)
    args = parser.parse_args()
    params = vars(args)
    seed = params['seed']
    # NOTE(review): file handle from open() is never closed; harmless for a
    # one-shot script but a `with` block would be cleaner.
    json_params = json.load(open(args.config_path))
    # Keys that exist only on the command line (not in the JSON config).
    extra_params = ['config_path', 'out_dir_prefix', 'load_model', 'no_load_adv_policy', 'adv_policy_only', 'deterministic', 'seed']
    params = override_json_params(params, json_params, extra_params)
    if params['adv_policy_only']:
        if params['adv_ppo_lr_adam'] == 'same':
            params['adv_ppo_lr_adam'] = params['ppo_lr_adam']
            print(f"automatically setting adv_ppo_lr_adam to {params['adv_ppo_lr_adam']}")
        print('disabling policy training (train adversary only)')
        # Zeroing the lr (rather than skipping updates) freezes the agent.
        params['ppo_lr_adam'] = 0.0 * params['ppo_lr_adam']
    else:
        # deterministic mode only valid when --adv-policy-only is set
        assert not params['deterministic']
    # seed == -1 means "do not seed" (fully stochastic run).
    if seed != -1:
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        random.seed(seed)
        np.random.seed(seed)
    torch.set_printoptions(threshold=5000, linewidth=120)
    # Append a prefix for output path.
    if args.out_dir_prefix:
        params['out_dir'] = os.path.join(args.out_dir_prefix, params['out_dir'])
        print(f"setting output dir to {params['out_dir']}")
    main(params)
| 20,877 | 47.440835 | 189 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/cox.git/cox/store.py | import pandas as pd
import torch as ch
import numpy as np
import dill as pickle
from uuid import uuid4
from .utils import *
import os
import warnings
from tensorboardX import SummaryWriter
TABLE_OBJECT_DIR = '.table_objects'
SAVE_DIR = 'save'
STORE_BASENAME = 'store.h5'
TENSORBOARD_DIR = 'tensorboard'
COX_DATA_KEY = 'COX_DATA'
PICKLE = '__pickle__'
OBJECT = '__object__'
PYTORCH_STATE = '__pytorch_state__'
pd.set_option('io.hdf.default_format','table')
from pandas.io.pytables import PerformanceWarning
warnings.simplefilter(action="ignore", category=PerformanceWarning)
class Store():
    '''Serializes and saves data from experiment runs. Automatically makes a
    tensorboard. Access the tensorboard field, and refer to the TensorboardX
    documentation for more information about how to manipulate it (it is a
    tensorboardX object).
    Directly saves: int, float, torch scalar, string
    Saves and links: np.array, torch tensor, python object (via pickle or
    pytorch serialization)
    Note on python object serialization: you can choose one of three options to
    serialize using: `OBJECT` (store as python serialization inline), `PICKLE`
    (store as python serialization on disk), or `PYTORCH_STATE` (save as
    pytorch serialization on disk). All these types are represented as
    properties, i.e. `store_instance.PYTORCH_STATE`. You will need to manually
    decode the objects using the static methods found in the `Table` class
    (`get_pytorch_state`, `get_object`, `get_pickle`).
    '''
    # Re-export the module-level serialization markers as class attributes
    # so callers can write e.g. `store.PICKLE` in a schema.
    OBJECT = OBJECT
    PICKLE = PICKLE
    PYTORCH_STATE = PYTORCH_STATE
    def __init__(self, storage_folder, exp_id=None, new=False, mode='a'):
        '''
        Make new experiment store in `storage_folder`, within its subdirectory
        `exp_id` (if not none). If an experiment exists already with this
        corresponding directory, open it for reading.
        Args:
            storage_folder (str) : parent folder in which we will put a folder
                with all our experiment data (this store).
            exp_id (str) : dir name in `storage_folder` under which we will
                store experimental data.
            new (str): enforce that this store has never been created before.
            mode (str) : mode for accessing tables. a is append only, r is read
                only, w is write.
        '''
        # A missing exp_id gets a fresh random UUID directory name.
        if not exp_id:
            exp_id = str(uuid4())
        exp_path = os.path.join(storage_folder, exp_id)
        if os.path.exists(exp_path) and new:
            raise ValueError("This experiment has already been run.")
        if not os.path.exists(exp_path):
            mkdirp(exp_path)
        print('Logging in: %s' % os.path.abspath(exp_path))
        # Start HDF file
        self.store = pd.HDFStore(os.path.join(exp_path, STORE_BASENAME), mode=mode)
        # Setup
        self.exp_id = exp_id
        self.path = os.path.abspath(exp_path)
        self.save_dir = os.path.join(exp_path, SAVE_DIR)
        self.tb_dir = os.path.join(exp_path, TENSORBOARD_DIR)
        # Where to save table objects
        self._table_object_dir = os.path.join(exp_path, TABLE_OBJECT_DIR)
        # In read-only mode no tensorboard writer is created and no
        # directories are made; `self.tensorboard` does not exist then.
        if mode != 'r':
            self.tensorboard = SummaryWriter(self.tb_dir)
            mkdirp(self.save_dir)
            mkdirp(self._table_object_dir)
            mkdirp(self.tb_dir)
        # Reconstruct Table objects for any tables already in the HDF file.
        self.tables = Table._tables_from_store(self.store, self._table_object_dir)
        # Live view over table names (updates as tables are added).
        self.keys = self.tables.keys()
    def close(self):
        '''
        Closes underlying HDFStore of this store.
        '''
        self.store.close()
    def __str__(self):
        s = []
        for table_name, table in self.tables.items():
            s.append('-- Table: %s --' % table_name)
            s.append(str(table))
            s.append('')
        return '\n'.join(s)
    def get_table(self, table_id):
        '''
        Gets table with key `table_id`.
        Args:
            table_id (str) : id of table to get from this store.
        Returns:
            The corresponding table (Table object).
        '''
        return self.tables[table_id]
    def __getitem__(self, table_id):
        '''
        Gets table with key `table_id`.
        Args:
            table_id (str) : id of table to get from this store.
        Returns:
            The corresponding table (Table object).
        '''
        return self.get_table(table_id)
    def add_table(self, table_name, schema):
        '''
        Add a new table to the experiment.
        Args:
            table_name (str) : a name for the table
            schema (dict) : a dict for the schema of the table. The entries
                should be of the form name:type. For example, if we wanted to
                add a float column in the table named acc, we would have an
                entry `'acc':float`.
        Returns:
            The table object of the new table.
        '''
        table = Table(table_name, schema, self._table_object_dir, self.store)
        self.tables[table_name] = table
        return table
    def add_table_like_example(self, table_name, example, alternative=OBJECT):
        '''
        Add a new table to the experiment, using an example dictionary as the
        basis for the types of the columns.
        Args:
            table_name (str) : a name for the table
            example (dict) : example for the schema of the table. Make a table
                with columns with types corresponding to the types of the
                objects in the dictionary.
            alternative (self.OBJECT|self.PICKLE|self.PYTORCH_STATE) : how to
                store columns that are python objects.
        '''
        schema = schema_from_dict(example, alternative=alternative)
        return self.add_table(table_name, schema)
    def log_table_and_tb(self, table_name, update_dict, summary_type='scalar'):
        '''
        Log to a table and also a tensorboard.
        Args:
            table_name (str) : which table to log to
            update_dict (dict) : values to log and store as a dictionary of
                column mapping to value.
            summary_type (str) : what type of summary to log to tensorboard as
        '''
        table = self.tables[table_name]
        # Unwrap 0-d tensors/arrays so tensorboard and HDF get scalars.
        update_dict = _clean_dict(update_dict, table.schema)
        # e.g. summary_type='scalar' -> self.tensorboard.add_scalar(...)
        tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)
        # Use the row count as the tensorboard global step.
        iteration = table.nrows
        for name, value in update_dict.items():
            tb_func('/'.join([table_name, name]), value, iteration)
        # Stages the values; caller must eventually flush_row() the table.
        table.update_row(update_dict)
class Table():
    '''
    A class representing a single storer table, to be written to by
    the experiment. This is essentially a single HDFStore table.
    '''
    # NOTE(review): defined without @staticmethod but only ever called on the
    # class (`Table._tables_from_store(store, dir)`), which works because
    # accessing a function on the class yields the plain function in Py3.
    def _tables_from_store(store, table_obj_dir):
        # Rebuild a Table object for every HDF key that carries cox metadata.
        tables = {}
        for key in store.keys():
            storer = store.get_storer(key)
            if COX_DATA_KEY in storer.attrs:
                data = storer.attrs[COX_DATA_KEY]
                name = data['name']
                table = Table(name, data['schema'], table_obj_dir, store,
                              has_initialized=True)
                tables[name] = table
        return tables
    def __str__(self):
        # Truncate the dataframe preview to the first 4 rows for readability.
        s = str(self.df)
        if len(s.split('\n')) > 5:
            s = str(self.df[:4]) + '\n ... (%s rows hidden)' % self.df.shape[0]
        return s
    def __init__(self, name, schema, table_obj_dir, store,
                 has_initialized=False):
        '''
        Create a new Table object.
        Args:
            name (str) : name of table
            schema (dict) : schema of table (as described in `store` class)
            table_obj_dir (str) : where to store serialized objects on disk
            store (Store) : parent store.
            has_initialized (bool) : has this table been created yet.
        '''
        self._name = name
        self._schema = schema
        self._HDFStore = store
        self._curr_row_data = None
        self._table_obj_dir = table_obj_dir
        self._has_initialized = has_initialized
        # Stage an empty row immediately so update_row() can be called.
        self._create_row()
    @property
    def df(self):
        '''
        Access the underlying pandas dataframe for this table.
        '''
        if self._has_initialized:
            return self._HDFStore[self._name]
        else:
            # No rows written yet: present an empty frame with the schema's columns.
            return pd.DataFrame(columns=self._schema.keys())
    @property
    def schema(self):
        '''
        Access the underlying schema for this table.
        '''
        # Return a copy so callers cannot mutate the table's schema.
        return dict(self._schema)
    @property
    def nrows(self):
        '''
        How many rows this table has.
        '''
        if self._has_initialized:
            return self._HDFStore.get_storer(self._name).nrows
        else:
            return 0
    def _initialize_nonempty_table(self):
        # Attach cox metadata to the HDF storer so _tables_from_store can
        # recognize and rebuild this table on reopen.
        self._HDFStore.get_storer(self._name).attrs[COX_DATA_KEY] = {
            'schema':self._schema,
            'name':self._name,
        }
        self._has_initialized = True
    def append_row(self, data):
        '''
        Write a dictionary with format column name:value as a row to the table.
        Must have a value for each column. See `update_row` for more mechanics.
        Args:
            data (dict) : dictionary with format `column name`:`value`.
        '''
        self.update_row(data)
        self.flush_row()
    def _create_row(self):
        assert self._curr_row_data is None
        curr_row_dict = {s: None for s in self._schema}
        self._curr_row_data = curr_row_dict
    def update_row(self, data):
        '''
        Update the currently considered row in the data store. Our database is
        append only using the `Table` API. We can update this single row as much
        as we desire, using column:value mappings in `data`. Eventually, the
        currently considered row must be written to the database using
        `flush_row`. This model allows for writing rows easily when not all the
        values are known in a single context. Each `data` object does not need
        to contain every column, but by the time that the row is flushed every
        column must obtained a value. This update model is stateful.
        Python primitives (`int`, `float`, `str`, `bool`), and their numpy
        equivalents are written automatically to the row. All other objects are
        serialized (see `Store`).
        Args:
            data (dict) : a dictionary with format `column name`:`value`.
        '''
        # Data sanity checks
        assert self._curr_row_data is not None
        assert len(set(data.keys())) == len(data.keys())
        if any([k not in self._schema for k in data]):
            raise ValueError("Got keys that are undeclared in schema")
        for k, v in data.items():
            v_type = self._schema[k]
            if v_type == OBJECT:
                # Inline serialization: pickled object stored as a string cell.
                to_store = obj_to_string(v)
            elif v_type == PICKLE or v_type == PYTORCH_STATE:
                # On-disk serialization: the cell stores only a UUID that
                # names the file in the table-objects directory.
                uid = str(uuid4())
                fname = os.path.join(self._table_obj_dir, uid)
                if v_type == PICKLE:
                    with open(fname, 'wb') as f:
                        pickle.dump(v, f)
                else:
                    # Prefer the state_dict over the module/optimizer itself.
                    if 'state_dict' in dir(v):
                        v = v.state_dict()
                    ch.save(v, fname, pickle_module=pickle)
                to_store = uid
            else:
                # Primitive column: coerce to the declared type (int/float/...).
                to_store = v_type(v)
            assert to_store is not None
            self._curr_row_data[k] = to_store
    def get_pickle(self, uid):
        '''
        Unserialize object of store.PICKLE type (a pickled object stored as a
        string on disk).
        Args:
            uid (str) : identifier corresponding to stored object in the table.
        '''
        fname = os.path.join(self._table_obj_dir, uid)
        with open(fname, 'rb') as f:
            obj = pickle.load(f)
        return obj
    def get_state_dict(self, uid, **kwargs):
        '''
        Unserialize object of store.PYTORCH_STATE type (object stored using
        pytorch's serialization system).
        Args:
            uid (str) : identifier corresponding to stored object in the table.
        '''
        fname = os.path.join(self._table_obj_dir, uid)
        # Force dill as the pickle module to match how it was saved.
        kwargs['pickle_module'] = pickle
        return ch.load(fname, **kwargs)
    def get_object(self, s):
        '''
        Unserialize object of store.OBJECT type (a pickled object stored as a
        string in the table).
        Args:
            s (str) : pickle string to unpickle into a python object.
        '''
        return string_to_obj(s)
    def flush_row(self):
        '''
        Writes the current row we have staged (using `update_row`) to the table.
        Another row is immediately staged for `update_row` to act on.
        '''
        self._curr_row_data = _clean_dict(self._curr_row_data, self._schema)
        # Every schema column must have received a non-None value by now.
        for k in self._schema:
            try:
                assert self._curr_row_data[k] is not None
            except:
                # Distinguish "column never set" from "column set to None".
                dne = not (k in self._curr_row_data)
                if dne:
                    msg = 'Col %s does not exist!' % k
                else:
                    msg = 'Col %s is None!' % k
                raise ValueError(msg)
        # Wrap each value in a list so pandas builds a single-row frame.
        for k, v in self._curr_row_data.items():
            self._curr_row_data[k] = [v]
        df = pd.DataFrame(self._curr_row_data)
        # NOTE(review): bare except — presumably covers the "table does not
        # exist yet" KeyError from get_storer; a narrower except would be safer.
        try:
            nrows = self._HDFStore.get_storer(self._name).nrows
        except:
            nrows = 0
        # Shift the index so row indices stay globally sequential.
        df.index += nrows
        self._HDFStore.append(self._name, df, format='table')
        if not self._has_initialized:
            self._initialize_nonempty_table()
        self._curr_row_data = None
        self._create_row()
def schema_from_dict(d, alternative=OBJECT):
    '''
    Infer a table schema from an example column-name -> value dictionary.
    Values that are plain python primitives (int, str, float, bool) keep
    their concrete type as the column type; any other value type falls back
    to the serialization marker given by `alternative`.
    Args:
        d (dict) : example values to infer the schema from.
        alternative (OBJECT|PICKLE|PYTORCH_STATE) : how to store columns
            that are arbitrary python objects.
    '''
    primitive_types = {int, str, float, bool}
    return {
        col: (type(val) if type(val) in primitive_types else alternative)
        for col, val in d.items()
    }
def _clean_dict(d, schema):
d = dict(d)
for k, v in d.items():
v_type = schema[k]
if v_type in [int, float, bool]:
if type(v) == ch.Tensor or type(v) == np.ndarray:
if v.shape == ():
v = v_type(v)
d[k] = v
return d
| 14,666 | 32.486301 | 83 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/tests.py | from torch_utils import *
import torch as ch
if __name__ == '__main__':
    # Sanity-check script for the jvp/vjp helpers in torch_utils: compares
    # the numerical Jacobian-vector and vector-Jacobian products against an
    # analytic Jacobian J for a small test function func: R^3 -> R^4.
    def func(x):
        # Test function whose Jacobian is known in closed form (see J below).
        f1 = x[0] * x[0] * x[1]
        f2 = x[2]/(x[1] * x[0])
        f3 = x[2]
        f4 = x[0]
        return ch.stack([f1, f2, f3, f4])
    def J(x):
        # Analytic 4x3 Jacobian of func at x.
        return ch.tensor([
            [2 * x[0] * x[1], x[0]**2, 0],
            [-x[2] / (x[1] * x[0]**2), -x[2] / (x[0] * x[1]**2), (x[0] * x[1])**(-1)],
            [0, 0, 1],
            [1, 0, 0]
        ])
    ch.manual_seed(0)
    print('Running tests!')
    # NOTE(review): this helper is defined but never invoked below; it
    # references names (`pds`, `selected`, `net`, `num_samples`, `params`,
    # `alt_fisher_product`) that are undefined in this script, so calling it
    # as-is would raise NameError. Presumably leftover from an older harness.
    def test_fisher_vector_prod(x):
        fisher_product = 0
        for state_action_dist in pds[selected]:
            diag_M = state_action_dist.pow(-1)
            Jx = jvp(state_action_dist, net.parameters(), x)
            MJx = diag_M * Jx
            JTMJx = vjp(state_action_dist, net.parameters(), MJx, False)
            fisher_product += JTMJx
        alt = alt_fisher_product(x)
        res = fisher_product / ch.tensor(num_samples).float() + x*params.DAMPING
        print(alt, res)
        print("Correlation", (alt*res).sum()/ch.sqrt((alt**2).sum() * (res**2).sum()))
        return res
    def test_it(t, e):
        # Elementwise relative difference between truth t and estimate e.
        return (t - e)/((t + e)/2)
    for i in range(5):
        # NOTE(review): ch.tensor(ch.rand(3), ...) copies an existing tensor;
        # modern torch prefers ch.rand(3, requires_grad=True).
        x = ch.tensor(ch.rand(3), requires_grad=True)
        v = ch.rand(4)
        u = ch.rand(3)
        jacobian = J(x)
        Ju = jacobian @ u
        JTv = jacobian.t() @ v
        est_Ju = jvp(func(x),[x],u)
        est_JTv = vjp(func(x),[x],v)
        # Printed values should be near zero if jvp/vjp are correct.
        print('Ju:', test_it(Ju, est_Ju))
        print('JTv:', test_it(JTv, est_JTv))
        print('-' * 80)
| 1,571 | 27.071429 | 87 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/convex_relaxation.py | import random, sys, time, multiprocessing
from auto_LiRPA import BoundedModule, BoundedTensor, BoundedParameter
from auto_LiRPA.perturbations import *
from policy_gradients.models import activation_with_name
forward_one = True
## Step 1: Initial original model as usual, see model details in models/sample_models.py
class RelaxedCtsPolicyForState(nn.Module):
    # Continuous-action policy network wrapped for auto_LiRPA bound
    # computation. Depending on the module-level `forward_one` flag, the
    # forward pass returns either the raw action means (forward_one=True)
    # or a stdev-scaled squared distance to `old_means` (forward_one=False).
    def __init__(self, state_dim=11, action_dim=3, init=None, hidden_sizes=[64, 64],
            time_in_state=False, share_weights=False, activation='tanh', policy_model=None):
        # NOTE(review): mutable default `hidden_sizes=[64, 64]` — harmless
        # here (only iterated, never mutated) but a tuple would be safer.
        super().__init__()
        # These features of the original policy class are unsupported here.
        assert time_in_state is False
        assert share_weights is False
        assert init is None
        if isinstance(activation, str):
            self.activation = activation_with_name(activation)()
        else:
            # Default to tanh.
            self.activation = nn.Tanh()
        self.action_dim = action_dim
        if policy_model is None:
            # Create our own layers.
            self.affine_layers = nn.ModuleList()
            prev_size = state_dim
            for i in hidden_sizes:
                lin = nn.Linear(prev_size, i, bias=False)
                self.affine_layers.append(lin)
                prev_size = i
            self.final_mean = nn.Linear(prev_size, action_dim, bias=False)
            stdev_init = torch.zeros(action_dim)
            # FIXME: name of this variable must contain "weight" due to a bug in auto_LiRPA.
            if not forward_one:
                self.log_weight = torch.nn.Parameter(stdev_init)
        else:
            print("Create Relaxed model without duplicating parameters...")
            # Copy parameters from an existing model, do not create new parameters!
            self.affine_layers = policy_model.affine_layers
            # Copy the final mean vector.
            self.final_mean = policy_model.final_mean
            if not forward_one:
                # Copy the log of variance.
                self.log_weight = policy_model.log_stdev
    '''
    Compute the L2 distance of mean vectors, to bound KL divergence.
    '''
    # The forward signature is chosen at class-definition time based on the
    # module-level `forward_one` flag.
    if forward_one:
        def forward(self, x):
            # Plain MLP forward: returns the Gaussian policy's mean actions.
            for affine in self.affine_layers:
                x = self.activation(affine(x))
            means = self.final_mean(x)
            return means
    else:
        def forward(self, x, old_means):
            for affine in self.affine_layers:
                # Generate an extra "one" for each element, which acts as a bias.
                x = self.activation(affine(x))
            means = self.final_mean(x)
            # Squared distance to the old means, scaled by exp(log_weight);
            # used as a surrogate for the KL divergence bound.
            diff = means - old_means
            stdev = torch.exp(self.log_weight)
            return ((diff * diff) / stdev).sum(axis=-1, keepdim=True)
    def get_means(self, x):
        # Same computation as the forward_one forward(), kept separate so it
        # can be called outside the bounded module.
        for affine in self.affine_layers:
            x = affine(x)
            x = self.activation(x)
        means = self.final_mean(x)
        return means
def intermediate_to_kl(lb, ub, means, stdev=None):
    '''
    Convert elementwise output bounds [lb, ub] around `means` into the
    squared worst-case deviation per action dimension, summed over the last
    axis (keeping that axis with size 1). When `stdev` is given, each
    squared deviation is divided by stdev**2 first. This upper-bounds the
    mean-shift term of the policy KL divergence.
    '''
    dev_lo = (lb - means).abs()
    dev_hi = (ub - means).abs()
    worst = torch.max(dev_lo, dev_hi)
    squared = worst * worst
    if stdev is not None:
        squared = squared / (stdev * stdev)
    return squared.sum(axis=-1, keepdim=True)
# Two variants of get_kl_bound are defined, selected by the module-level
# `forward_one` flag (which also determines the model's forward signature).
if forward_one:
    def get_kl_bound(model, x, means, eps, beta=None, stdev=None, use_full_backward=False):
        # Bounds the model's output means under an L-inf perturbation of
        # radius `eps` on x, then converts the bounds into a KL-style
        # regularization term via intermediate_to_kl. Returns (ibp_kl,
        # crown_kl) when beta is None, else a single beta-mixed kl tensor.
        # Set each layer's perturbation eps and log_stdev's perturbation.
        x = BoundedTensor(x, ptb=PerturbationLpNorm(norm=np.inf, eps=eps)).requires_grad_(False)
        if forward_one:
            inputs = (x, )
        else:
            inputs = (x, means)
        if use_full_backward:
            # Full backward method, tightest bound.
            ilb, iub = model.compute_bounds(inputs, IBP=False, C=None, method="backward", bound_lower=True, bound_upper=True)
            # Fake beta, avoid backward below.
            beta = 1.0
        else:
            # IBP Pass.
            ilb, iub = model.compute_bounds(inputs, IBP=True, C=None, method=None, bound_lower=True, bound_upper=True)
            if beta is None or (1 - beta) > 1e-20:
                # CROWN Pass.
                clb, cub = model.compute_bounds(x=None, IBP=False, C=None, method='backward', bound_lower=True, bound_upper=True)
        if beta is None:
            # Bound final output neuron.
            ikl = intermediate_to_kl(ilb, iub, means, stdev=stdev)
            ckl = intermediate_to_kl(clb, cub, means, stdev=stdev)
            return ikl, ckl
        else:
            # Beta schedule is from 0 to 1.
            if 1 - beta < 1e-20:
                # beta == 1: pure IBP bounds (CROWN pass was skipped above).
                lb = ilb
                ub = iub
            else:
                # Linear interpolation between IBP and CROWN bounds.
                lb = beta * ilb + (1 - beta) * clb
                ub = beta * iub + (1 - beta) * cub
            kl = intermediate_to_kl(lb, ub, means, stdev=stdev)
            return kl
else:
    def get_kl_bound(model, x, means, eps):
        # In this variant the model itself outputs the KL surrogate (see the
        # forward_one=False forward), so only its upper bound is needed.
        # Set each layer's perturbation eps and log_stdev's perturbation.
        x = BoundedTensor(x, ptb=PerturbationLpNorm(norm=np.inf, eps=eps))
        if forward_one:
            inputs = (x, )
        else:
            inputs = (x, means)
        # IBP Pass.
        _, iub = model.compute_bounds(inputs, IBP=True, C=None, method=None, bound_lower=False, bound_upper=True)
        # CROWN Pass.
        _, cub = model.compute_bounds(x=None, IBP=False, C=None, method='backward', bound_lower=False, bound_upper=True)
        # iub = cub
        return iub, cub
def compute_perturbations(model, x, means, perturbations):
    # Debug helper: print the bound interval [lb, ub] of the wrapped model
    # for each perturbation radius in `perturbations`, then once for eps=0.
    use_ibp = True
    method = 'backward'
    # eps is mutated in place on the BoundedTensor's perturbation object below.
    x = BoundedTensor(x, ptb=PerturbationLpNorm(norm=np.inf, eps=0))
    inputs = (x, means)
    for p in perturbations:
        x.ptb.eps = p
        lb, ub = model.compute_bounds(inputs, IBP=use_ibp, C=None, method=method, bound_lower=True, bound_upper=True)
        lb = lb.detach().cpu().numpy().squeeze()
        ub = ub.detach().cpu().numpy().squeeze()
        print("eps={:.4f}, lb={}, ub={}".format(p, lb, ub))
    # Reset to the unperturbed case as a reference point.
    x.ptb.eps = 0.0
    lb, ub = model.compute_bounds(inputs, IBP=use_ibp, C=None, method=method, bound_lower=True, bound_upper=True)
    lb = lb.detach().cpu().numpy().squeeze()
    ub = ub.detach().cpu().numpy().squeeze()
    print("eps=0.0000, lb={}, ub={}".format(lb, ub))
def main():
    # Standalone sanity check for the relaxed policy + auto_LiRPA wrapping:
    # builds a random policy, wraps it as a BoundedModule, prints bound
    # intervals over a range of eps, and backprops through the KL bound.
    torch.manual_seed(1234)
    torch.cuda.manual_seed_all(1234)
    random.seed(1234)
    np.random.seed(123)
    input_size = 17
    action_size = 6
    ## Step 1: Initial original model as usual; note that this model has BoundedParameter as its weight parameters
    model_ori = RelaxedCtsPolicyForState(state_dim=input_size, action_dim=action_size)
    # NOTE(review): requires a 'test_policy_net.model' checkpoint on disk,
    # though the load_state_dict call below is commented out.
    state_dict = torch.load('test_policy_net.model')
    if not forward_one:
        # Rename the stdev parameter to match this class's attribute name.
        state_dict['log_weight'] = state_dict['log_stdev']
        del state_dict['log_stdev']
    # model_ori.load_state_dict(state_dict)
    ## Step 2: Prepare dataset as usual
    dummy_input1 = torch.randn(1, input_size)
    dummy_input2 = torch.randn(1, action_size)
    if forward_one:
        inputs = (dummy_input1, )
    else:
        inputs = (dummy_input1, dummy_input2)
    model_ori(*inputs)
    # inputs = (dummy_input1, )
    # dummy_input2 = model_ori.get_means(dummy_input1)
    ## Step 3: wrap model with auto_LiRPA
    # The second parameter dummy_input is for constructing the trace of the computational graph.
    model = BoundedModule(model_ori, inputs)
    all_states = x = torch.randn(2, input_size)
    means = model_ori.get_means(x).detach()
    if forward_one:
        print('prediction', model_ori(x).sum())
    else:
        print('prediction', model_ori(x, means).sum())
    action_means = means
    perturbations = np.arange(0.0, 0.1, 0.01)
    compute_perturbations(model, x, means, perturbations)
    if forward_one:
        # pred = model_ori(all_states)
        # pred = ((pred - means) ** 2).mean()
        # NOTE(review): the eps=0.1 result is immediately overwritten by the
        # eps=0.0 call — presumably a debugging leftover; confirm intent.
        ikl, ckl = get_kl_bound(model, all_states, action_means, 0.1)
        ikl, ckl = get_kl_bound(model, all_states, action_means, 0.0)
        print('ikl', ikl.mean().item())
        print('ckl', ckl.mean().item())
        pred = (0.5 * ikl + 0.5 * ckl).mean()
        pred.backward()
        print('pred', pred.item())
    else:
        iub, cub = get_kl_bound(model, all_states, action_means, 0.1)
        # iub, cub = get_kl_bound(model, all_states, action_means, 0)
        # iub, cub = model_ori(all_states, action_means).mean()
        print('iub', iub.mean().item())
        print('cub', cub.mean().item())
        kl = (0.5 * iub + 0.5 * cub).mean()
        kl.backward()
        print('kl', kl.item())
    # Show which parameters received gradients from the bound computation.
    for p in model.parameters():
        if p.grad is not None:
            print(p.size(), p.grad.abs().sum().item())
            # print(p.size(), p.grad)
        else:
            print(p.size(), p.grad)
# Script entry point: run the relaxation sanity check.
if __name__ == "__main__":
    main()
| 8,749 | 37.888889 | 125 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/steps.py | import functools
import torch as ch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
import numpy as np
import math
import time
from tqdm import tqdm
from torch.nn.utils import parameters_to_vector as flatten
from torch.nn.utils import vector_to_parameters as assign
from .torch_utils import *
import matplotlib as mpl
mpl.use('Agg') # No display
import matplotlib.pyplot as plt
from .convex_relaxation import get_kl_bound as get_state_kl_bound
'''
File for taking steps in both policy and value network space.
Layout of this file:
- Surrogate reward function
- Logging functions for TRPO approximations
- kl_approximation_logging
- kl_vs_second_order_approx
- Possible value loss functions
- consistency loss [+ clipped version for matching OpenAI]
- time-dependent baseline
- Actual optimization functions
- value_step
- ppo_step
- trpo_step
'''
def adv_normalize(adv, mask=None):
    '''
    Standardize advantage estimates to zero mean and unit variance.
    The mean and std are computed over `adv[mask]` when a mask is given
    (so padded timesteps don't skew the statistics), but the whole `adv`
    tensor is shifted/scaled and returned. A tensor with a single
    (masked) element is returned unchanged, since its std is undefined.
    Raises AssertionError if the std is zero or NaN.
    '''
    reference = adv if mask is None else adv[mask]
    if reference.nelement() == 1:
        return adv
    mean = reference.mean()
    std = reference.std()
    assert std != 0. and not ch.isnan(std), 'Need nonzero std'
    # Small epsilon guards against division issues near-zero std.
    return (adv - mean) / (std + 1e-8)
def surrogate_reward(adv, *, new, old, clip_eps=None, mask=None, normalize=True):
    """Surrogate objective for TRPO/PPO: likelihood ratio times advantage.

    Computes r_t * A_t with r_t = exp(new - old), where ``new``/``old`` are
    the log probabilities of the taken actions under the new/old policy.
    When ``clip_eps`` is given the ratio is clamped to [1-eps, 1+eps]
    (PPO-style clipping).  Advantages are normalized (optionally under
    ``mask``) unless ``normalize`` is False.
    """
    advantages = adv_normalize(adv, mask) if normalize else adv
    assert shape_equal_cmp(new, old, advantages)
    # exp(log p_new - log p_old) is the probability ratio of the actions.
    ratio = ch.exp(new - old)
    if clip_eps is not None:
        # Clamp the ratio for the clipped PPO objective.
        ratio = ch.clamp(ratio, 1 - clip_eps, 1 + clip_eps)
    return ratio * advantages
######
# Possible Loss Functions for the value network
# Supports consistency loss, time-dependent baseline, OpenAI loss
# Also logs explained variance = MSE(values, targets)/Var[targets]
#####
def value_loss_gae(vs, _, advantages, not_dones, params, old_vs, mask=None, store=None, re=False, reduction='mean'):
    '''
    GAE-based loss for the value function:
        L_t = ((v_t + A_t).detach() - v_{t})
    Optionally, we clip the value function around the original value of v_t
    Inputs: vs (current value predictions), advantages, not_dones,
        params, old_vs (value predictions at rollout time),
        mask (boolean mask of valid timesteps)
    NOTE(review): callers in this file always pass a tensor mask; a None
        mask would fail in ch.logical_and below -- confirm before reusing.
    Outputs: value function loss (plus relative-error stats when re=True)
    '''
    # Desired values are old values plus advantage of the action taken. They do not change during the optimization process.
    # We want the current values are close to them.
    val_targ = (old_vs + advantages).detach()
    assert shape_equal_cmp(val_targ, vs, not_dones, old_vs, advantages)
    assert len(vs.shape) == 1 or len(vs.shape) == 2
    # Clipping radius: some configs define CLIP_VAL_EPS; otherwise fall
    # back to the policy clipping constant CLIP_EPS.
    try:
        vs_clipped = old_vs + ch.clamp(vs - old_vs, -params.CLIP_VAL_EPS, params.CLIP_VAL_EPS)
    except AttributeError as e:
        vs_clipped = old_vs + ch.clamp(vs - old_vs, -params.CLIP_EPS, params.CLIP_EPS)
    # Don't incur loss from last timesteps (since there's no return to use)
    sel = ch.logical_and(not_dones.bool(), mask)
    # print('selected', sel.sum().item())
    assert shape_equal_cmp(vs, sel)
    val_loss_mat_unclipped = (vs - val_targ)[sel].pow(2)
    val_loss_mat_clipped = (vs_clipped - val_targ)[sel].pow(2)
    # In OpenAI's PPO implementation, we clip the value function around the previous value estimate
    # and use the worse of the clipped and unclipped versions to train the value function
    # Presumably the inspiration for this is similar to PPO
    if params.VALUE_CLIPPING:
        val_loss_mat = ch.max(val_loss_mat_unclipped, val_loss_mat_clipped)
    else:
        val_loss_mat = val_loss_mat_unclipped
    # assert shape_equal_cmp(val_loss_mat, vs)
    # Mean squared loss
    if reduction == 'mean':
        mse = val_loss_mat.mean()
    elif reduction == 'sum':
        mse = val_loss_mat.sum()
    else:
        raise ValueError('Unknown reduction ' + reduction)
    if re:
        # Relative error.
        # NOTE(review): val_loss_mat holds *squared* residuals selected by
        # `sel`, while val_targ[se] is selected by not_dones only; the two
        # shapes agree only when mask covers all timesteps -- confirm.
        se = not_dones.bool()
        relerr = val_loss_mat/val_targ[se].abs()
        mre = relerr.abs().mean()
        msre = relerr.pow(2).mean()
        return mse, mre, msre
    return mse
def value_loss_returns(vs, returns, advantages, not_dones, params, old_vs,
                       mask=None, store=None, re=False):
    """Direct-returns loss for the value function: L_t = (v(s_t) - R_t)^2.

    Only non-terminal timesteps (not_dones == 1) contribute.  When ``re``
    is True, also returns the mean relative error and the mean squared
    relative error of the residuals w.r.t. |R_t|.
    """
    assert shape_equal_cmp(vs, returns)
    alive = not_dones.bool()
    residuals = (vs - returns)[alive]
    mse = residuals.pow(2).mean()
    if not re:
        return mse
    # Relative error of each residual against the magnitude of its target.
    rel = residuals / returns[alive].abs()
    return mse, rel.abs().mean(), rel.pow(2).mean()
###
# Optimization functions for the value and policy parameters
# value_step, ppo_step, trpo_step
###
def value_step(all_states, returns, advantages, not_dones, net,
               val_opt, params, store, old_vs=None, opt_step=None,
               should_tqdm=False, should_cuda=False, test_saps=None):
    '''
    Take an optimizer step fitting the value function
    parameterized by a neural network
    Inputs:
    - all_states, the states at each timestep
    - returns, discounted rewards (ret_t = r_t + gamma*ret_{t+1})
    - advantages, estimated by GAE
    - not_dones, N * T array with 0s at final steps and 1s everywhere else
    - net, the neural network representing the value function
    - val_opt, the optimizer for net
    - params, dictionary of parameters
    - old_vs, optional precomputed value predictions; recomputed here when None
    - test_saps, optional held-out samples, used only for the tqdm readout
    Returns:
    - Loss of the value regression problem (last minibatch's loss)
    '''
    # (sharing weights) XOR (old_vs is None)
    # assert params.SHARE_WEIGHTS ^ (old_vs is None)
    # Options for value function
    VALUE_FUNCS = {
        "gae": value_loss_gae,
        "time": value_loss_returns
    }
    # If we are not sharing weights, then we need to keep track of what the
    # last value was here. If we are sharing weights, this is handled in policy_step
    with ch.no_grad():
        if old_vs is None:
            state_indices = np.arange(returns.nelement())
            # No shuffling, just split an sequential list of indices.
            splits = np.array_split(state_indices, params.NUM_MINIBATCHES)
            orig_vs = []
            # Minibatch.
            for selected in splits:
                # Values of current network prediction.
                orig_vs.append(net(all_states[selected]).squeeze(-1))
            orig_vs = ch.cat(orig_vs)
            old_vs = orig_vs.detach()
        if test_saps is not None:
            old_test_vs = net(test_saps.states).squeeze(-1)
    """
    print('all_states', all_states.size())
    print('returns', returns.size())
    print('advantages', advantages.size())
    print('not_dones', not_dones.size())
    print('old_vs', old_vs.size())
    """
    r = range(params.VAL_EPOCHS) if not should_tqdm else \
        tqdm(range(params.VAL_EPOCHS))
    if params.HISTORY_LENGTH > 0 and params.USE_LSTM_VAL:
        # LSTM policy. Need to go over all episodes instead of states.
        batches, alive_masks, time_masks, lengths = pack_history([all_states, returns, not_dones, advantages, old_vs], not_dones, max_length=params.HISTORY_LENGTH)
        assert not params.SHARE_WEIGHTS
    # NOTE(review): the inner `for i, batch` loop below shadows this epoch
    # variable `i`; harmless today (i is unused after it) but confirm
    # before relying on `i` past the inner loop.
    for i in r:
        if params.HISTORY_LENGTH > 0 and params.USE_LSTM_VAL:
            # LSTM policy. Need to go over all episodes instead of states.
            # Gradients accumulate across all time chunks; one optimizer
            # step per epoch.
            hidden = None
            val_opt.zero_grad()
            val_loss = 0.0
            for i, batch in enumerate(batches):
                # Now we get chunks of time sequences, each of them with a maximum length of params.HISTORY_LENGTH.
                # select log probabilities, advantages of this minibatch.
                batch_states, batch_returns, batch_not_dones, batch_advs, batch_old_vs = batch
                mask = time_masks[i]
                # keep only the alive hidden states.
                if hidden is not None:
                    # print('hidden[0]', hidden[0].size())
                    hidden = [h[:, alive_masks[i], :].detach() for h in hidden]
                    # print('hidden[0]', hidden[0].size())
                vs, hidden = net.multi_forward(batch_states, hidden=hidden)
                vs = vs.squeeze(-1)
                """
                print('vs', vs.size())
                print('batch_states', batch_states.size())
                print('batch_returns', batch_returns.size())
                print('batch_not_dones', batch_not_dones.size())
                print('batch_advs', batch_advs.size())
                print('batch_old_vs', batch_old_vs.size())
                input()
                """
                """
                print('old')
                print(batch_old_vs)
                print('new')
                print(vs * mask)
                print('diff')
                print((batch_old_vs - vs * mask).pow(2).sum().item())
                input()
                """
                vf = VALUE_FUNCS[params.VALUE_CALC]
                # Sum (not mean) per chunk: chunks contain uneven numbers
                # of valid samples; normalized below by total sample count.
                batch_val_loss = vf(vs, batch_returns, batch_advs, batch_not_dones, params,
                                    batch_old_vs, mask=mask, store=store, reduction='sum')
                val_loss += batch_val_loss
            val_loss = val_loss / all_states.size(0)
            val_loss.backward()
            val_opt.step()
        else:
            # Create minibatches with shuffuling
            state_indices = np.arange(returns.nelement())
            np.random.shuffle(state_indices)
            splits = np.array_split(state_indices, params.NUM_MINIBATCHES)
            assert shape_equal_cmp(returns, advantages, not_dones, old_vs)
            # Minibatch SGD
            for selected in splits:
                val_opt.zero_grad()
                def sel(*args):
                    return [v[selected] for v in args]
                def to_cuda(*args):
                    return [v.cuda() for v in args]
                # Get a minibatch (64) of returns, advantages, etc.
                tup = sel(returns, advantages, not_dones, old_vs, all_states)
                # All timesteps are valid here, so use an all-True mask.
                mask = ch.tensor(True)
                if should_cuda: tup = to_cuda(*tup)
                sel_rets, sel_advs, sel_not_dones, sel_ovs, sel_states = tup
                # Value prediction of current network given the states.
                vs = net(sel_states).squeeze(-1)
                vf = VALUE_FUNCS[params.VALUE_CALC]
                val_loss = vf(vs, sel_rets, sel_advs, sel_not_dones, params,
                              sel_ovs, mask=mask, store=store)
                # If we are sharing weights, then value_step gets called
                # once per policy optimizer step anyways, so we only do one batch
                if params.SHARE_WEIGHTS:
                    return val_loss
                # From now on, params.SHARE_WEIGHTS must be False
                val_loss.backward()
                val_opt.step()
        if should_tqdm:
            if test_saps is not None:
                vs = net(test_saps.states).squeeze(-1)
                test_loss = vf(vs, test_saps.returns, test_saps.advantages,
                               test_saps.not_dones, params, old_test_vs, None)
            r.set_description(f'vf_train: {val_loss.mean().item():.2f}'
                              f'vf_test: {test_loss.mean().item():.2f}')
    print(f'val_loss={val_loss.item():8.5f}')
    return val_loss
def pack_history(features, not_dones, max_length):
    """Split flat rollout tensors into padded per-episode time chunks (for LSTMs).

    Each tensor in ``features`` has shape (N, ...) where N concatenates
    several episodes back to back; a 0 in ``not_dones`` marks the final
    step of an episode.  Episodes are split out, padded to a common
    length that is a multiple of ``max_length``, then sliced along time
    into chunks of ``max_length`` steps each.

    Inputs:
    - features: list of tensors, all sharing the same first dimension N
    - not_dones: (N,) tensor with 0 at episode ends and 1 elsewhere
    - max_length: time length of each returned chunk

    Returns:
    - batches: one list of tensors per chunk, each tensor of shape
      (num_alive_episodes, max_length, ...)
    - alive_masks: per chunk, boolean mask over the previous chunk's
      episodes saying which are still alive (used to filter hidden state)
    - time_masks: per chunk, (num_alive, max_length) boolean mask of
      valid (non-padding) timesteps
    - lengths: list of episode lengths

    Fix vs. original: ``end`` is initialized before the loop, so a batch
    consisting of a single unfinished episode (no zeros in ``not_dones``)
    no longer raises NameError.
    """
    # Positions where not_dones == 0, i.e. the last step of each episode.
    nnz = ch.nonzero(1.0 - not_dones, as_tuple=False).view(-1).cpu().numpy()
    assert isinstance(features, list)
    # All feature tensors must share the same leading (time) dimension.
    size = features[0].size(0)
    for t in features:
        assert size == t.size(0)
    all_pieces = [[] for _ in range(len(features))]
    lengths = []
    start = 0
    # Initialize `end` so the tail-collection below works when nnz is
    # empty (a single episode with no terminating 0).
    end = 0
    for i in nnz:
        end = i + 1
        for (a, b) in zip(all_pieces, features):
            a.append(b[start:end])
        lengths.append(end - start)
        start = end
    # The last (unfinished) episode has no terminating 0; collect its tail,
    # unless the previous episode ended exactly at the last element.
    if end != size:
        for (a, b) in zip(all_pieces, features):
            a.append(b[end:])
        lengths.append(size - end)
    # First pad all episodes to the longest episode length.
    padded_features = [pad_sequence(a, batch_first=True) for a in all_pieces]
    # Then pad further so the time dimension is a multiple of max_length.
    longest = padded_features[0].size(1)
    extra = int(math.ceil(longest / max_length) * max_length - longest)
    new_padded_features = []
    for t in padded_features:
        # NOTE(review): padding is allocated on the default device; assumes
        # features live on the default device -- confirm for GPU tensors.
        if t.ndim == 3:
            new_tensor = ch.zeros(t.size(0), extra, t.size(2))
        else:
            new_tensor = ch.zeros(t.size(0), extra)
        new_tensor = ch.cat([t, new_tensor], dim=1)
        new_padded_features.append(new_tensor)
    del padded_features
    # Now divide the padded episodes into chunks of max_length timesteps.
    nbatches = new_padded_features[0].size(1) // max_length
    alive_masks = []  # which episodes survive into each chunk
    time_masks = []   # per chunk/episode, which timesteps are real data
    batches = [[] for _ in range(nbatches)]  # batch of batches
    alive = ch.tensor(lengths)
    alive_iter = ch.tensor(lengths)
    for i in range(nbatches):
        full_mask = alive > 0
        iter_mask = alive_iter > 0
        for t in new_padded_features:
            # Only keep episodes that still have timesteps left.
            batches[i].append(t[full_mask, i * max_length : i * max_length + max_length])
        # Drop episodes that finished before this chunk.
        alive_iter = alive_iter[iter_mask]
        time_mask = alive_iter.view(-1, 1) > ch.arange(max_length).view(1, -1)
        alive -= max_length
        alive_iter -= max_length
        alive_masks.append(iter_mask)
        time_masks.append(time_mask)
    return batches, alive_masks, time_masks, lengths
def ppo_step(all_states, actions, old_log_ps, rewards, returns, not_dones,
             advs, net, params, store, opt_step):
    '''
    Proximal Policy Optimization
    Runs K epochs of PPO as in https://arxiv.org/abs/1707.06347
    Inputs:
    - all_states, the historical value of all the states
    - actions, the actions that the policy sampled
    - old_log_ps, the log probability of the actions that the policy sampled
    - advs, advantages as estimated by GAE
    - net, policy network to train [WILL BE MUTATED]
    - params, additional placeholder for parameters like EPS
    Returns:
    - (loss, surrogate, entropy) of the final minibatch, as floats;
      main job is to mutate the net
    '''
    # Storing batches of stuff
    # if store is not None:
    #     orig_dists = net(all_states)
    ### ACTUAL PPO OPTIMIZATION START
    if params.SHARE_WEIGHTS:
        # Shared trunk: snapshot the current value predictions so the
        # value loss below can clip against them.
        orig_vs = net.get_value(all_states).squeeze(-1).view([params.NUM_ACTORS, -1])
        old_vs = orig_vs.detach()
    """
    print(all_states.size())
    print(actions.size())
    print(old_log_ps.size())
    print(advs.size())
    print(params.HISTORY_LENGTH)
    print(not_dones.size())
    """
    if params.HISTORY_LENGTH > 0:
        # LSTM policy. Need to go over all episodes instead of states.
        # We normalize all advantages at once instead of batch by batch, since each batch may contain different number of samples.
        normalized_advs = adv_normalize(advs)
        batches, alive_masks, time_masks, lengths = pack_history([all_states, actions, old_log_ps, normalized_advs], not_dones, max_length=params.HISTORY_LENGTH)
    for _ in range(params.PPO_EPOCHS):
        if params.HISTORY_LENGTH > 0:
            # LSTM policy. Need to go over all episodes instead of states.
            # Gradients accumulate over all time chunks; one optimizer step
            # per PPO epoch.
            params.POLICY_ADAM.zero_grad()
            hidden = None
            surrogate = 0.0
            for i, batch in enumerate(batches):
                # Now we get chunks of time sequences, each of them with a maximum length of params.HISTORY_LENGTH.
                # select log probabilities, advantages of this minibatch.
                batch_states, batch_actions, batch_old_log_ps, batch_advs = batch
                mask = time_masks[i]
                """
                print('batch states', batch_states.size())
                print('batch actions', batch_actions.size())
                print('batch old_log_ps', batch_old_log_ps.size())
                print('batch advs', batch_advs.size())
                print('alive mask', alive_masks[i].size(), alive_masks[i].sum())
                print('mask', mask.size())
                """
                # keep only the alive hidden states.
                if hidden is not None:
                    # print('hidden[0]', hidden[0].size())
                    hidden = [h[:, alive_masks[i], :].detach() for h in hidden]
                    # print('hidden[0]', hidden[0].size())
                # dist contains mean and variance of Gaussian.
                mean, std, hidden = net.multi_forward(batch_states, hidden=hidden)
                dist = mean, std
                # Convert state distribution to log likelihood.
                new_log_ps = net.get_loglikelihood(dist, batch_actions)
                # print('batch new_log_ps', new_log_ps.size())
                """
                print('old')
                print(batch_old_log_ps)
                print('new')
                print(new_log_ps * mask)
                print('diff')
                print((batch_old_log_ps - new_log_ps * mask).pow(2).sum().item())
                """
                shape_equal_cmp(new_log_ps, batch_old_log_ps)
                # Calculate rewards
                # the surrogate rewards is basically exp(new_log_ps - old_log_ps) * advantage
                # dimension is the same as minibatch size.
                # We already normalized advs before. No need to normalize here.
                unclp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps, mask=mask, normalize=False)
                clp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps,
                                           clip_eps=params.CLIP_EPS, mask=mask, normalize=False)
                # Total loss, is the min of clipped and unclipped reward for each state, averaged.
                surrogate_batch = (-ch.min(unclp_rew, clp_rew) * mask).sum()
                # We sum the batch loss here because each batch contains an uneven number of trajectories.
                surrogate = surrogate + surrogate_batch
            # Divide surrogate loss by number of samples in this batch.
            surrogate = surrogate / all_states.size(0)
            # Calculate entropy bonus
            # So far, the entropy only depends on std and does not depend on time. No need to mask.
            entropy_bonus = net.entropies(dist)
            entropy = -params.ENTROPY_COEFF * entropy_bonus
            loss = surrogate + entropy
            # optimizer (only ADAM)
            loss.backward()
            if params.CLIP_GRAD_NORM != -1:
                # NOTE(review): clip_grad_norm is the deprecated alias of
                # clip_grad_norm_ in modern PyTorch -- same behavior.
                ch.nn.utils.clip_grad_norm(net.parameters(), params.CLIP_GRAD_NORM)
            params.POLICY_ADAM.step()
        else:
            # Memoryless policy.
            # State is in shape (experience_size, observation_size). Usually 2048.
            state_indices = np.arange(all_states.shape[0])
            np.random.shuffle(state_indices)
            # We use a minibatch of states to do optimization, and each epoch contains several iterations.
            splits = np.array_split(state_indices, params.NUM_MINIBATCHES)
            # A typical mini-batch size is 2048/32=64
            for selected in splits:
                def sel(*args, offset=0):
                    if offset == 0:
                        return [v[selected] for v in args]
                    else:
                        offset_selected = selected + offset
                        return [v[offset_selected] for v in args]
                # old_log_ps: log probabilities of actions sampled based in experience buffer.
                # advs: advantages of these states.
                # both old_log_ps and advs are in shape (experience_size,) = 2048.
                # Using memoryless policy.
                tup = sel(all_states, actions, old_log_ps, advs)
                # select log probabilities, advantages of this minibatch.
                batch_states, batch_actions, batch_old_log_ps, batch_advs = tup
                # print(batch_actions.size())
                # print(batch_advs.size())
                # Forward propagation on current parameters (being constantly updated), to get distribution of these states
                # dist contains mean and variance of Gaussian.
                dist = net(batch_states)
                # print('dist', dist[0].size())
                # print('batch_actions', batch_actions.size())
                # Convert state distribution to log likelihood.
                new_log_ps = net.get_loglikelihood(dist, batch_actions)
                # print('new_log_ps', new_log_ps.size())
                # print('old_log_ps', batch_old_log_ps.size())
                shape_equal_cmp(new_log_ps, batch_old_log_ps)
                # Calculate rewards
                # the surrogate rewards is basically exp(new_log_ps - old_log_ps) * advantage
                # dimension is the same as minibatch size.
                unclp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps)
                clp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps,
                                           clip_eps=params.CLIP_EPS)
                # Calculate entropy bonus
                # So far, the entropy only depends on std and does not depend on time. No need to mask.
                entropy_bonus = net.entropies(dist).mean()
                # Total loss, is the min of clipped and unclipped reward for each state, averaged.
                surrogate = (-ch.min(unclp_rew, clp_rew)).mean()
                entropy = -params.ENTROPY_COEFF * entropy_bonus
                loss = surrogate + entropy
                # If we are sharing weights, take the value step simultaneously
                # (since the policy and value networks depend on the same weights)
                if params.SHARE_WEIGHTS:
                    tup = sel(returns, not_dones, old_vs)
                    batch_returns, batch_not_dones, batch_old_vs = tup
                    val_loss = value_step(batch_states, batch_returns, batch_advs,
                                          batch_not_dones, net.get_value, None, params,
                                          store, old_vs=batch_old_vs, opt_step=opt_step)
                    loss += params.VALUE_MULTIPLIER * val_loss
                # Optimizer step (Adam or SGD)
                if params.POLICY_ADAM is None:
                    # Plain SGD with optional gradient-norm clipping.
                    grad = ch.autograd.grad(loss, net.parameters())
                    flat_grad = flatten(grad)
                    if params.CLIP_GRAD_NORM != -1:
                        norm_grad = ch.norm(flat_grad)
                        flat_grad = flat_grad if norm_grad <= params.CLIP_GRAD_NORM else \
                            flat_grad / norm_grad * params.CLIP_GRAD_NORM
                    assign(flatten(net.parameters()) - params.PPO_LR * flat_grad, net.parameters())
                else:
                    params.POLICY_ADAM.zero_grad()
                    loss.backward()
                    if params.CLIP_GRAD_NORM != -1:
                        ch.nn.utils.clip_grad_norm(net.parameters(), params.CLIP_GRAD_NORM)
                    params.POLICY_ADAM.step()
    # Log stats of the last minibatch and of the policy's log-std parameter.
    print(f'surrogate={surrogate.item():8.5f}, entropy={entropy_bonus.item():8.5f}, loss={loss.item():8.5f}')
    std = ch.exp(net.log_stdev)
    print(f'std_min={std.min().item():8.5f}, std_max={std.max().item():8.5f}, std_mean={std.mean().item():8.5f}')
    return loss.item(), surrogate.item(), entropy.item()
"""Computing an estimated upper bound of KL divergence using SGLD."""
def get_state_kl_bound_sgld(net, batch_states, batch_action_means, eps, steps, stdev, not_dones=None):
    """Estimate an upper bound on action-mean divergence under bounded state noise via SGLD.

    Runs `steps` iterations of sign-SGLD inside the L-inf ball of radius
    `eps` around `batch_states`, searching for perturbed states that
    maximize the squared difference of the policy mean (scaled by 1/stdev)
    from the clean-state action means.

    Inputs:
    - net, the policy network (LSTM policy when not_dones is given)
    - batch_states, states to perturb
    - batch_action_means, clean action means; computed here when None
    - eps, L-inf perturbation radius
    - steps, number of SGLD iterations
    - stdev, policy standard deviation used to scale the difference
    - not_dones, episode-boundary mask forwarded to an LSTM policy

    Returns a per-state tensor of squared scaled differences, with
    gradients flowing through the final forward pass only.
    """
    if not_dones is not None:
        # If we have not_dones, the underlying network is a LSTM.
        wrapped_net = functools.partial(net, not_dones=not_dones)
    else:
        wrapped_net = net
    if batch_action_means is None:
        # Not provided. We need to compute them.
        with ch.no_grad():
            batch_action_means, _ = wrapped_net(batch_states)
    else:
        batch_action_means = batch_action_means.detach()
    # upper and lower bounds for clipping
    states_ub = batch_states + eps
    states_lb = batch_states - eps
    step_eps = eps / steps
    # SGLD noise factor. We set (inverse) beta=1e-5 as gradients are relatively small here.
    beta = 1e-5
    noise_factor = np.sqrt(2 * step_eps * beta)
    noise = ch.randn_like(batch_states) * noise_factor
    # Random sign-initialized starting point inside the eps ball.
    var_states = (batch_states.clone() + noise.sign() * step_eps).detach().requires_grad_()
    for i in range(steps):
        # Find a nearby state new_phi that maximize the difference
        diff = (wrapped_net(var_states)[0] - batch_action_means) / stdev.detach()
        kl = (diff * diff).sum(axis=-1, keepdim=True).mean()
        # Need to clear gradients before the backward() for policy_loss
        kl.backward()
        # Reduce noise at every step.
        noise_factor = np.sqrt(2 * step_eps * beta) / (i+2)
        # Project noisy gradient to step boundary.
        update = (var_states.grad + noise_factor * ch.randn_like(var_states)).sign() * step_eps
        var_states.data += update
        # clip into the upper and lower bounds
        var_states = ch.max(var_states, states_lb)
        var_states = ch.min(var_states, states_ub)
        # Re-detach so the next iteration gets a fresh leaf with no grad.
        var_states = var_states.detach().requires_grad_()
    # Clear the parameter gradients accumulated by the kl.backward() calls.
    net.zero_grad()
    # Final, differentiable evaluation at the (fixed) adversarial states.
    diff = (wrapped_net(var_states.requires_grad_(False))[0] - batch_action_means) / stdev
    return (diff * diff).sum(axis=-1, keepdim=True)
def robust_ppo_step(all_states, actions, old_log_ps, rewards, returns, not_dones,
                    advs, net, params, store, opt_step, relaxed_net, eps_scheduler, beta_scheduler):
    '''
    Proximal Policy Optimization with robustness regularizer
    Runs K epochs of PPO as in https://arxiv.org/abs/1707.06347
    Inputs:
    - all_states, the historical value of all the states
    - actions, the actions that the policy sampled
    - old_log_ps, the log probability of the actions that the policy sampled
    - advs, advantages as estimated by GAE
    - net, policy network to train [WILL BE MUTATED]
    - params, additional placeholder for parameters like EPS
    - relaxed_net, convex-relaxation wrapper of net (convex-relax method)
    - eps_scheduler / beta_scheduler, schedules for the perturbation
      radius and the relaxation mixing coefficient
    Returns:
    - (loss, surrogate, entropy) of the final minibatch, as floats;
      main job is to mutate the net
    '''
    # Storing batches of stuff
    # if store is not None:
    #     orig_dists = net(all_states)
    ### ACTUAL PPO OPTIMIZATION START
    if params.SHARE_WEIGHTS:
        orig_vs = net.get_value(all_states).squeeze(-1).view([params.NUM_ACTORS, -1])
        old_vs = orig_vs.detach()
    # We treat all PPO epochs as one epoch.
    eps_scheduler.set_epoch_length(params.PPO_EPOCHS * params.NUM_MINIBATCHES)
    beta_scheduler.set_epoch_length(params.PPO_EPOCHS * params.NUM_MINIBATCHES)
    # We count from 1.
    eps_scheduler.step_epoch()
    beta_scheduler.step_epoch()
    if params.HISTORY_LENGTH > 0:
        # LSTM policy. Need to go over all episodes instead of states.
        # We normalize all advantages at once instead of batch by batch, since each batch may contain different number of samples.
        normalized_advs = adv_normalize(advs)
        batches, alive_masks, time_masks, lengths = pack_history([all_states, actions, old_log_ps, normalized_advs], not_dones, max_length=params.HISTORY_LENGTH)
    for _ in range(params.PPO_EPOCHS):
        if params.HISTORY_LENGTH > 0:
            # LSTM policy. Need to go over all episodes instead of states.
            # Gradients accumulate over all time chunks; one optimizer step
            # per PPO epoch.
            params.POLICY_ADAM.zero_grad()
            hidden = None
            surrogate = 0.0
            for i, batch in enumerate(batches):
                # Now we get chunks of time sequences, each of them with a maximum length of params.HISTORY_LENGTH.
                # select log probabilities, advantages of this minibatch.
                batch_states, batch_actions, batch_old_log_ps, batch_advs = batch
                mask = time_masks[i]
                """
                print('batch states', batch_states.size())
                print('batch actions', batch_actions.size())
                print('batch old_log_ps', batch_old_log_ps.size())
                print('batch advs', batch_advs.size())
                print('alive mask', alive_masks[i].size(), alive_masks[i].sum())
                print('mask', mask.size())
                """
                # keep only the alive hidden states.
                if hidden is not None:
                    # print('hidden[0]', hidden[0].size())
                    hidden = [h[:, alive_masks[i], :].detach() for h in hidden]
                    # print('hidden[0]', hidden[0].size())
                # dist contains mean and variance of Gaussian.
                mean, std, hidden = net.multi_forward(batch_states, hidden=hidden)
                dist = mean, std
                # Convert state distribution to log likelihood.
                new_log_ps = net.get_loglikelihood(dist, batch_actions)
                # print('batch new_log_ps', new_log_ps.size())
                """
                print('old')
                print(batch_old_log_ps)
                print('new')
                print(new_log_ps * mask)
                print('diff')
                print((batch_old_log_ps - new_log_ps * mask).pow(2).sum().item())
                """
                shape_equal_cmp(new_log_ps, batch_old_log_ps)
                # Calculate rewards
                # the surrogate rewards is basically exp(new_log_ps - old_log_ps) * advantage
                # dimension is the same as minibatch size.
                # We already normalized advs before. No need to normalize here.
                unclp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps, mask=mask, normalize=False)
                clp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps,
                                           clip_eps=params.CLIP_EPS, mask=mask, normalize=False)
                # Total loss, is the min of clipped and unclipped reward for each state, averaged.
                surrogate_batch = (-ch.min(unclp_rew, clp_rew) * mask).sum()
                # We sum the batch loss here because each batch contains an uneven number of trajectories.
                surrogate = surrogate + surrogate_batch
            # Divide surrogate loss by number of samples in this batch.
            surrogate = surrogate / all_states.size(0)
            # Calculate entropy bonus
            # So far, the entropy only depends on std and does not depend on time. No need to mask.
            entropy_bonus = net.entropies(dist)
            # Calculate regularizer under state perturbation.
            eps_scheduler.step_batch()
            beta_scheduler.step_batch()
            batch_action_means = None
            current_eps = eps_scheduler.get_eps()
            stdev = ch.exp(net.log_stdev)
            if params.ROBUST_PPO_DETACH_STDEV:
                # Detach stdev so that it won't be too large.
                stdev = stdev.detach()
            if params.ROBUST_PPO_METHOD == "sgld":
                # NOTE(review): the regularizer is computed over *all* states
                # (full rollout), not just this epoch's chunks -- presumably
                # intentional for the LSTM; confirm.
                kl_upper_bound = get_state_kl_bound_sgld(net, all_states, None,
                        eps=current_eps, steps=params.ROBUST_PPO_PGD_STEPS,
                        stdev=stdev, not_dones=not_dones).mean()
            else:
                raise ValueError(f"Unsupported robust PPO method {params.ROBUST_PPO_METHOD}")
            entropy = -params.ENTROPY_COEFF * entropy_bonus
            loss = surrogate + entropy + params.ROBUST_PPO_REG * kl_upper_bound
            # optimizer (only ADAM)
            loss.backward()
            if params.CLIP_GRAD_NORM != -1:
                ch.nn.utils.clip_grad_norm(net.parameters(), params.CLIP_GRAD_NORM)
            params.POLICY_ADAM.step()
        else:
            # Memoryless policy.
            # State is in shape (experience_size, observation_size). Usually 2048.
            state_indices = np.arange(all_states.shape[0])
            np.random.shuffle(state_indices)
            # We use a minibatch of states to do optimization, and each epoch contains several iterations.
            splits = np.array_split(state_indices, params.NUM_MINIBATCHES)
            # A typical mini-batch size is 2048/32=64
            for selected in splits:
                def sel(*args):
                    return [v[selected] for v in args]
                # old_log_ps: log probabilities of actions sampled based in experience buffer.
                # advs: advantages of these states.
                # both old_log_ps and advs are in shape (experience_size,) = 2048.
                tup = sel(all_states, actions, old_log_ps, advs)
                # select log probabilities, advantages of this minibatch.
                batch_states, batch_actions, batch_old_log_ps, batch_advs = tup
                # Forward propagation on current parameters (being constantly updated), to get distribution of these states
                # dist contains mean and variance of Gaussian.
                dist = net(batch_states)
                # Convert state distribution to log likelihood.
                new_log_ps = net.get_loglikelihood(dist, batch_actions)
                shape_equal_cmp(new_log_ps, batch_old_log_ps)
                # Calculate rewards
                # the surrogate rewards is basically exp(new_log_ps - old_log_ps) * advantage
                # dimension is the same as minibatch size.
                unclp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps)
                clp_rew = surrogate_reward(batch_advs, new=new_log_ps, old=batch_old_log_ps,
                                           clip_eps=params.CLIP_EPS)
                # Calculate entropy bonus
                entropy_bonus = net.entropies(dist).mean()
                # Calculate regularizer under state perturbation.
                eps_scheduler.step_batch()
                beta_scheduler.step_batch()
                batch_action_means = dist[0]
                current_eps = eps_scheduler.get_eps()
                stdev = ch.exp(net.log_stdev)
                if params.ROBUST_PPO_DETACH_STDEV:
                    # Detach stdev so that it won't be too large.
                    stdev = stdev.detach()
                if params.ROBUST_PPO_METHOD == "convex-relax":
                    kl_upper_bound = get_state_kl_bound(relaxed_net, batch_states, batch_action_means,
                            eps=current_eps, beta=beta_scheduler.get_eps(),
                            stdev=stdev).mean()
                elif params.ROBUST_PPO_METHOD == "sgld":
                    kl_upper_bound = get_state_kl_bound_sgld(net, batch_states, batch_action_means,
                            eps=current_eps, steps=params.ROBUST_PPO_PGD_STEPS,
                            stdev=stdev).mean()
                else:
                    raise ValueError(f"Unsupported robust PPO method {params.ROBUST_PPO_METHOD}")
                # Total loss, is the min of clipped and unclipped reward for each state, averaged.
                surrogate = -ch.min(unclp_rew, clp_rew).mean()
                entropy = -params.ENTROPY_COEFF * entropy_bonus
                loss = surrogate + entropy + params.ROBUST_PPO_REG * kl_upper_bound
                # If we are sharing weights, take the value step simultaneously
                # (since the policy and value networks depend on the same weights)
                if params.SHARE_WEIGHTS:
                    tup = sel(returns, not_dones, old_vs)
                    batch_returns, batch_not_dones, batch_old_vs = tup
                    val_loss = value_step(batch_states, batch_returns, batch_advs,
                                          batch_not_dones, net.get_value, None, params,
                                          store, old_vs=batch_old_vs, opt_step=opt_step)
                    loss += params.VALUE_MULTIPLIER * val_loss
                # Optimizer step (Adam or SGD)
                if params.POLICY_ADAM is None:
                    grad = ch.autograd.grad(loss, net.parameters())
                    flat_grad = flatten(grad)
                    if params.CLIP_GRAD_NORM != -1:
                        norm_grad = ch.norm(flat_grad)
                        flat_grad = flat_grad if norm_grad <= params.CLIP_GRAD_NORM else \
                            flat_grad / norm_grad * params.CLIP_GRAD_NORM
                    assign(flatten(net.parameters()) - params.PPO_LR * flat_grad, net.parameters())
                else:
                    params.POLICY_ADAM.zero_grad()
                    loss.backward()
                    if params.CLIP_GRAD_NORM != -1:
                        ch.nn.utils.clip_grad_norm(net.parameters(), params.CLIP_GRAD_NORM)
                    params.POLICY_ADAM.step()
    # Logging.
    kl_upper_bound = kl_upper_bound.item()
    surrogate = surrogate.item()
    entropy_bonus = entropy_bonus.item()
    print(f'eps={eps_scheduler.get_eps():8.6f}, beta={beta_scheduler.get_eps():8.6f}, kl={kl_upper_bound:10.5g}, '
          f'surrogate={surrogate:8.5f}, entropy={entropy_bonus:8.5f}, loss={loss.item():8.5f}')
    std = ch.exp(net.log_stdev)
    print(f'std_min={std.min().item():8.5f}, std_max={std.max().item():8.5f}, std_mean={std.mean().item():8.5f}')
    if store is not None:
        # TODO: ADV: add row name suffix
        row = {
            'eps': eps_scheduler.get_eps(),
            'beta': beta_scheduler.get_eps(),
            'kl': kl_upper_bound,
            'surrogate': surrogate,
            'entropy': entropy_bonus,
            'loss': loss.item(),
        }
        store.log_table_and_tb('robust_ppo_data', row)
    return loss.item(), surrogate, entropy_bonus
def trpo_step(all_states, actions, old_log_ps, rewards, returns, not_dones, advs, net, params, store, opt_step):
'''
Trust Region Policy Optimization
Runs K epochs of TRPO as in https://arxiv.org/abs/1502.05477
Inputs:
- all_states, the historical value of all the states
- actions, the actions that the policy sampled
- old_log_ps, the probability of the actions that the policy sampled
- advs, advantages as estimated by GAE
- net, policy network to train [WILL BE MUTATED]
- params, additional placeholder for parameters like EPS
Returns:
- The TRPO loss; main job is to mutate the net
'''
# Initial setup
initial_parameters = flatten(net.parameters()).clone()
# all_states is in shape (experience_size, observation_size). Usually 2048 experiences.
# Get mean and std of action distribution for all experiences.
pds = net(all_states)
# And compute the log probabilities for the actions chosen at rollout time.
action_log_probs = net.get_loglikelihood(pds, actions)
# Calculate losses
surr_rew = surrogate_reward(advs, new=action_log_probs, old=old_log_ps).mean()
grad = ch.autograd.grad(surr_rew, net.parameters(), retain_graph=True)
# This represents the computation of gradient, and will be used to obtain 2nd order.
flat_grad = flatten(grad)
# Make fisher product estimator. Only use a fraction of examples.
num_samples = int(all_states.shape[0] * params.FISHER_FRAC_SAMPLES)
selected = np.random.choice(range(all_states.shape[0]), num_samples, replace=False)
detached_selected_pds = select_prob_dists(pds, selected, detach=True)
selected_pds = select_prob_dists(pds, selected, detach=False)
# Construct the KL divergence which we will optimize on. This is essentially 0, but what we care about is the Hessian.
# We want to know when the network parameter changes, how the K-L divergence of network output changes.
kl = net.calc_kl(detached_selected_pds, selected_pds).mean()
# g is the gradient of the KL divergence w.r.t to parameters. It is 0 at the starting point.
g = flatten(ch.autograd.grad(kl, net.parameters(), create_graph=True))
'''
Fisher matrix to vector x product. Essentially, a Hessian-vector product of K-L divergence w.r.t network parameter.
'''
def fisher_product(x, damp_coef=1.):
contig_flat = lambda q: ch.cat([y.contiguous().view(-1) for y in q])
# z is the gradient-vector product. Take the derivation of it to get Hessian vector product.
z = g @ x
hv = ch.autograd.grad(z, net.parameters(), retain_graph=True)
return contig_flat(hv).detach() + x*params.DAMPING * damp_coef
# Find KL constrained gradient step
# The Fisher matrix A is unknown, but we can compute the product.
# flat_grad is the right-hand side value b. Want to solve x in Ax = b
step = cg_solve(fisher_product, flat_grad, params.CG_STEPS)
# Return the solution. "step" has size of network parameters.
max_step_coeff = (2 * params.MAX_KL / (step @ fisher_product(step)))**(0.5)
max_trpo_step = max_step_coeff * step
if store and params.SHOULD_LOG_KL:
kl_approximation_logging(all_states, pds, flat_grad, step, net, store)
kl_vs_second_order_approx(all_states, pds, net, max_trpo_step, params, store, opt_step)
# Backtracking line search
with ch.no_grad():
# Backtracking function, which gives the improvement on objective given an update direction s.
def backtrack_fn(s):
assign(initial_parameters + s.data, net.parameters())
test_pds = net(all_states)
test_action_log_probs = net.get_loglikelihood(test_pds, actions)
new_reward = surrogate_reward(advs, new=test_action_log_probs, old=old_log_ps).mean()
# surr_new is the surrogate before optimization.
# We need to make sure the loss is improving, and KL between old probabilites are not too large.
if params.TRPO_KL_REDUCE_FUNC == 'mean':
kl_metric = net.calc_kl(pds, test_pds).mean()
elif params.TRPO_KL_REDUCE_FUNC == 'max':
kl_metric = net.calc_kl(pds, test_pds).max()
else:
raise ValueError("unknown reduce function " + params.TRPO_KL_REDUCE_FUNC)
if new_reward <= surr_rew or kl_metric > params.MAX_KL:
return -float('inf')
return new_reward - surr_rew
expected_improve = flat_grad @ max_trpo_step
# max_trpo_step is the search direction. Backtracking line search will find a scaler for it.
# expected_improve is the expected decrease in loss estimated by gradient.
# backtracking_line_search will try a scaler 0.5, 0.25, 0.125, etc to achieve expected improvement.
final_step = backtracking_line_search(backtrack_fn, max_trpo_step,
expected_improve,
num_tries=params.MAX_BACKTRACK)
assign(initial_parameters + final_step, net.parameters())
# entropy regularization not used for TRPO so return 0.
return surr_rew.item(), 0.0, 0.0
def step_with_mode(mode, adversary=False):
    """Look up the policy-step function for a given training mode.

    Inputs:
    - mode, one of 'trpo', 'ppo', 'robust_ppo', 'adv_ppo', 'adv_trpo',
      'adv_sa_ppo'
    - adversary, when True return the step function used to train the
      adversary policy (None for modes without an adversary)
    Returns:
    - A reference to the corresponding step function.
    """
    agent_steps = {
        'trpo': trpo_step,
        'ppo': ppo_step,
        'robust_ppo': robust_ppo_step,
        'adv_ppo': ppo_step,
        'adv_trpo': trpo_step,
        'adv_sa_ppo': robust_ppo_step,
    }
    adversary_steps = {
        'trpo': None,
        'ppo': None,
        'robust_ppo': None,
        'adv_ppo': ppo_step,
        'adv_trpo': trpo_step,
        'adv_sa_ppo': ppo_step,
    }
    table = adversary_steps if adversary else agent_steps
    return table[mode]
def get_params_norm(net, p=2):
    """Compute the size-normalized p-norm of each weight matrix in `net`.

    Bias vectors (ndim == 1) and the special 'log_stdev'/'log_weight'
    parameters are skipped. Each norm is divided by the number of elements
    of the parameter.

    Returns:
    - (numpy array of norms, dict mapping parameter name -> norm)
    """
    norms = []
    norms_by_name = {}
    for name, param in net.named_parameters():
        # Only weight matrices: skip biases and log-std style vectors.
        if name in ('log_stdev', 'log_weight') or param.ndim == 1:
            continue
        value = ch.norm(param.view(-1), p=p).item() / np.prod(param.size())
        norms.append(value)
        norms_by_name[name] = value
    return np.array(norms), norms_by_name
# Module-level state, None until first use. NOTE(review): presumably caches
# the previously computed parameter norm between logging calls — the code
# that writes it is outside this chunk; confirm at usage sites.
last_norm = None
| 45,502 | 45.0091 | 163 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/logging.py | import torch as ch
import numpy as np
from .torch_utils import *
from torch.nn.utils import parameters_to_vector as flatten
from torch.nn.utils import vector_to_parameters as assign
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import pairwise_distances
from .steps import value_loss_returns, value_loss_gae, adv_normalize
def log_weight_updates(agent, old_params, new_params, table='weight_updates'):
    """Log per-layer weight norms and update magnitudes.

    For every parameter name in `old_params`, computes the l1/l2/linf norms
    of the new weights and of the update (new - old), prints a one-line
    summary per layer, and records everything in the agent's store under
    `table`.
    """
    row = {}
    for name in old_params.keys():
        updated = new_params[name]
        previous = old_params[name]
        delta = updated - previous
        flat_new = updated.view(-1)
        flat_delta = delta.view(-1)
        l1 = ch.norm(flat_new, p=1).item()
        l2 = ch.norm(flat_new, p=2).item()
        linf = ch.norm(flat_new, p=np.inf).item()
        l1_delta = ch.norm(flat_delta, p=1).item()
        l2_delta = ch.norm(flat_delta, p=2).item()
        linf_delta = ch.norm(flat_delta, p=np.inf).item()
        print('layer {}:\tlinf={:.5g} l2={:.5g} l1={:.5g}\tdelta_linf={:.5g} delta_l2={:.5g} delta_l1={:.5g}'.format(
            name, linf, l2, l1, linf_delta, l2_delta, l1_delta))
        prefix = name + '.'
        row[prefix + "l1"] = l1
        row[prefix + "l2"] = l2
        row[prefix + "linf"] = linf
        row[prefix + "delta_l1"] = l1_delta
        row[prefix + "delta_l2"] = l2_delta
        row[prefix + "delta_linf"] = linf_delta
    agent.store.log_table_and_tb(table, row)
#####
# Understanding TRPO approximations for KL constraint
#####
def paper_constraints_logging(agent, saps, old_pds, table):
    '''Computes average, max KL and max clipping ratio'''
    policy = agent.policy_model
    # Action distribution under the current (post-update) policy.
    new_pds = policy(saps.states)
    # Log-likelihood of the rollout actions under the new distribution.
    new_log_ps = policy.get_loglikelihood(new_pds, saps.actions)
    # Importance sampling ratios between new and old policies.
    max_rat = ch.exp(new_log_ps - saps.action_log_probs).max()
    kls = policy.calc_kl(old_pds, new_pds)
    avg_kl, max_kl = kls.mean(), kls.max()
    row = {
        'avg_kl': avg_kl,
        'max_kl': max_kl,
        'max_ratio': max_rat,
        'opt_step': agent.n_steps,
    }
    print(f'Step {agent.n_steps}, avg_kl {avg_kl:.5f}, max_kl {max_kl:.5f}, max_ratio {max_rat:.5f}')
    # Tensors must be converted to plain floats before logging.
    for key in row:
        if key != 'opt_step':
            row[key] = float(row[key])
    agent.store.log_table_and_tb(table, row)
##
# Treating value learning as a supervised learning problem:
# How well do we do?
##
def log_value_losses(agent, saps, label_prefix, table='value_data'):
    '''
    Computes the validation loss of the value function, treating it as a
    supervised-learning problem on the returns. Evaluates both admissible
    loss functions (pure returns and GAE) and writes them to the store
    under `table`, with column names prefixed by `label_prefix`.
    Inputs: agent, a batch of state-action pairs, a label prefix
    Outputs: None, logs to the store
    '''
    with ch.no_grad():
        # Compute validation loss
        predictions = agent.val_model(saps.states).squeeze(-1)
        loss_args = [predictions, saps.returns, saps.advantages,
                     saps.not_dones, agent.params, saps.values]
        returns_loss, _, _ = value_loss_returns(*loss_args, re=True)
        gae_loss, _, _ = value_loss_gae(*loss_args, re=True)
        agent.store.log_table_and_tb(table, {
            ('%s_returns_loss' % label_prefix): returns_loss,
            ('%s_gae_loss' % label_prefix): gae_loss,
        })
| 3,474 | 36.365591 | 117 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/custom_env.py | import os
import numpy as np
from PIL import Image
from gym.spaces.discrete import Discrete
from gym.spaces.box import Box as Continuous
import gym
import random
from .torch_utils import RunningStat, ZFilter, Identity, StateWithTime, RewardFilter
class Env:
    '''
    A wrapper around the OpenAI gym environment that adds support for the following:
        - Rewards normalization
        - State normalization
        - Adding timestep as a feature with a particular horizon T
    Also provides utility functions/properties for:
        - Whether the env is discrete or continuous
        - Size of feature space
        - Size of action space
    Provides the same API (init, step, reset) as the OpenAI gym
    '''
    def __init__(self, game, norm_states, norm_rewards, params, add_t_with_horizon=None, clip_obs=None, clip_rew=None,
                 show_env=False, save_frames=False, save_frames_path=""):
        self.env = gym.make(game)
        # Negative clip values mean "no clipping". Guard against None
        # explicitly: the defaults are None, and `None < 0` raises
        # TypeError on Python 3 (the previous code crashed when the
        # defaults were used).
        clip_obs = None if clip_obs is None or clip_obs < 0 else clip_obs
        clip_rew = None if clip_rew is None or clip_rew < 0 else clip_rew

        # Environment type
        self.is_discrete = type(self.env.action_space) == Discrete
        assert self.is_discrete or type(self.env.action_space) == Continuous

        # Number of actions
        action_shape = self.env.action_space.shape
        assert len(action_shape) <= 1  # scalar or vector actions
        self.num_actions = self.env.action_space.n if self.is_discrete else 0 \
                            if len(action_shape) == 0 else action_shape[0]

        # Number of features (flat observation vectors only)
        assert len(self.env.observation_space.shape) == 1
        self.num_features = self.env.reset().shape[0]

        # Support for state normalization or using time as a feature
        self.state_filter = Identity()
        if norm_states:
            self.state_filter = ZFilter(self.state_filter, shape=[self.num_features],
                                        clip=clip_obs)
        if add_t_with_horizon is not None:
            self.state_filter = StateWithTime(self.state_filter, horizon=add_t_with_horizon)

        # Support for rewards normalization
        self.reward_filter = Identity()
        if norm_rewards == "rewards":
            self.reward_filter = ZFilter(self.reward_filter, shape=(), center=False, clip=clip_rew)
        elif norm_rewards == "returns":
            self.reward_filter = RewardFilter(self.reward_filter, shape=(), gamma=params.GAMMA, clip=clip_rew)

        # Running total reward (set to 0.0 at resets)
        self.total_true_reward = 0.0

        # Set normalizers to read-write mode by default.
        self._read_only = False
        self.setup_visualization(show_env, save_frames, save_frames_path)

    # For environments that are created from a pickled object.
    def setup_visualization(self, show_env, save_frames, save_frames_path):
        """Configure on-screen rendering and frame dumping to disk."""
        self.save_frames = save_frames
        self.show_env = show_env
        self.save_frames_path = save_frames_path
        self.episode_counter = 0
        self.frame_counter = 0
        if self.save_frames:
            print(f'We will save frames to {self.save_frames_path}!')
            os.makedirs(os.path.join(self.save_frames_path, "000"), exist_ok=True)

    @property
    def normalizer_read_only(self):
        """When True, the state/reward filters stop updating their running stats."""
        return self._read_only

    @normalizer_read_only.setter
    def normalizer_read_only(self, value):
        self._read_only = bool(value)
        # Filters restored from old pickles may lack the read_only attribute.
        if isinstance(self.state_filter, ZFilter):
            if not hasattr(self.state_filter, 'read_only') and value:
                print('Warning: requested to set state_filter.read_only=True but the underlying ZFilter does not support it.')
            elif hasattr(self.state_filter, 'read_only'):
                self.state_filter.read_only = self._read_only
        if isinstance(self.reward_filter, ZFilter) or isinstance(self.reward_filter, RewardFilter):
            if not hasattr(self.reward_filter, 'read_only') and value:
                print('Warning: requested to set reward_filter.read_only=True but the underlying ZFilter does not support it.')
            elif hasattr(self.reward_filter, 'read_only'):
                self.reward_filter.read_only = self._read_only

    def reset(self):
        """Reset the wrapped env and all filters; return the filtered start state."""
        # Set a deterministic random seed for reproducibility
        self.env.seed(random.getrandbits(31))
        # Reset the state, and the running total reward
        start_state = self.env.reset()
        self.total_true_reward = 0.0
        self.counter = 0.0
        self.episode_counter += 1
        if self.save_frames:
            os.makedirs(os.path.join(self.save_frames_path, f"{self.episode_counter:03d}"), exist_ok=True)
            self.frame_counter = 0
        self.state_filter.reset()
        self.reward_filter.reset()
        return self.state_filter(start_state, reset=True)

    def step(self, action):
        """Step the wrapped env; return (filtered state, filtered reward, done, info)."""
        state, reward, is_done, info = self.env.step(action)
        if self.show_env:
            self.env.render()
        # Frameskip (every 6 frames, will be rendered at 25 fps)
        if self.save_frames and int(self.counter) % 6 == 0:
            image = self.env.render(mode='rgb_array')
            path = os.path.join(self.save_frames_path, f"{self.episode_counter:03d}", f"{self.frame_counter+1:04d}.bmp")
            image = Image.fromarray(image)
            image.save(path)
            self.frame_counter += 1
        state = self.state_filter(state)
        self.total_true_reward += reward
        self.counter += 1
        _reward = self.reward_filter(reward)
        if is_done:
            # info['done'] reports (episode length, true accumulated reward).
            info['done'] = (self.counter, self.total_true_reward)
        return state, _reward, is_done, info
| 5,671 | 42.630769 | 127 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/torch_utils.py | import torch as ch
from torch.distributions.categorical import Categorical
import numpy as np
'''
Common functions/utilities implemented in PyTorch
Sorted into categories:
- General functions
- Actor-critic helpers
- Policy gradient (PPO/TRPO) helpers
- Normalization helpers
- Neural network helpers
- Initialization helpers
'''
########################
### GENERAL UTILITY FUNCTIONS:
# Parameters, unroll, cu_tensorize, cpu_tensorize, shape_equal_cmp,
# shape_equal, scat, determinant, safe_op_or_neg_one
########################
# Store table name for saved models — presumably the cox store table where
# checkpoints are written; confirm at the call sites outside this chunk.
CKPTS_TABLE = 'checkpoints'
class Parameters(dict):
    """A dict whose entries are also readable/writable as attributes.

    Attribute names are lower-cased before the lookup, so `ps.A` and
    `ps.a` both resolve to key 'a'. A missing key raises AttributeError
    (not KeyError) so the object behaves like a plain namespace.
    """
    og_getattr = dict.__getitem__
    og_setattr = dict.__setitem__

    def __getattr__(self, name):
        try:
            return self.og_getattr(name.lower())
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        return self.og_setattr(name.lower(), value)
"""
class Parameters():
'''
Parameters class, just a nice way of accessing a dictionary
> ps = Parameters({"a": 1, "b": 3})
> ps.A # returns 1
> ps.B # returns 3
'''
def __init__(self, params):
self.params = params
def __getattr__(self, x):
if x == 'params':
return self
try:
res = self.params[x.lower()]
return res
except KeyError:
raise AttributeError(x)
"""
def unroll(*tensors):
    '''
    Flatten the first two dimensions of each given tensor.
    Inputs:
    - tensors; each argument is a tensor of at least 2 dimensions,
      or None (passed through unchanged)
    Returns:
    - A list with each (a, b, *rest) tensor reshaped to (a*b, *rest)
    '''
    flattened = []
    for tensor in tensors:
        if tensor is None:
            flattened.append(None)
            continue
        assert len(tensor.shape) >= 2
        merged = [tensor.shape[0] * tensor.shape[1], *tensor.shape[2:]]
        flattened.append(tensor.contiguous().view(merged))
    return flattened
def cu_tensorize(t):
    '''
    Turn an array-like into a float tensor on the GPU.
    Inputs:
    - t, list
    Returns:
    - CUDA float tensor version of t
    '''
    tensor = ch.tensor(t)
    return tensor.float().cuda()
def cpu_tensorize(t):
    '''
    Turn an array-like into a float tensor on the CPU.
    Inputs:
    - t, list
    Returns:
    - Tensor version of t
    '''
    return ch.tensor(t).to(dtype=ch.float)
def gpu_mapper(cpu=False):
    '''
    Return the torch device used for tensor placement.

    The previous implementation referenced an undefined global `cpu`
    and raised NameError on every call; the flag is now an explicit
    keyword parameter (defaulting to False, i.e. prefer the GPU, which
    preserves the apparent original intent).

    Inputs:
    - cpu, when True map tensors to the CPU instead of cuda:0
    Returns:
    - A torch.device object
    '''
    return ch.device('cpu') if cpu else ch.device('cuda:0')
def shape_equal_cmp(*args):
    '''
    Check that all passed tensors have identical shapes.
    Inputs:
    - All arguments should be tensors
    Returns:
    - True if all arguments have the same shape, else ValueError
      (the error message lists every shape)
    '''
    for first, second in zip(args, args[1:]):
        if first.shape != second.shape:
            s = "\n".join([str(x.shape) for x in args])
            raise ValueError("Expected equal shapes. Got:\n%s" % s)
    return True
def shape_equal(a, *args):
    '''
    Check that every tensor in `args` has the required shape `a`.
    Entries of -1 in `a` act as wildcards for that dimension.
    Inputs:
    - a, required shape for all the tensors
    - Rest of the arguments are tensors
    Returns:
    - True if all tensors are of shape a, otherwise ValueError
    '''
    err = "Expected shape: %s, Got shape %s"
    expected = list(a)
    for arg in args:
        actual = list(arg.shape)
        if actual == expected:
            continue
        if len(actual) != len(expected):
            raise ValueError(err % (str(a), str(arg.shape)))
        for want, got in zip(expected, actual):
            # -1 is a wildcard; any other mismatch is fatal.
            if want != -1 and want != got:
                raise ValueError(err % (str(a), str(arg.shape)))
    return shape_equal_cmp(*args)
def scat(a, b, axis):
    '''
    Set-or-Cat (scat): return b when a is None, otherwise the
    concatenation of a and b along `axis`. Circumvents a PyTorch
    quirk that auto-squeezes empty tensors.
    Inputs:
        a - A torch tensor, or None
        b - A torch tensor, can not be None
        axis - Axis to concat with
    '''
    return b if a is None else ch.cat((a, b), axis)
def determinant(mat):
    '''
    Determinant of a diagonal matrix, computed as exp(sum(log(mat))),
    i.e. the product of the entries of `mat`.
    NOTE(review): `mat` appears to be expected to hold the diagonal
    entries themselves (positive values) — a full matrix would feed its
    off-diagonal zeros into the log; confirm at call sites.
    '''
    log_entries = ch.log(mat)
    return ch.exp(log_entries.sum())
def safe_op_or_neg_one(maybe_empty, op):
    '''
    Apply an operation to a tensor that may be empty.
    Inputs:
    - maybe_empty, tensor which may be empty
    - op, an operation (tensor) -> (object) to perform
    Returns:
    - -1. if the tensor is empty, otherwise op(maybe_empty)
    '''
    return -1. if maybe_empty.nelement() == 0 else op(maybe_empty)
########################
### ACTOR-CRITIC HELPERS:
# discount_path, get_path_indices, select_prob_dists
########################
# Can be used to convert rewards into discounted returns:
# ret[i] = sum of t = i to T of gamma^(t-i) * rew[t]
def discount_path(path, h):
    '''
    Discounted suffix sums of a "path" x_1, ..., x_n:
        X_i = x_i + h*x_{i+1} + h^2*x_{i+2} + ...
    Could be done (more efficiently?) with SciPy; Python for readability.
    Inputs:
    - path, list/tensor of floats
    - h, discount rate
    Outputs:
    - Discounted path, as above
    '''
    running = 0
    suffix_sums = []
    # Walk the path backwards, accumulating the discounted total.
    for value in reversed(path):
        running = running * h + value
        suffix_sums.append(running)
    suffix_sums.reverse()
    return ch.stack(suffix_sums, 0)
def get_path_indices(not_dones):
"""
Returns list of tuples of the form:
(agent index, time index start, time index end + 1)
For each path seen in the not_dones array of shape (# agents, # time steps)
E.g. if we have an not_dones of composition:
tensor([[1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 0, 1, 1, 0, 1]], dtype=torch.uint8)
Then we would return:
[(0, 0, 3), (0, 3, 10), (1, 0, 3), (1, 3, 5), (1, 5, 9), (1, 9, 10)]
"""
indices = []
num_timesteps = not_dones.shape[1]
for actor in range(not_dones.shape[0]):
last_index = 0
for i in range(num_timesteps):
if not_dones[actor, i] == 0.:
indices.append((actor, last_index, i + 1))
last_index = i + 1
if last_index != num_timesteps:
indices.append((actor, last_index, num_timesteps))
return indices
def select_prob_dists(pds, selected=None, detach=True):
    '''
    Select a subset of probability distributions.
    `pds` is either a tensor, or a tuple whose first element (the means)
    gets indexed by `selected` while the remaining elements (shared std)
    are left as-is. When `detach` is True the results are detached from
    the autograd graph.
    '''
    if type(pds) is tuple:
        if selected is None:
            parts = pds
        else:
            parts = (pds[0][selected], pds[1])
        return tuple(p.detach() if detach else p for p in parts)
    subset = pds if selected is None else pds[selected]
    return subset.detach() if detach else subset
########################
### POLICY GRADIENT HELPERS:
# vjp, jvp, cg_solve, backtracking_line_search
########################
def vjp(f_x, theta, v, create=True):
    '''
    Vector-Jacobian product: calculates v^T J (equivalently J^T v)
    using standard backprop, without n^2 memory.
    Input:
    - f_x, function of which we want the Jacobian
    - theta, variable with respect to which we want the Jacobian
    - v, vector that we want multiplied by the Jacobian
    - create, whether to build a graph through the product
    Returns:
    - J^T @ v flattened into a single vector
    '''
    grads = ch.autograd.grad(f_x, theta, v, retain_graph=True,
                             create_graph=create)
    return ch.nn.utils.parameters_to_vector(grads)
def jvp(f_x, theta, v):
    '''
    Jacobian-vector product J @ v via the double-backprop trick, see
    https://j-towns.github.io/2017/06/12/A-new-trick.html for the math.
    Input:
    - f_x, function of which we want the Jacobian
    - theta, variable with respect to which we want the Jacobian
    - v, vector that we want multiplied by the Jacobian
    Returns:
    - J @ v, without using n^2 space
    '''
    # First VJP against a dummy vector produces J^T w with a graph
    # through w; differentiating that w.r.t. w yields J @ v.
    dummy = ch.ones_like(f_x, requires_grad=True)
    jac_t_dummy = vjp(f_x, theta, dummy)
    return vjp(jac_t_dummy, dummy, v)
def cg_solve(fvp_func, b, nsteps):
    '''
    Conjugate Gradients: approximately solve Hx = b where the Fisher
    matrix H is only available through the product fvp_func(p) = Hp.
    Input:
    - fvp_func, a callable returning the Fisher-vector product
    - b, the RHS of the system
    - nsteps, the number of CG iterations to take
    Returns:
    - An approximate solution x of Hx = b
    '''
    # Solution, residual and search-direction vectors.
    x = ch.zeros(b.size())
    residual = b.clone()
    direction = b.clone()
    res_norm_sq = ch.dot(residual, residual)
    for _ in range(nsteps):
        prev_norm_sq = res_norm_sq
        h_dir = fvp_func(direction)
        step_size = prev_norm_sq / ch.dot(direction, h_dir)
        x += step_size * direction
        residual -= step_size * h_dir
        res_norm_sq = ch.dot(residual, residual)
        direction = residual + (res_norm_sq / prev_norm_sq) * direction
    return x
def backtracking_line_search(f, x, expected_improve_rate,
                             num_tries=10, accept_ratio=.1):
    '''
    Backtracking line search: successively halve the step `x` until the
    measured improvement f(scaled) is positive and at least accept_ratio
    of the linearly-predicted improvement.
    Inputs:
    - f, function giving the improvement of the objective for a step
    - x, biggest step to try (successively halved)
    - num_tries, number of times to try halving x before giving up
    - accept_ratio, required fraction of the expected improve rate
    Returns the accepted (scaled) step, or 0. when every try fails.
    '''
    for attempt in range(num_tries):
        scale = 2 ** (-attempt)
        candidate = x * scale
        actual = f(candidate)
        predicted = expected_improve_rate * scale
        # Keep the division first: mirrors the original evaluation order.
        if actual / predicted > accept_ratio and actual > 0:
            print("We good! %f" % (scale,))
            return candidate
    return 0.
########################
### NORMALIZATION HELPERS:
# RunningStat, ZFilter, StateWithTime
########################
class RunningStat(object):
    '''
    Streaming estimator of the first and second moments (mean and
    variance) of a time series, one observation at a time.
    Taken from https://github.com/joschu/modular_rl
    Math in http://www.johndcook.com/blog/standard_deviation/
    '''
    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)
        self._S = np.zeros(shape)

    def push(self, x):
        """Fold one observation into the running moments."""
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = x
            return
        prev_mean = self._M.copy()
        self._M[...] = prev_mean + (x - prev_mean) / self._n
        self._S[...] = self._S + (x - prev_mean) * (x - self._M)

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # Sample variance; with fewer than two samples, fall back to mean^2.
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class Identity:
    '''
    No-op filter: __call__ returns its input unchanged (extra args are
    ignored) and reset() does nothing. Serves as the terminal element
    of a filter chain.
    '''
    def __call__(self, x, *args, **kwargs):
        return x

    def reset(self):
        pass
class RewardFilter:
    """
    "Incorrect" reward normalization [copied from OAI code].
    Incorrect in the sense that we
    1. update the discounted return
    2. divide the reward by std(return) *without* subtracting and
       adding back the mean
    Optionally clips the result.
    """
    def __init__(self, prev_filter, shape, gamma, clip=None, read_only=False):
        assert shape is not None
        self.gamma = gamma
        self.prev_filter = prev_filter
        self.rs = RunningStat(shape)
        self.ret = np.zeros(shape)
        self.clip = clip
        # When True, running statistics are frozen (not updated on call).
        self.read_only = read_only

    def __call__(self, x, **kwargs):
        x = self.prev_filter(x, **kwargs)
        self.ret = self.ret * self.gamma + x
        # Instances restored from old pickles may predate read_only;
        # a missing attribute means read-write (update the stats).
        if not getattr(self, 'read_only', False):
            self.rs.push(self.ret)
        x = x / (self.rs.std + 1e-8)
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x

    def reset(self):
        self.ret = np.zeros_like(self.ret)
        self.prev_filter.reset()
class ZFilter:
    """
    y = (x - mean) / std using running estimates of mean and std.
    Centering and scaling are individually optional; the result is
    optionally clipped. Chains through prev_filter.
    """
    def __init__(self, prev_filter, shape, center=True, scale=True, clip=None, read_only=False):
        assert shape is not None
        self.center = center
        self.scale = scale
        self.clip = clip
        self.rs = RunningStat(shape)
        self.prev_filter = prev_filter
        # When True, running statistics are frozen (not updated on call).
        self.read_only = read_only

    def __call__(self, x, **kwargs):
        x = self.prev_filter(x, **kwargs)
        # Instances restored from old pickles may predate read_only;
        # a missing attribute means read-write (update the stats).
        if not getattr(self, 'read_only', False):
            self.rs.push(x)
        if self.center:
            x = x - self.rs.mean
        if self.scale:
            if self.center:
                x = x / (self.rs.std + 1e-8)
            else:
                # Scale around the mean without shifting it.
                deviation = (x - self.rs.mean) / (self.rs.std + 1e-8)
                x = deviation + self.rs.mean
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x

    def reset(self):
        self.prev_filter.reset()
class StateWithTime:
    '''
    Tracks the time step t across calls and appends t/horizon as an
    extra feature dimension to the state. `reset=True` zeroes the
    counter, `count=False` skips incrementing it.
    '''
    def __init__(self, prev_filter, horizon):
        self.counter = 0
        self.horizon = horizon
        self.prev_filter = prev_filter

    def __call__(self, x, reset=False, count=True, **kwargs):
        x = self.prev_filter(x, **kwargs)
        if count:
            self.counter += 1
        if reset:
            self.counter = 0
        return np.array(list(x) + [self.counter / self.horizon])

    def reset(self):
        self.prev_filter.reset()
class Trajectories:
    """Container for one batch of rollout data (states, rewards,
    returns, actions, log-probs, advantages, values, action stats).
    `unrolled` records whether the leading (agent, time) dimensions
    have already been flattened into one batch dimension."""
    def __init__(self, states=None, rewards=None, returns=None, not_dones=None,
                 actions=None, action_log_probs=None, advantages=None,
                 unrolled=False, values=None, action_means=None, action_std=None):
        self.states = states
        self.rewards = rewards
        self.returns = returns
        self.values = values
        self.not_dones = not_dones
        self.actions = actions
        self.action_log_probs = action_log_probs
        self.advantages = advantages
        self.action_means = action_means  # A batch of vectors.
        self.action_std = action_std  # A single vector.
        self.unrolled = unrolled
        # A former consistency check on the leading dimensions of every
        # field was disabled here ("this is disgusting and we should fix
        # it"); fields are intentionally unvalidated.

    def unroll(self):
        """Flatten the (agent, time) dims of every tensor field."""
        assert not self.unrolled
        return self.tensor_op(unroll, should_wrap=False)

    def tensor_op(self, lam, should_wrap=True):
        """Apply `lam` to every tensor field and return a new, unrolled
        Trajectories. With should_wrap=True, `lam` is mapped over each
        field individually; otherwise it receives the fields as *args."""
        if should_wrap:
            op = lambda *args: [lam(v) for v in args]
        else:
            op = lam
        states, rewards, returns, not_dones = op(
            self.states, self.rewards, self.returns, self.not_dones)
        actions, log_probs, advantages, means = op(
            self.actions, self.action_log_probs, self.advantages, self.action_means)
        values, = op(self.values)
        return Trajectories(states=states, rewards=rewards, returns=returns,
                            not_dones=not_dones, actions=actions,
                            action_log_probs=log_probs, advantages=advantages,
                            action_means=means, action_std=self.action_std,
                            values=values, unrolled=True)
########################
### NEURAL NETWORK HELPERS:
# orthogonal_init
########################
def orthogonal_init(tensor, gain=1):
    '''
    Fill `tensor` in place with the (scaled) orthogonal initialization
    scheme from OpenAI, and return it.
    Args:
        tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
        gain: optional scaling factor
    Examples:
        >>> w = torch.empty(3, 5)
        >>> orthogonal_init(w)
    '''
    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")
    rows = tensor.size(0)
    cols = tensor[0].numel()
    gauss = tensor.new(rows, cols).normal_(0, 1)
    transposed = rows < cols
    if transposed:
        gauss.t_()
    # Orthonormal factor comes from the SVD of the Gaussian sample.
    u, _, v = ch.svd(gauss, some=True)
    if transposed:
        u.t_()
    ortho = u if tuple(u.shape) == (rows, cols) else v
    with ch.no_grad():
        tensor.view_as(ortho).copy_(ortho)
        tensor.mul_(gain)
    return tensor
| 17,231 | 29.771429 | 118 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/agent.py | import torch
import torch as ch
import copy
import tqdm
import sys
import time
import dill
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
from copy import deepcopy
import gym
from auto_LiRPA import BoundedModule
from auto_LiRPA.eps_scheduler import LinearScheduler
from auto_LiRPA.bounded_tensor import BoundedTensor
from auto_LiRPA.perturbations import PerturbationLpNorm
from .models import *
from .torch_utils import *
from .steps import value_step, step_with_mode, pack_history
from .logging import *
from multiprocessing import Process, Queue
from .custom_env import Env
from .convex_relaxation import get_kl_bound as get_state_kl_bound
class Trainer():
'''
This is a class representing a Policy Gradient trainer, which
trains both a deep Policy network and a deep Value network.
Exposes functions:
- advantage_and_return
- multi_actor_step
- reset_envs
- run_trajectories
- train_step
Trainer also handles all logging, which is done via the "cox"
library
'''
def __init__(self, policy_net_class, value_net_class, params,
             store, advanced_logging=True, log_every=5):
    '''
    Initializes a new Trainer class.
    Inputs:
    - policy_net_class, the class of policy network to use (inheriting from nn.Module)
    - value_net_class, the class of value network to use (inheriting from nn.Module)
    - params, a dictionary with all of the required hyperparameters
    - store, a cox store used for logging (None disables saving)
    - advanced_logging, whether to log extra diagnostics
    - log_every, logging period in training steps
    '''
    # Parameter Loading.
    # NOTE(review): bare attribute reads such as self.VALUE_CALC and
    # self.CPU below presumably forward to self.params via a __getattr__
    # defined elsewhere in this class (outside this chunk) — confirm.
    self.params = Parameters(params)

    # Whether or not the value network uses the current timestep
    time_in_state = self.VALUE_CALC == "time"

    # Whether to use GPU (as opposed to CPU)
    if not self.CPU:
        torch.set_default_tensor_type("torch.cuda.FloatTensor")

    # Environment Loading
    def env_constructor():
        # Whether or not we should add the time to the state
        horizon_to_feed = self.T if time_in_state else None
        return Env(self.GAME, norm_states=self.NORM_STATES,
                   norm_rewards=self.NORM_REWARDS,
                   params=self.params,
                   add_t_with_horizon=horizon_to_feed,
                   clip_obs=self.CLIP_OBSERVATIONS,
                   clip_rew=self.CLIP_REWARDS,
                   show_env=self.SHOW_ENV,
                   save_frames=self.SAVE_FRAMES,
                   save_frames_path=self.SAVE_FRAMES_PATH)
    self.envs = [env_constructor() for _ in range(self.NUM_ACTORS)]
    # Derive action/observation metadata from the first environment.
    self.params.AGENT_TYPE = "discrete" if self.envs[0].is_discrete else "continuous"
    self.params.NUM_ACTIONS = self.envs[0].num_actions
    self.params.NUM_FEATURES = self.envs[0].num_features
    # Policy-update functions for the agent and (optionally) the adversary.
    self.policy_step = step_with_mode(self.MODE, adversary=False)
    self.adversary_policy_step = step_with_mode(self.MODE, adversary=True)
    # Per-step increment used to anneal the TRPO KL budget over training.
    self.params.MAX_KL_INCREMENT = (self.params.MAX_KL_FINAL - self.params.MAX_KL) / self.params.TRAIN_STEPS
    self.advanced_logging = advanced_logging
    self.n_steps = 0
    self.log_every = log_every
    self.policy_net_class = policy_net_class

    # Instantiation
    self.policy_model = policy_net_class(self.NUM_FEATURES, self.NUM_ACTIONS,
                                         self.INITIALIZATION,
                                         time_in_state=time_in_state,
                                         activation=self.policy_activation)

    # Instantiate convex relaxation model when mode is 'robust_ppo'
    if self.MODE == 'robust_ppo' or self.MODE == 'adv_sa_ppo':
        self.create_relaxed_model(time_in_state)

    # Minimax training: set up the adversary's policy and value networks.
    if self.MODE == 'adv_ppo' or self.MODE == 'adv_trpo' or self.MODE == 'adv_sa_ppo':
        # Copy parameters if they are set to "same".
        if self.params.ADV_PPO_LR_ADAM == "same":
            self.params.ADV_PPO_LR_ADAM = self.params.PPO_LR_ADAM
        if self.params.ADV_VAL_LR == "same":
            self.params.ADV_VAL_LR = self.params.VAL_LR
        if self.params.ADV_CLIP_EPS == "same":
            self.params.ADV_CLIP_EPS = self.params.CLIP_EPS
        if self.params.ADV_EPS == "same":
            self.params.ADV_EPS = self.params.ROBUST_PPO_EPS
        if self.params.ADV_ENTROPY_COEFF == "same":
            self.params.ADV_ENTROPY_COEFF = self.params.ENTROPY_COEFF
        # The adversary policy has features as input, features as output.
        self.adversary_policy_model = policy_net_class(self.NUM_FEATURES, self.NUM_FEATURES,
                                                       self.INITIALIZATION,
                                                       time_in_state=time_in_state,
                                                       activation=self.policy_activation)
        # Optimizer for adversary
        self.params.ADV_POLICY_ADAM = optim.Adam(self.adversary_policy_model.parameters(), lr=self.ADV_PPO_LR_ADAM, eps=1e-5)
        # Adversary value function.
        self.adversary_val_model = value_net_class(self.NUM_FEATURES, self.INITIALIZATION)
        self.adversary_val_opt = optim.Adam(self.adversary_val_model.parameters(), lr=self.ADV_VAL_LR, eps=1e-5)
        assert self.adversary_policy_model.discrete == (self.AGENT_TYPE == "discrete")
        # Learning rate annealling for adversary.
        if self.ANNEAL_LR:
            adv_lam = lambda f: 1-f/self.TRAIN_STEPS
            adv_ps = optim.lr_scheduler.LambdaLR(self.ADV_POLICY_ADAM,
                                                 lr_lambda=adv_lam)
            adv_vs = optim.lr_scheduler.LambdaLR(self.adversary_val_opt, lr_lambda=adv_lam)
            self.params.ADV_POLICY_SCHEDULER = adv_ps
            self.params.ADV_VALUE_SCHEDULER = adv_vs

    # At most one of the plain-SGD / Adam learning rates may be enabled.
    opts_ok = (self.PPO_LR == -1 or self.PPO_LR_ADAM == -1)
    assert opts_ok, "One of ppo_lr and ppo_lr_adam must be -1 (off)."
    # Whether we should use Adam or simple GD to optimize the policy parameters
    if self.PPO_LR_ADAM != -1:
        kwargs = {
            'lr':self.PPO_LR_ADAM,
        }
        if self.params.ADAM_EPS > 0:
            kwargs['eps'] = self.ADAM_EPS
        self.params.POLICY_ADAM = optim.Adam(self.policy_model.parameters(),
                                             **kwargs)
    else:
        self.params.POLICY_ADAM = optim.SGD(self.policy_model.parameters(), lr=self.PPO_LR)

    # If using a time dependent value function, add one extra feature
    # for the time ratio t/T
    if time_in_state:
        self.params.NUM_FEATURES = self.NUM_FEATURES + 1

    # Value function optimization
    self.val_model = value_net_class(self.NUM_FEATURES, self.INITIALIZATION)
    self.val_opt = optim.Adam(self.val_model.parameters(), lr=self.VAL_LR, eps=1e-5)
    assert self.policy_model.discrete == (self.AGENT_TYPE == "discrete")

    # Learning rate annealing
    # From OpenAI hyperparameters:
    # Set adam learning rate to 3e-4 * alpha, where alpha decays from 1 to 0 over training
    if self.ANNEAL_LR:
        lam = lambda f: 1-f/self.TRAIN_STEPS
        ps = optim.lr_scheduler.LambdaLR(self.POLICY_ADAM,
                                         lr_lambda=lam)
        vs = optim.lr_scheduler.LambdaLR(self.val_opt, lr_lambda=lam)
        self.params.POLICY_SCHEDULER = ps
        self.params.VALUE_SCHEDULER = vs

    if store is not None:
        self.setup_stores(store)
    else:
        print("Not saving results to cox store.")
def create_relaxed_model(self, time_in_state=False):
    """Create the state-perturbation (relaxed) policy model and the
    eps/beta schedulers used by robust PPO training.

    For the "convex-relax" method the policy is wrapped in an
    auto_LiRPA BoundedModule; otherwise (SGLD-based regularization)
    no wrapped model is needed. Only continuous (CtsPolicy) policies
    are supported; anything else raises NotImplementedError.
    """
    # Create state perturbation model for robust PPO training.
    if isinstance(self.policy_model, CtsPolicy):
        if self.ROBUST_PPO_METHOD == "convex-relax":
            from .convex_relaxation import RelaxedCtsPolicyForState
            relaxed_policy_model = RelaxedCtsPolicyForState(
                self.NUM_FEATURES, self.NUM_ACTIONS, time_in_state=time_in_state,
                activation=self.policy_activation, policy_model=self.policy_model)
            # Dummy input fixes the input signature for bound computation.
            dummy_input1 = torch.randn(1, self.NUM_FEATURES)
            inputs = (dummy_input1, )
            self.relaxed_policy_model = BoundedModule(relaxed_policy_model, inputs)
        else:
            # For SGLD no need to create the relaxed model
            self.relaxed_policy_model = None
        self.robust_eps_scheduler = LinearScheduler(self.params.ROBUST_PPO_EPS, self.params.ROBUST_PPO_EPS_SCHEDULER_OPTS)
        # "same" reuses the eps schedule options for the beta schedule.
        if self.params.ROBUST_PPO_BETA_SCHEDULER_OPTS == "same":
            self.robust_beta_scheduler = LinearScheduler(self.params.ROBUST_PPO_BETA, self.params.ROBUST_PPO_EPS_SCHEDULER_OPTS)
        else:
            self.robust_beta_scheduler = LinearScheduler(self.params.ROBUST_PPO_BETA, self.params.ROBUST_PPO_BETA_SCHEDULER_OPTS)
    else:
        raise NotImplementedError
"""Initialize sarsa training."""
def setup_sarsa(self, lr_schedule, eps_scheduler, beta_scheduler):
# Create the Sarsa model, with S and A as the input.
self.sarsa_model = ValueDenseNet(self.NUM_FEATURES + self.NUM_ACTIONS, self.INITIALIZATION)
self.sarsa_opt = optim.Adam(self.sarsa_model.parameters(), lr=self.VAL_LR, eps=1e-5)
self.sarsa_scheduler = optim.lr_scheduler.LambdaLR(self.sarsa_opt, lr_schedule)
self.sarsa_eps_scheduler = eps_scheduler
self.sarsa_beta_scheduler = beta_scheduler
# Convert model with relaxation wrapper.
dummy_input = torch.randn(1, self.NUM_FEATURES + self.NUM_ACTIONS)
self.relaxed_sarsa_model = BoundedModule(self.sarsa_model, dummy_input)
"""Initialize imitation (snooping) training."""
def setup_imit(self, train=True, lr=1e-3):
# Create a same policy network.
self.imit_network = self.policy_net_class(self.NUM_FEATURES, self.NUM_ACTIONS,
self.INITIALIZATION, time_in_state=self.VALUE_CALC == "time",
activation=self.policy_activation)
if train:
if self.PPO_LR_ADAM != -1:
kwargs = {
'lr':lr,
}
if self.params.ADAM_EPS > 0:
kwargs['eps'] = self.ADAM_EPS
self.imit_opt = optim.Adam(self.imit_network.parameters(), **kwargs)
else:
self.imit_opt = optim.SGD(self.imit_network.parameters(), lr=lr)
"""Training imitation agent"""
    def imit_steps(self, all_actions, all_states, all_not_dones, num_epochs):
        """Train the imitation network to reproduce the policy's actions.

        Runs ``num_epochs`` of MSE regression from states to actions. With an
        LSTM policy (HISTORY_LENGTH > 0) trajectories are packed into padded
        history batches and the loss is accumulated over the whole sequence;
        otherwise plain minibatch SGD over shuffled index splits is used.

        :param all_actions: actions taken by the policy being imitated.
        :param all_states: corresponding states.
        :param all_not_dones: not-done flags, used to segment trajectories.
        :param num_epochs: number of passes over the data.
        """
        assert len(all_actions) == len(all_states)
        for e in range(num_epochs):
            total_loss_val = 0.0
            if self.HISTORY_LENGTH > 0:
                loss = 0.0
                # Pack trajectories into (batch, time) chunks of bounded length.
                batches, alive_masks, time_masks, lengths = pack_history([all_states, all_actions], all_not_dones, max_length=self.HISTORY_LENGTH)
                self.imit_opt.zero_grad()
                hidden = None
                for i, batch in enumerate(batches):
                    batch_states, batch_actions = batch
                    mask = time_masks[i].unsqueeze(2)
                    if hidden is not None:
                        # Keep hidden states only for still-alive trajectories;
                        # detach to truncate backprop-through-time at chunk edges.
                        hidden = [h[:, alive_masks[i], :].detach() for h in hidden]
                    mean, std, hidden = self.imit_network.multi_forward(batch_states, hidden=hidden)
                    # Mask out padded timesteps before computing the loss.
                    batch_loss = torch.nn.MSELoss()(mean*mask, batch_actions*mask)
                    loss += batch_loss
                # One optimizer step per epoch over the accumulated loss.
                loss.backward()
                self.imit_opt.step()
                total_loss_val = loss.item()
            else:
                state_indices = np.arange(len(all_actions))
                # np.random.shuffle(state_indices)
                # NOTE(review): splits preserve temporal order within each
                # minibatch; only the order of the minibatches is shuffled.
                splits = np.array_split(state_indices, self.params.NUM_MINIBATCHES)
                np.random.shuffle(splits)
                for selected in splits:
                    def sel(*args):
                        return [v[selected] for v in args]
                    self.imit_opt.zero_grad()
                    # sel_not_dones is selected but unused here — presumably
                    # kept for symmetry with other training loops.
                    sel_states, sel_actions, sel_not_dones = sel(all_states, all_actions, all_not_dones)
                    act, _ = self.imit_network(sel_states)
                    loss = torch.nn.MSELoss()(sel_actions, act)
                    loss.backward()
                    self.imit_opt.step()
                    total_loss_val += loss.item()
            print('Epoch [%d/%d] avg loss: %.8f' % (e+1, num_epochs, total_loss_val / len(all_actions)))
def setup_stores(self, store):
# Logging setup
self.store = store
if self.MODE == 'adv_ppo' or self.MODE == 'adv_trpo' or self.MODE == 'adv_sa_ppo':
adv_optimization_table = {
'mean_reward':float,
'final_value_loss':float,
'final_policy_loss':float,
'final_surrogate_loss':float,
'entropy_bonus':float,
'mean_std':float
}
self.store.add_table('optimization_adv', adv_optimization_table)
optimization_table = {
'mean_reward':float,
'final_value_loss':float,
'final_policy_loss':float,
'final_surrogate_loss':float,
'entropy_bonus':float,
'mean_std':float,
}
self.store.add_table('optimization', optimization_table)
if self.advanced_logging:
paper_constraint_cols = {
'avg_kl':float,
'max_kl':float,
'max_ratio':float,
'opt_step':int
}
value_cols = {
'heldout_gae_loss':float,
'heldout_returns_loss':float,
'train_gae_loss':float,
'train_returns_loss':float
}
weight_cols = {}
for name, _ in self.policy_model.named_parameters():
name += "."
for k in ["l1", "l2", "linf", "delta_l1", "delta_l2", "delta_linf"]:
weight_cols[name + k] = float
self.store.add_table('paper_constraints_train',
paper_constraint_cols)
self.store.add_table('paper_constraints_heldout',
paper_constraint_cols)
self.store.add_table('value_data', value_cols)
self.store.add_table('weight_updates', weight_cols)
if self.params.MODE == 'robust_ppo' or self.params.MODE == 'adv_sa_ppo':
robust_cols ={
'eps': float,
'beta': float,
'kl': float,
'surrogate': float,
'entropy': float,
'loss': float,
}
self.store.add_table('robust_ppo_data', robust_cols)
def __getattr__(self, x):
'''
Allows accessing self.A instead of self.params.A
'''
if x == 'params':
return {}
try:
return getattr(self.params, x)
except KeyError:
raise AttributeError(x)
    def advantage_and_return(self, rewards, values, not_dones):
        """
        Calculate GAE advantage, discounted returns, and
        true reward (average reward per trajectory)

        GAE: delta_t^V = r_t + discount * V(s_{t+1}) - V(s_t)
        using formula from John Schulman's code:
        V(s_t+1) = {0 if s_t is terminal
                   {v_s_{t+1} if s_t not terminal and t != T (last step)
                   {v_s if s_t not terminal and t == T

        :param rewards: (# actors, T) rewards per step.
        :param values: (# actors, T) value estimates per step.
        :param not_dones: (# actors, T) 1 if non-terminal, 0 if terminal.
        :return: (advantages, returns), both detached (# actors, T) tensors.
        """
        assert shape_equal_cmp(rewards, values, not_dones)
        # Shift values left by one step; the last column is duplicated to
        # stand in for V at t == T, and terminal steps are zeroed by not_dones.
        V_s_tp1 = ch.cat([values[:,1:], values[:, -1:]], 1) * not_dones
        # One-step TD residuals (the deltas in the GAE formula above).
        deltas = rewards + self.GAMMA * V_s_tp1 - values
        # now we need to discount each path by gamma * lam
        advantages = ch.zeros_like(rewards)
        returns = ch.zeros_like(rewards)
        # Discount within each completed path separately so credit never
        # leaks across episode boundaries.
        indices = get_path_indices(not_dones)
        for agent, start, end in indices:
            advantages[agent, start:end] = discount_path( \
                    deltas[agent, start:end], self.LAMBDA*self.GAMMA)
            returns[agent, start:end] = discount_path( \
                    rewards[agent, start:end], self.GAMMA)
        return advantages.clone().detach(), returns.clone().detach()
def reset_envs(self, envs):
'''
Resets environments and returns initial state with shape:
(# actors, 1, ... state_shape)
'''
if self.CPU:
return cpu_tensorize([env.reset() for env in envs]).unsqueeze(1)
else:
return cu_tensorize([env.reset() for env in envs]).unsqueeze(1)
def multi_actor_step(self, actions, envs):
'''
Simulate a "step" by several actors on their respective environments
Inputs:
- actions, list of actions to take
- envs, list of the environments in which to take the actions
Returns:
- completed_episode_info, a variable-length list of final rewards and episode lengths
for the actors which have completed
- rewards, a actors-length tensor with the rewards collected
- states, a (actors, ... state_shape) tensor with resulting states
- not_dones, an actors-length tensor with 0 if terminal, 1 otw
'''
normed_rewards, states, not_dones = [], [], []
completed_episode_info = []
for action, env in zip(actions, envs):
gym_action = action[0].cpu().numpy()
new_state, normed_reward, is_done, info = env.step(gym_action)
if is_done:
completed_episode_info.append(info['done'])
new_state = env.reset()
# Aggregate
normed_rewards.append([normed_reward])
not_dones.append([int(not is_done)])
states.append([new_state])
tensor_maker = cpu_tensorize if self.CPU else cu_tensorize
data = list(map(tensor_maker, [normed_rewards, states, not_dones]))
return [completed_episode_info, *data]
    def run_trajectories(self, num_saps, return_rewards=False, should_tqdm=False,
                         collect_adversary_trajectory=False):
        """
        Resets environments, and runs num_saps // NUM_ACTORS steps in each
        environment in self.envs. If an environment hits a terminal state,
        the env is restarted and the terminal timestep marked. Each item in
        the returned Trajectories is a tensor in which the first coordinate
        represents the actor, and the second coordinate represents the time
        step. The third+ coordinates, if they exist, represent additional
        information for each time step.

        :param num_saps: total number of state-action pairs to collect.
        :param return_rewards: also return the list of per-episode rewards.
        :param should_tqdm: show a tqdm progress bar over timesteps.
        :param collect_adversary_trajectory: collect data for the minimax
            adversary instead of the agent (rewards negated, actions are the
            state perturbations).
        :return: (avg_episode_length, avg_episode_reward, trajs[, ep_rewards])
        """
        if collect_adversary_trajectory:
            # The adversary does not change environment normalization.
            # So a trained adversary can be applied to the original policy when it is trained as an optimal attack.
            old_env_read_only_flags = []
            for e in self.envs:
                old_env_read_only_flags.append(e.normalizer_read_only)
                e.normalizer_read_only = True
        # Arrays to be updated with historic info
        envs = self.envs
        initial_states = self.reset_envs(envs)
        self.policy_model.reset()
        self.val_model.reset()
        # Holds information (length and true reward) about completed episodes
        completed_episode_info = []
        traj_length = int(num_saps // self.NUM_ACTORS)
        shape = (self.NUM_ACTORS, traj_length)
        all_zeros = [ch.zeros(shape) for i in range(3)]
        rewards, not_dones, action_log_probs = all_zeros
        if collect_adversary_trajectory:
            # collect adversary trajectory is only valid in minimax training mode.
            assert self.MODE == "adv_ppo" or self.MODE == "adv_trpo" or self.MODE == "adv_sa_ppo"
            # For the adversary, action is a state perturbation.
            actions_shape = shape + (self.NUM_FEATURES,)
        else:
            actions_shape = shape + (self.NUM_ACTIONS,)
        actions = ch.zeros(actions_shape)
        # Mean of the action distribution. Used for avoid unnecessary recomputation.
        action_means = ch.zeros(actions_shape)
        # Log Std of the action distribution.
        # action_stds = ch.zeros(actions_shape)
        # One extra slot so that the "next state" of the final step can be stored.
        states_shape = (self.NUM_ACTORS, traj_length+1) + initial_states.shape[2:]
        states = ch.zeros(states_shape)
        iterator = range(traj_length) if not should_tqdm else tqdm.trange(traj_length)
        assert self.NUM_ACTORS == 1
        is_advpolicy_training = self.MODE == "adv_ppo" or self.MODE == "adv_trpo" or self.MODE == "adv_sa_ppo"
        # Whether the saved states should be the post-perturbation ones.
        collect_perturbed_state = ((is_advpolicy_training and not collect_adversary_trajectory)
                or ((not is_advpolicy_training) and self.COLLECT_PERTURBED_STATES))
        if collect_perturbed_state:
            # States are collected after the perturbation. We cannot set states[:, 0, :] here as we have not started perturbation yet.
            last_states = initial_states.squeeze(1) # Remove the second dimension (number of actions)
        else:
            # States are collected before the perturbation.
            states[:, 0, :] = initial_states
            last_states = states[:, 0, :]
        for t in iterator:
            # assert shape_equal([self.NUM_ACTORS, self.NUM_FEATURES], last_states)
            # Retrieve probabilities:
            # action_pds: (# actors, # actions), prob dists over actions
            # next_actions: (# actors, 1), indices of actions
            # next_action_probs: (# actors, 1), prob of taken actions
            # The adversary may use the policy or value function, so pause history update.
            self.policy_model.pause_history()
            self.val_model.pause_history()
            if is_advpolicy_training:
                # the new minimax adversarial training.
                # get perturbation density.
                # When collecting trajectory for the agent, only run the optimal attack when ADV_ADVERSARY_RATIO >= random.random().
                # When collecting trajectory for the adversary, always apply the optimal attack.
                if collect_adversary_trajectory or self.params.ADV_ADVERSARY_RATIO >= random.random():
                    # Only attack a portion of steps.
                    adv_perturbation_pds = self.adversary_policy_model(last_states)
                    next_adv_perturbation_means, next_adv_perturbation_stds = adv_perturbation_pds
                    # sample from the density.
                    next_adv_perturbations = self.adversary_policy_model.sample(adv_perturbation_pds)
                    # get log likelihood for this perturbation.
                    next_adv_perturbation_log_probs = self.adversary_policy_model.get_loglikelihood(adv_perturbation_pds, next_adv_perturbations)
                    # add the perturbation to state (we learn a residual).
                    last_states = last_states + ch.nn.functional.hardtanh(next_adv_perturbations) * self.ADV_EPS
                    # the perturbation itself is the action (similar to the next_actions variable below)
                    next_adv_perturbations = next_adv_perturbations.unsqueeze(1)
            else:
                # (optional) apply naive adversarial training (not optimal attack)
                maybe_attacked_last_states = self.apply_attack(last_states)
                # Note that for naive adversarial training, we use the state under perturbation to get the actions.
                # However in the trajectory we may still save the state without perturbation as the true environment states are not perturbed.
                # (depending on if self.COLLECT_PERTURBED_STATES is set)
                # double check if the attack eps is valid
                max_eps = (maybe_attacked_last_states - last_states).abs().max().item()
                attack_eps = float(self.params.ROBUST_PPO_EPS) if self.params.ATTACK_EPS == "same" else float(self.params.ATTACK_EPS)
                if max_eps > attack_eps + 1e-5:
                    raise RuntimeError(f"{max_eps} > {attack_eps}. Attack implementation has bug and eps is not correctly handled.")
                last_states = maybe_attacked_last_states
            self.policy_model.continue_history()
            self.val_model.continue_history()
            action_pds = self.policy_model(last_states)
            next_action_means, next_action_stds = action_pds
            next_actions = self.policy_model.sample(action_pds)
            next_action_log_probs = self.policy_model.get_loglikelihood(action_pds, next_actions)
            next_action_log_probs = next_action_log_probs.unsqueeze(1)
            # shape_equal([self.NUM_ACTORS, 1], next_action_log_probs)
            # if discrete, next_actions is (# actors, 1)
            # otw if continuous (# actors, 1, action dim)
            next_actions = next_actions.unsqueeze(1)
            # if self.policy_model.discrete:
            #     assert shape_equal([self.NUM_ACTORS, 1], next_actions)
            # else:
            #     assert shape_equal([self.NUM_ACTORS, 1, self.policy_model.action_dim])
            ret = self.multi_actor_step(next_actions, envs)
            # done_info = List of (length, reward) pairs for each completed trajectory
            # (next_rewards, next_states, next_dones) act like multi-actor env.step()
            done_info, next_rewards, next_states, next_not_dones = ret
            # Reset the policy (if the policy has memory) if we are done
            if next_not_dones.item() == 0:
                self.policy_model.reset()
                self.val_model.reset()
            # assert shape_equal([self.NUM_ACTORS, 1], next_rewards, next_not_dones)
            # assert shape_equal([self.NUM_ACTORS, 1, self.NUM_FEATURES], next_states)
            # If some of the actors finished AND this is not the last step
            # OR some of the actors finished AND we have no episode information
            # NOTE(review): this compares t against self.T - 1 while the loop
            # runs traj_length steps — confirm self.T always equals traj_length here.
            if len(done_info) > 0 and (t != self.T - 1 or len(completed_episode_info) == 0):
                completed_episode_info.extend(done_info)
            # Update histories
            # each shape: (nact, t, ...) -> (nact, t + 1, ...)
            if collect_adversary_trajectory:
                # negate the reward for minimax training. Collect states before perturbation.
                next_rewards = -next_rewards
                pairs = [
                    (rewards, next_rewards),
                    (not_dones, next_not_dones),
                    (actions, next_adv_perturbations),  # The sampled actions, which is perturbations.
                    (action_means, next_adv_perturbation_means),  # The Gaussian mean of actions.
                    # (action_stds, next_action_stds),  # The Gaussian std of actions, is a constant, no need to save.
                    (action_log_probs, next_adv_perturbation_log_probs),
                    (states, next_states),  # we save the true environment state without perturbation.
                ]
            else:
                if collect_perturbed_state:
                    # New adversarial training. We save the perturbed environment state.
                    pairs = [
                        (rewards, next_rewards),
                        (not_dones, next_not_dones),
                        (actions, next_actions),  # The sampled actions.
                        (action_means, next_action_means),  # The Gaussian mean of actions.
                        # (action_stds, next_action_stds),  # The Gaussian std of actions, is a constant, no need to save.
                        (action_log_probs, next_action_log_probs),
                        (states, last_states.unsqueeze(1)),  # perturbed environment state.
                    ]
                else:
                    # Previous naive adversarial training. We save the true environment state.
                    pairs = [
                        (rewards, next_rewards),
                        (not_dones, next_not_dones),
                        (actions, next_actions),  # The sampled actions.
                        (action_means, next_action_means),  # The Gaussian mean of actions.
                        # (action_stds, next_action_stds),  # The Gaussian std of actions, is a constant, no need to save.
                        (action_log_probs, next_action_log_probs),
                        (states, next_states),  # true environment state.
                    ]
            for total, v in pairs:
                if total is states and not collect_perturbed_state:
                    # Next states, stores in the next position.
                    total[:, t+1] = v
                else:
                    # The current action taken, and reward received.
                    # When perturbed state is collected, we also do not need the +1 shift
                    total[:, t] = v
            last_states = next_states[:, 0, :]
        if collect_perturbed_state:
            # The final state slot has not been filled yet because states are
            # saved post-perturbation; perturb the last state now.
            if is_advpolicy_training:
                # missing the last state; we have not perturbed it yet.
                adv_perturbation_pds = self.adversary_policy_model(last_states)
                # sample from the density.
                next_adv_perturbations = self.adversary_policy_model.sample(adv_perturbation_pds)
                # add the perturbation to state (we learn a residual).
                last_states = last_states + ch.nn.functional.hardtanh(next_adv_perturbations) * self.ADV_EPS
            else:
                last_states = self.apply_attack(last_states)
            states[:, -1] = last_states.unsqueeze(1)
        if collect_adversary_trajectory:
            # Finished adversary step. Take new samples for normalizing environment.
            for e, flag in zip(self.envs, old_env_read_only_flags):
                e.normalizer_read_only = flag
        # Calculate the average episode length and true rewards over all the trajectories
        infos = np.array(list(zip(*completed_episode_info)))
        # print(infos)
        if infos.size > 0:
            _, ep_rewards = infos
            avg_episode_length, avg_episode_reward = np.mean(infos, axis=1)
        else:
            # No episode finished during this rollout; use sentinels.
            ep_rewards = [-1]
            avg_episode_length = -1
            avg_episode_reward = -1
        # Last state is never acted on, discard
        states = states[:,:-1,:]
        trajs = Trajectories(rewards=rewards,
                action_log_probs=action_log_probs, not_dones=not_dones,
                actions=actions, states=states, action_means=action_means, action_std=next_action_stds)
        to_ret = (avg_episode_length, avg_episode_reward, trajs)
        if return_rewards:
            to_ret += (ep_rewards,)
        return to_ret
"""Conduct adversarial attack using value network."""
    def apply_attack(self, last_states):
        """Perturb ``last_states`` with the configured adversarial attack.

        Supported ATTACK_METHOD values: "critic" (PGD minimizing the critic
        value), "random" (uniform noise), "action"/"action+imit" (SGLD
        maximizing the action deviation, optionally against a trained
        imitation network), "sarsa"/"sarsa+action" (PGD against a learned
        robust Q-network), "advpolicy" (a learned adversary policy), and
        "none". Only ATTACK_RATIO of the calls actually apply the attack.
        All perturbations are bounded by eps in l-infinity norm.

        :param last_states: batch of current states to perturb.
        :return: detached perturbed states (or the input, unmodified).
        """
        if self.params.ATTACK_RATIO < random.random():
            # Only attack a portion of steps.
            return last_states
        eps = self.params.ATTACK_EPS
        if eps == "same":
            # Reuse the robust-training epsilon for the attack.
            eps = self.params.ROBUST_PPO_EPS
        else:
            eps = float(eps)
        steps = self.params.ATTACK_STEPS
        if self.params.ATTACK_METHOD == "critic":
            # Find a state that is close to last_states and decreases value most.
            if steps > 0:
                if self.params.ATTACK_STEP_EPS == "auto":
                    step_eps = eps / steps
                else:
                    step_eps = float(self.params.ATTACK_STEP_EPS)
                clamp_min = last_states - eps
                clamp_max = last_states + eps
                # Random start.
                noise = torch.empty_like(last_states).uniform_(-step_eps, step_eps)
                states = last_states + noise
                with torch.enable_grad():
                    for i in range(steps):
                        states = states.clone().detach().requires_grad_()
                        value = self.val_model(states).mean(dim=1)
                        value.backward()
                        # Gradient descent on the value: subtract the signed step.
                        update = states.grad.sign() * step_eps
                        # Clamp to +/- eps.
                        states.data = torch.min(torch.max(states.data - update, clamp_min), clamp_max)
                    self.val_model.zero_grad()
                return states.detach()
            else:
                return last_states
        elif self.params.ATTACK_METHOD == "random":
            # Apply an uniform random noise.
            noise = torch.empty_like(last_states).uniform_(-eps, eps)
            return (last_states + noise).detach()
        elif self.params.ATTACK_METHOD == "action" or self.params.ATTACK_METHOD == "action+imit":
            if steps > 0:
                if self.params.ATTACK_STEP_EPS == "auto":
                    step_eps = eps / steps
                else:
                    step_eps = float(self.params.ATTACK_STEP_EPS)
                clamp_min = last_states - eps
                clamp_max = last_states + eps
                # SGLD noise factor. We simply set beta=1.
                noise_factor = np.sqrt(2 * step_eps)
                noise = torch.randn_like(last_states) * noise_factor
                # The first step has gradient zero, so add the noise and projection directly.
                states = last_states + noise.sign() * step_eps
                # Current action at this state.
                if self.params.ATTACK_METHOD == "action+imit":
                    if not hasattr(self, "imit_network") or self.imit_network == None:
                        # Lazily load the imitation network the first time.
                        assert self.params.imit_model_path != None
                        print('\nLoading imitation network for attack: ', self.params.imit_model_path)
                        # Setup imitation network
                        self.setup_imit(train=False)
                        imit_ckpt = torch.load(self.params.imit_model_path)
                        self.imit_network.load_state_dict(imit_ckpt['state_dict'])
                        self.imit_network.reset()
                        self.imit_network.pause_history()
                    old_action, old_stdev = self.imit_network(last_states)
                else:
                    old_action, old_stdev = self.policy_model(last_states)
                # Normalize stdev, avoid numerical issue
                old_stdev /= (old_stdev.mean())
                old_action = old_action.detach()
                with torch.enable_grad():
                    for i in range(steps):
                        states = states.clone().detach().requires_grad_()
                        if self.params.ATTACK_METHOD == "action+imit":
                            action_change = (self.imit_network(states)[0] - old_action) / old_stdev
                        else:
                            action_change = (self.policy_model(states)[0] - old_action) / old_stdev
                        # Squared l2 action deviation, maximized by ascent below.
                        action_change = (action_change * action_change).sum(dim=1)
                        action_change.backward()
                        # Reduce noise at every step.
                        noise_factor = np.sqrt(2 * step_eps) / (i+2)
                        # Project noisy gradient to step boundary.
                        update = (states.grad + noise_factor * torch.randn_like(last_states)).sign() * step_eps
                        # Clamp to +/- eps.
                        states.data = torch.min(torch.max(states.data + update, clamp_min), clamp_max)
                    if self.params.ATTACK_METHOD == "action+imit":
                        self.imit_network.zero_grad()
                    self.policy_model.zero_grad()
                return states.detach()
            else:
                return last_states
        elif self.params.ATTACK_METHOD == "sarsa" or self.params.ATTACK_METHOD == "sarsa+action":
            # Attack using a learned value network.
            assert self.params.ATTACK_SARSA_NETWORK is not None
            use_action = self.params.ATTACK_SARSA_ACTION_RATIO > 0 and self.params.ATTACK_METHOD == "sarsa+action"
            action_ratio = self.params.ATTACK_SARSA_ACTION_RATIO
            assert action_ratio >= 0 and action_ratio <= 1
            if not hasattr(self, "sarsa_network"):
                # Lazily load the Sarsa Q-network the first time.
                self.sarsa_network = ValueDenseNet(state_dim=self.NUM_FEATURES+self.NUM_ACTIONS, init="normal")
                print("Loading sarsa network", self.params.ATTACK_SARSA_NETWORK)
                sarsa_ckpt = torch.load(self.params.ATTACK_SARSA_NETWORK)
                sarsa_meta = sarsa_ckpt['metadata']
                sarsa_eps = sarsa_meta['sarsa_eps'] if 'sarsa_eps' in sarsa_meta else "unknown"
                sarsa_reg = sarsa_meta['sarsa_reg'] if 'sarsa_reg' in sarsa_meta else "unknown"
                sarsa_steps = sarsa_meta['sarsa_steps'] if 'sarsa_steps' in sarsa_meta else "unknown"
                print(f"Sarsa network was trained with eps={sarsa_eps}, reg={sarsa_reg}, steps={sarsa_steps}")
                if use_action:
                    print(f"objective: {1.0 - action_ratio} * sarsa + {action_ratio} * action_change")
                else:
                    print("Not adding action change objective.")
                self.sarsa_network.load_state_dict(sarsa_ckpt['state_dict'])
            if steps > 0:
                if self.params.ATTACK_STEP_EPS == "auto":
                    step_eps = eps / steps
                else:
                    step_eps = float(self.params.ATTACK_STEP_EPS)
                clamp_min = last_states - eps
                clamp_max = last_states + eps
                # Random start.
                noise = torch.empty_like(last_states).uniform_(-step_eps, step_eps)
                states = last_states + noise
                if use_action:
                    # Current action at this state.
                    old_action, old_stdev = self.policy_model(last_states)
                    old_stdev /= (old_stdev.mean())
                    old_action = old_action.detach()
                with torch.enable_grad():
                    for i in range(steps):
                        states = states.clone().detach().requires_grad_()
                        # This is the mean action...
                        actions = self.policy_model(states)[0]
                        # NOTE(review): Q is evaluated at the ORIGINAL states
                        # with the perturbed-state actions — confirm this is
                        # intended rather than using `states` here.
                        value = self.sarsa_network(torch.cat((last_states, actions), dim=1)).mean(dim=1)
                        if use_action:
                            action_change = (actions - old_action) / old_stdev
                            # We want to maximize the action change, thus the minus sign.
                            action_change = -(action_change * action_change).mean(dim=1)
                            loss = action_ratio * action_change + (1.0 - action_ratio) * value
                        else:
                            action_change = 0.0
                            loss = value
                        loss.backward()
                        update = states.grad.sign() * step_eps
                        # Clamp to +/- eps.
                        states.data = torch.min(torch.max(states.data - update, clamp_min), clamp_max)
                    # NOTE(review): gradients flow through sarsa_network and
                    # policy_model here, but only val_model is zeroed —
                    # verify this is the intended cleanup.
                    self.val_model.zero_grad()
                return states.detach()
            else:
                return last_states
        elif self.params.ATTACK_METHOD == "advpolicy":
            # Attack using a learned policy network.
            assert self.params.ATTACK_ADVPOLICY_NETWORK is not None
            if not hasattr(self, "attack_policy_network"):
                # Lazily load the adversary policy the first time.
                self.attack_policy_network = self.policy_net_class(self.NUM_FEATURES, self.NUM_FEATURES,
                        self.INITIALIZATION,
                        time_in_state=self.VALUE_CALC == "time",
                        activation=self.policy_activation)
                print("Loading adversary policy network", self.params.ATTACK_ADVPOLICY_NETWORK)
                advpolicy_ckpt = torch.load(self.params.ATTACK_ADVPOLICY_NETWORK)
                self.attack_policy_network.load_state_dict(advpolicy_ckpt['adversary_policy_model'])
            # Unlike other attacks we don't need step or eps here.
            # We don't sample and use deterministic adversary policy here.
            perturbations_mean, _ = self.attack_policy_network(last_states)
            # Clamp using tanh.
            perturbed_states = last_states + ch.nn.functional.hardtanh(perturbations_mean) * eps
            """
            adv_perturbation_pds = self.attack_policy_network(last_states)
            next_adv_perturbations = self.attack_policy_network.sample(adv_perturbation_pds)
            perturbed_states = last_states + ch.tanh(next_adv_perturbations) * eps
            """
            return perturbed_states.detach()
        elif self.params.ATTACK_METHOD == "none":
            return last_states
        else:
            raise ValueError(f'Unknown attack method {self.params.ATTACK_METHOD}')
"""Run trajectories and return saps and values for each state."""
    def collect_saps(self, num_saps, should_log=True, return_rewards=False,
                     should_tqdm=False, test=False, collect_adversary_trajectory=False):
        """Run trajectories and return unrolled state-action pairs.

        Wraps :meth:`run_trajectories`, then (unless ``test``) evaluates the
        value function on the collected states and attaches GAE advantages,
        discounted returns and values to the trajectories before unrolling
        them from (actors, T, ...) to (actors*T, ...).

        :param num_saps: number of state-action pairs to collect.
        :param should_log: print and log the mean reward.
        :param return_rewards: also return per-episode rewards.
        :param should_tqdm: show a progress bar during rollout.
        :param test: skip advantage/return computation (evaluation only).
        :param collect_adversary_trajectory: collect data for the minimax
            adversary (uses the adversary's value/policy models).
        :return: (saps, avg_ep_reward, avg_ep_length[, ep_rewards])
        """
        table_name_suffix = "_adv" if collect_adversary_trajectory else ""
        with torch.no_grad():
            # Run trajectories, get values, estimate advantage
            output = self.run_trajectories(num_saps,
                            return_rewards=return_rewards,
                            should_tqdm=should_tqdm,
                            collect_adversary_trajectory=collect_adversary_trajectory)
            if not return_rewards:
                avg_ep_length, avg_ep_reward, trajs = output
            else:
                avg_ep_length, avg_ep_reward, trajs, ep_rewards = output
            # No need to compute advantage function for testing.
            if not test:
                # If we are sharing weights between the policy network and
                # value network, we use the get_value function of the
                # *policy* to # estimate the value, instead of using the value
                # net
                if not self.SHARE_WEIGHTS:
                    if collect_adversary_trajectory:
                        # LSTM value nets additionally need not_dones to
                        # segment the sequence into episodes.
                        if self.HISTORY_LENGTH > 0 and self.USE_LSTM_VAL:
                            values = self.adversary_val_model(trajs.states, trajs.not_dones).squeeze(-1)
                        else:
                            values = self.adversary_val_model(trajs.states).squeeze(-1)
                    else:
                        if self.HISTORY_LENGTH > 0 and self.USE_LSTM_VAL:
                            values = self.val_model(trajs.states, trajs.not_dones).squeeze(-1)
                        else:
                            values = self.val_model(trajs.states).squeeze(-1)
                else:
                    # Weight sharing is incompatible with history/LSTM models.
                    assert self.HISTORY_LENGTH < 1
                    if collect_adversary_trajectory:
                        values = self.adversary_policy_model.get_value(trajs.states).squeeze(-1)
                    else:
                        values = self.policy_model.get_value(trajs.states).squeeze(-1)
                # Calculate advantages and returns
                advantages, returns = self.advantage_and_return(trajs.rewards,
                                            values, trajs.not_dones)
                trajs.advantages = advantages
                trajs.returns = returns
                trajs.values = values
                assert shape_equal_cmp(trajs.advantages,
                                trajs.returns, trajs.values)
            # Logging
            if should_log:
                msg = "Current mean reward: %f | mean episode length: %f"
                print(msg % (avg_ep_reward, avg_ep_length))
                if not test:
                    self.store.log_table_and_tb('optimization'+table_name_suffix, {
                        'mean_reward': avg_ep_reward
                    })
            # Unroll the trajectories (actors, T, ...) -> (actors*T, ...)
            saps = trajs.unroll()
        to_ret = (saps, avg_ep_reward, avg_ep_length)
        if return_rewards:
            to_ret += (ep_rewards,)
        return to_ret
    def sarsa_steps(self, saps):
        """Train the robust Sarsa Q-network on collected state-action pairs.

        Minimizes a Smooth-L1 temporal-difference loss between Q(s_t, a_t)
        and r_t + gamma * Q(s_{t+1}, a_{t+1}), plus (optionally) a
        robustness regularizer computed from certified output bounds of the
        relaxed model (mixed IBP/backward bounds controlled by beta).

        :param saps: unrolled state-action pairs with rewards and not_dones.
        :return: (final TD loss tensor, mean Q of the last minibatch).
        """
        # Begin advanced logging code
        assert saps.unrolled
        loss = torch.nn.SmoothL1Loss()
        # NOTE(review): action_std appears unused below (only referenced in
        # the commented-out x_L/x_U perturbation code) — confirm.
        action_std = torch.exp(self.policy_model.log_stdev).detach().requires_grad_(False)  # Avoid backprop twice.
        # We treat all value epochs as one epoch.
        self.sarsa_eps_scheduler.set_epoch_length(self.params.VAL_EPOCHS * self.params.NUM_MINIBATCHES)
        self.sarsa_beta_scheduler.set_epoch_length(self.params.VAL_EPOCHS * self.params.NUM_MINIBATCHES)
        # We count from 1.
        self.sarsa_eps_scheduler.step_epoch()
        self.sarsa_beta_scheduler.step_epoch()
        # saps contains state->action->reward and not_done.
        for i in range(self.params.VAL_EPOCHS):
            # Create minibatches with shuffling
            state_indices = np.arange(saps.rewards.nelement())
            np.random.shuffle(state_indices)
            splits = np.array_split(state_indices, self.params.NUM_MINIBATCHES)
            # Minibatch SGD
            for selected in splits:
                def sel(*args):
                    return [v[selected] for v in args]
                self.sarsa_opt.zero_grad()
                sel_states, sel_actions, sel_rewards, sel_not_dones = sel(saps.states, saps.actions, saps.rewards, saps.not_dones)
                self.sarsa_eps_scheduler.step_batch()
                self.sarsa_beta_scheduler.step_batch()
                inputs = torch.cat((sel_states, sel_actions), dim=1)
                # action_diff = self.sarsa_eps_scheduler.get_eps() * action_std
                # inputs_lb = torch.cat((sel_states, sel_actions - action_diff), dim=1).detach().requires_grad_(False)
                # inputs_ub = torch.cat((sel_states, sel_actions + action_diff), dim=1).detach().requires_grad_(False)
                # bounded_inputs = BoundedTensor(inputs, ptb=PerturbationLpNorm(norm=np.inf, eps=None, x_L=inputs_lb, x_U=inputs_ub))
                bounded_inputs = BoundedTensor(inputs, ptb=PerturbationLpNorm(norm=np.inf, eps=self.sarsa_eps_scheduler.get_eps()))
                q = self.relaxed_sarsa_model(bounded_inputs).squeeze(-1)
                # NOTE(review): q[:-1]/q[1:] pairs consecutive elements of a
                # SHUFFLED minibatch as (s_t, s_{t+1}) — confirm this
                # temporal pairing is intended after the shuffle above.
                q_old = q[:-1]
                q_next = q[1:] * self.GAMMA * sel_not_dones[:-1] + sel_rewards[:-1]
                # Stop gradients through the TD target.
                q_next = q_next.detach()
                # q_loss = (q_old - q_next).pow(2).sum(dim=-1).mean()
                q_loss = loss(q_old, q_next)
                # Compute the robustness regularization.
                if self.sarsa_eps_scheduler.get_eps() > 0 and self.params.SARSA_REG > 0:
                    beta = self.sarsa_beta_scheduler.get_eps()
                    ilb, iub = self.relaxed_sarsa_model.compute_bounds(IBP=True, method=None)
                    if beta < 1:
                        # Blend cheap IBP bounds with tighter backward bounds.
                        clb, cub = self.relaxed_sarsa_model.compute_bounds(IBP=False, method='backward')
                        lb = beta * ilb + (1 - beta) * clb
                        ub = beta * iub + (1 - beta) * cub
                    else:
                        lb = ilb
                        ub = iub
                    # Output dimension is 1. Remove the extra dimension and keep only the batch dimension.
                    lb = lb.squeeze(-1)
                    ub = ub.squeeze(-1)
                    # Penalize the worst-case deviation of Q within the bounds.
                    diff = torch.max(ub - q, q - lb)
                    reg_loss = self.params.SARSA_REG * (diff * diff).mean()
                    sarsa_loss = q_loss + reg_loss
                    reg_loss = reg_loss.item()
                else:
                    reg_loss = 0.0
                    sarsa_loss = q_loss
                sarsa_loss.backward()
                self.sarsa_opt.step()
            print(f'q_loss={q_loss.item():.6g}, reg_loss={reg_loss:.6g}, sarsa_loss={sarsa_loss.item():.6g}')
            if self.ANNEAL_LR:
                self.sarsa_scheduler.step()
        # print('value:', self.val_model(saps.states).mean().item())
        return q_loss, q.mean()
    def take_steps(self, saps, logging=True, value_only=False, adversary_step=False, increment_scheduler=True):
        """Run one optimization pass over a batch of collected state-action pairs.

        The value network is updated first; then (unless value_only) a policy
        gradient step is taken.  When adversary_step is True the adversary's
        networks, optimizers and hyperparameters are used instead of the agent's.

        Inputs:
        - saps, an unrolled batch of state-action pairs
        - logging, whether to write optimization metrics to the cox store
        - value_only, if True only the value function is updated
        - adversary_step, if True train the adversary (minimax modes only)
        - increment_scheduler, whether to advance the LR schedulers this call

        Returns:
        - val_loss alone if value_only, otherwise
          (policy_loss, surr_loss, entropy_bonus, val_loss)
        """
        if adversary_step:
            # Collecting an adversary trajectory is only valid in minimax training mode.
            assert self.MODE == "adv_ppo" or self.MODE == "adv_trpo" or self.MODE == "adv_sa_ppo"
        # Begin advanced logging code
        assert saps.unrolled
        # Advanced logging fires only every log_every steps (and when logging is on).
        should_adv_log = self.advanced_logging and \
                self.n_steps % self.log_every == 0 and logging
        self.params.SHOULD_LOG_KL = self.advanced_logging and \
                self.KL_APPROXIMATION_ITERS != -1 and \
                self.n_steps % self.KL_APPROXIMATION_ITERS == 0
        store_to_pass = self.store if should_adv_log else None
        # End logging code
        if adversary_step:
            # Select the adversary's networks/schedulers and clone the params
            # so the overrides below do not leak into the agent's settings.
            policy_model = self.adversary_policy_model
            if self.ANNEAL_LR:
                policy_scheduler = self.ADV_POLICY_SCHEDULER
                val_scheduler = self.ADV_VALUE_SCHEDULER
            policy_params = Parameters(self.params.copy())
            # In policy_step(), some hard coded attributes will be accessed. We override them.
            policy_params.PPO_LR = self.ADV_PPO_LR_ADAM
            policy_params.PPO_LR_ADAM = self.ADV_PPO_LR_ADAM
            policy_params.POLICY_ADAM = self.ADV_POLICY_ADAM
            policy_params.CLIP_EPS = policy_params.ADV_CLIP_EPS
            policy_params.ENTROPY_COEFF = policy_params.ADV_ENTROPY_COEFF
            val_model = self.adversary_val_model
            val_opt = self.adversary_val_opt
            table_name_suffix = '_adv'
        else:
            policy_model = self.policy_model
            if self.ANNEAL_LR:
                policy_scheduler = self.POLICY_SCHEDULER
                val_scheduler = self.VALUE_SCHEDULER
            policy_params = self.params
            val_model = self.val_model
            val_opt = self.val_opt
            table_name_suffix = ''
        if should_adv_log:
            # Collect an extra held-out trajectory for validation of KL and max KL.
            num_saps = saps.advantages.shape[0]
            val_saps = self.collect_saps(num_saps, should_log=False, collect_adversary_trajectory=adversary_step)[0]
            out_train = policy_model(saps.states)
            out_val = policy_model(val_saps.states)
            old_pds = select_prob_dists(out_train, detach=True)
            val_old_pds = select_prob_dists(out_val, detach=True)
        # Update the value function before unrolling the trajectories
        # Pass the logging data into the function if applicable
        val_loss = ch.tensor(0.0)
        if not self.SHARE_WEIGHTS:
            val_loss = value_step(saps.states, saps.returns,
                saps.advantages, saps.not_dones, val_model,
                val_opt, self.params, store_to_pass,
                old_vs=saps.values.detach()).mean()
            if self.ANNEAL_LR and increment_scheduler:
                val_scheduler.step()
        if value_only:
            # Run the value iteration only. Return now.
            return val_loss
        if logging:
            self.store.log_table_and_tb('optimization'+table_name_suffix, {
                'final_value_loss': val_loss
            })
        if (self.MODE == 'robust_ppo' or self.MODE == 'adv_sa_ppo') and not adversary_step and logging:
            # Logging Robust PPO KL, entropy, etc. (robust PPO logs every step).
            store_to_pass = self.store
        # Take optimizer steps
        args = [saps.states, saps.actions, saps.action_log_probs,
                saps.rewards, saps.returns, saps.not_dones,
                saps.advantages, policy_model, policy_params,
                store_to_pass, self.n_steps]
        # Robust PPO additionally needs the relaxed (bound-propagation) model
        # and its eps/beta schedulers; only for continuous agent policies.
        if (self.MODE == 'robust_ppo' or self.MODE == 'adv_sa_ppo') and isinstance(self.policy_model, CtsPolicy) and not adversary_step:
            args += [self.relaxed_policy_model, self.robust_eps_scheduler, self.robust_beta_scheduler]
        self.MAX_KL += self.MAX_KL_INCREMENT
        if should_adv_log:
            # Save old parameter to investigate weight updates.
            old_parameter = copy.deepcopy(self.policy_model.state_dict())
        # Policy optimization step
        if adversary_step:
            policy_loss, surr_loss, entropy_bonus = self.adversary_policy_step(*args)
        else:
            policy_loss, surr_loss, entropy_bonus = self.policy_step(*args)
        # If the anneal_lr option is set, then we decrease the
        # learning rate at each training step
        if self.ANNEAL_LR and increment_scheduler:
            policy_scheduler.step()
        if should_adv_log and not adversary_step:
            # Value losses on both train and held-out batches.
            log_value_losses(self, val_saps, 'heldout')
            log_value_losses(self, saps, 'train')
            # NOTE(review): old_pds is recomputed here from the stored batch
            # statistics, overwriting the detached pds computed above — presumably
            # intentional for the paper-constraints tables; verify.
            old_pds = saps.action_means, saps.action_std
            paper_constraints_logging(self, saps, old_pds,
                          table='paper_constraints_train')
            paper_constraints_logging(self, val_saps, val_old_pds,
                          table='paper_constraints_heldout')
            log_weight_updates(self, old_parameter, self.policy_model.state_dict())
            self.store['paper_constraints_train'].flush_row()
            self.store['paper_constraints_heldout'].flush_row()
            self.store['value_data'].flush_row()
            self.store['weight_updates'].flush_row()
            if (self.params.MODE == 'robust_ppo' or self.params.MODE == 'adv_sa_ppo') and not adversary_step:
                self.store['robust_ppo_data'].flush_row()
        if self.ANNEAL_LR:
            print(f'val lr: {val_scheduler.get_last_lr()}, policy lr: {policy_scheduler.get_last_lr()}')
        val_loss = val_loss.mean().item()
        return policy_loss, surr_loss, entropy_bonus, val_loss
def train_step(self):
if self.MODE == "adv_ppo" or self.MODE == "adv_trpo" or self.MODE == "adv_sa_ppo":
avg_ep_reward = 0.0
if self.PPO_LR_ADAM != 0.0:
for i in range(int(self.ADV_POLICY_STEPS)):
avg_ep_reward = self.train_step_impl(adversary_step = False, increment_scheduler = (i==self.ADV_POLICY_STEPS-1))
for i in range(int(self.ADV_ADVERSARY_STEPS)):
self.train_step_impl(adversary_step = True, increment_scheduler = (i==self.ADV_ADVERSARY_STEPS-1))
else:
print('skipping policy training because learning rate is 0. adv_policy_steps and adv_adversary_steps ignored.')
avg_ep_reward = self.train_step_impl(adversary_step = True)
else:
avg_ep_reward = self.train_step_impl(adversary_step = False)
self.n_steps += 1
print()
return avg_ep_reward
def train_step_impl(self, adversary_step=False, increment_scheduler=True):
'''
Take a training step, by first collecting rollouts, then
calculating advantages, then taking a policy gradient step, and
finally taking a value function step.
Inputs: None
Returns:
- The current reward from the policy (per actor)
'''
start_time = time.time()
table_name_suffix = "_adv" if adversary_step else ""
if adversary_step:
print('++++++++ Adversary training ++++++++++')
policy_model = self.adversary_policy_model
else:
print('++++++++ Policy training ++++++++++')
policy_model = self.policy_model
num_saps = self.T * self.NUM_ACTORS
saps, avg_ep_reward, avg_ep_length = self.collect_saps(num_saps, collect_adversary_trajectory=adversary_step)
policy_loss, surr_loss, entropy_bonus, val_loss = self.take_steps(saps, adversary_step=adversary_step, increment_scheduler=increment_scheduler)
# Logging code
print(f"Policy Loss: {policy_loss:.5g}, | Entropy Bonus: {entropy_bonus:.5g}, | Value Loss: {val_loss:.5g}")
print("Time elapsed (s):", time.time() - start_time)
if not policy_model.discrete:
mean_std = ch.exp(policy_model.log_stdev).mean()
print("Agent stdevs: %s" % mean_std.detach().cpu().numpy())
self.store.log_table_and_tb('optimization'+table_name_suffix, {
'mean_std': mean_std,
'final_policy_loss' : policy_loss,
'final_surrogate_loss': surr_loss,
'entropy_bonus': entropy_bonus,
})
else:
self.store['optimization'+table_name_suffix].update_row({
'mean_std': np.nan,
'final_policy_loss' : policy_loss,
'final_surrogate_loss': surr_loss,
'entropy_bonus': entropy_bonus,
})
self.store['optimization'+table_name_suffix].flush_row()
print("-" * 80)
sys.stdout.flush()
sys.stderr.flush()
# End logging code
return avg_ep_reward
def sarsa_step(self):
'''
Take a training step, by first collecting rollouts, and
taking a value function step.
Inputs: None
Returns:
- The current reward from the policy (per actor)
'''
print("-" * 80)
start_time = time.time()
num_saps = self.T * self.NUM_ACTORS
saps, avg_ep_reward, avg_ep_length = self.collect_saps(num_saps, should_log=True, test=True)
sarsa_loss, q = self.sarsa_steps(saps)
print("Sarsa Loss:", sarsa_loss.item())
print("Q:", q.item())
print("Time elapsed (s):", time.time() - start_time)
sys.stdout.flush()
sys.stderr.flush()
self.n_steps += 1
return avg_ep_reward
def run_test(self, max_len=2048, compute_bounds=False, use_full_backward=False, original_stdev=None):
print("-" * 80)
start_time = time.time()
if compute_bounds and not hasattr(self, "relaxed_policy_model"):
self.create_relaxed_model()
#saps, avg_ep_reward, avg_ep_length = self.collect_saps(num_saps=None, should_log=True, test=True, num_episodes=num_episodes)
with torch.no_grad():
output = self.run_test_trajectories(max_len=max_len)
ep_length, ep_reward, actions, action_means, states = output
msg = "Episode reward: %f | episode length: %f"
print(msg % (ep_reward, ep_length))
if compute_bounds:
if original_stdev is None:
kl_stdev = torch.exp(self.policy_model.log_stdev)
else:
kl_stdev = torch.exp(original_stdev)
eps = float(self.params.ROBUST_PPO_EPS) if self.params.ATTACK_EPS == "same" else float(self.params.ATTACK_EPS)
kl_upper_bound = get_state_kl_bound(self.relaxed_policy_model, states, action_means,
eps=eps, beta=0.0,
stdev=kl_stdev, use_full_backward=use_full_backward).mean()
kl_upper_bound = kl_upper_bound.item()
else:
kl_upper_bound = float("nan")
# Unroll the trajectories (actors, T, ...) -> (actors*T, ...)
return ep_length, ep_reward, actions.cpu().numpy(), action_means.cpu().numpy(), states.cpu().numpy(), kl_upper_bound
    def run_test_trajectories(self, max_len, should_tqdm=False):
        """Roll out a single evaluation episode, applying the configured attack
        at every step, and return its length, reward and recorded tensors.

        Inputs:
        - max_len, maximum number of environment steps to take
        - should_tqdm, wrap the step loop in a tqdm progress bar

        Returns:
        - (ep_length, ep_reward, actions, action_means, states); length/reward
          are NaN if the episode did not terminate within max_len steps.
        """
        # Arrays to be updated with historic info
        envs = self.envs
        initial_states = self.reset_envs(envs)
        # Reset any recurrent state (LSTM policies/values and imitation net).
        if hasattr(self, "imit_network"):
            self.imit_network.reset()
        self.policy_model.reset()
        self.val_model.reset()
        # Holds information (length and true reward) about completed episodes
        completed_episode_info = []
        # Single-actor buffers: leading dimension 1, time dimension max_len.
        shape = (1, max_len)
        rewards = ch.zeros(shape)
        actions_shape = shape + (self.NUM_ACTIONS,)
        actions = ch.zeros(actions_shape)
        # Mean of the action distribution. Used for avoid unnecessary recomputation.
        action_means = ch.zeros(actions_shape)
        # One extra slot: states holds the state *before* each action plus the final state.
        states_shape = (1, max_len+1) + initial_states.shape[2:]
        states = ch.zeros(states_shape)
        iterator = range(max_len) if not should_tqdm else tqdm.trange(max_len)
        states[:, 0, :] = initial_states
        last_states = states[:, 0, :]
        for t in iterator:
            if (t+1) % 100 == 0:
                print('Step {} '.format(t+1))
            # assert shape_equal([self.NUM_ACTORS, self.NUM_FEATURES], last_states)
            # Retrieve probabilities
            # action_pds: (# actors, # actions), prob dists over actions
            # next_actions: (# actors, 1), indices of actions
            # pause updating hidden state because the attack may inference the model.
            self.policy_model.pause_history()
            self.val_model.pause_history()
            if hasattr(self, "imit_network"):
                self.imit_network.pause_history()
            # Perturb the observed state with the configured attack.
            maybe_attacked_last_states = self.apply_attack(last_states)
            self.policy_model.continue_history()
            self.val_model.continue_history()
            if hasattr(self, "imit_network"):
                self.imit_network.continue_history()
            # The policy acts on the (possibly attacked) observation.
            action_pds = self.policy_model(maybe_attacked_last_states)
            if hasattr(self, "imit_network"):
                # Forward pass only, to advance the imitation net's hidden state.
                _ = self.imit_network(maybe_attacked_last_states)
            next_action_means, next_action_stds = action_pds
            # Double check if the attack is within eps range.
            if self.params.ATTACK_METHOD != "none":
                max_eps = (maybe_attacked_last_states - last_states).abs().max()
                attack_eps = float(self.params.ROBUST_PPO_EPS) if self.params.ATTACK_EPS == "same" else float(self.params.ATTACK_EPS)
                # Small tolerance for floating point error in the projection.
                if max_eps > attack_eps + 1e-5:
                    raise RuntimeError(f"{max_eps} > {attack_eps}. Attack implementation has bug and eps is not correctly handled.")
            next_actions = self.policy_model.sample(action_pds)
            # if discrete, next_actions is (# actors, 1)
            # otw if continuous (# actors, 1, action dim)
            next_actions = next_actions.unsqueeze(1)
            ret = self.multi_actor_step(next_actions, envs)
            # done_info = List of (length, reward) pairs for each completed trajectory
            # (next_rewards, next_states, next_dones) act like multi-actor env.step()
            done_info, next_rewards, next_states, next_not_dones = ret
            # Reset the policy (if the policy has memory if we are done)
            if next_not_dones.item() == 0:
                self.policy_model.reset()
                self.val_model.reset()
            # Update histories
            # each shape: (nact, t, ...) -> (nact, t + 1, ...)
            pairs = [
                (rewards, next_rewards),
                (actions, next_actions), # The sampled actions.
                (action_means, next_action_means), # The sampled actions.
                (states, next_states),
            ]
            last_states = next_states[:, 0, :]
            for total, v in pairs:
                if total is states:
                    # Next states, stores in the next position.
                    total[:, t+1] = v
                else:
                    # The current action taken, and reward received.
                    total[:, t] = v
            # If some of the actors finished AND this is not the last step
            # OR some of the actors finished AND we have no episode information
            if len(done_info) > 0:
                completed_episode_info.extend(done_info)
                break
        # Single-actor evaluation: report the first completed episode, or NaN
        # if the rollout hit max_len without terminating.
        if len(completed_episode_info) > 0:
            ep_length, ep_reward = completed_episode_info[0]
        else:
            ep_length = np.nan
            ep_reward = np.nan
        # Truncate buffers to the number of steps actually taken (t survives the loop).
        actions = actions[0][:t+1]
        action_means = action_means[0][:t+1]
        states = states[0][:t+1]
        to_ret = (ep_length, ep_reward, actions, action_means, states)
        return to_ret
@staticmethod
def agent_from_data(store, row, cpu, extra_params=None, override_params=None, excluded_params=None):
'''
Initializes an agent from serialized data (via cox)
Inputs:
- store, the name of the store where everything is logged
- row, the exact row containing the desired data for this agent
- cpu, True/False whether to use the CPU (otherwise sends to GPU)
- extra_params, a dictionary of extra agent parameters. Only used
when a key does not exist from the loaded cox store.
- override_params, a dictionary of agent parameters that will override
current agent parameters.
- excluded_params, a dictionary of parameters that we do not copy or
override.
Outputs:
- agent, a constructed agent with the desired initialization and
parameters
- agent_params, the parameters that the agent was constructed with
'''
ckpts = store['final_results']
get_item = lambda x: list(row[x])[0]
items = ['val_model', 'policy_model', 'val_opt', 'policy_opt']
names = {i: get_item(i) for i in items}
param_keys = list(store['metadata'].df.columns)
param_values = list(store['metadata'].df.iloc[0,:])
def process_item(v):
try:
return v.item()
except:
return v
param_values = [process_item(v) for v in param_values]
agent_params = {k:v for k, v in zip(param_keys, param_values)}
if 'adam_eps' not in agent_params:
agent_params['adam_eps'] = 1e-5
if 'cpu' not in agent_params:
agent_params['cpu'] = cpu
# Update extra params if they do not exist in current parameters.
if extra_params is not None:
for k in extra_params.keys():
if k not in agent_params and k not in excluded_params:
print(f'adding key {k}={extra_params[k]}')
agent_params[k] = extra_params[k]
if override_params is not None:
for k in override_params.keys():
if k not in excluded_params and override_params[k] is not None and override_params[k] != agent_params[k]:
print(f'overwriting key {k}: old={agent_params[k]}, new={override_params[k]}')
agent_params[k] = override_params[k]
agent = Trainer.agent_from_params(agent_params)
def load_state_dict(model, ckpt_name):
mapper = ch.device('cuda:0') if not cpu else ch.device('cpu')
state_dict = ckpts.get_state_dict(ckpt_name, map_location=mapper)
model.load_state_dict(state_dict)
load_state_dict(agent.policy_model, names['policy_model'])
load_state_dict(agent.val_model, names['val_model'])
if agent.ANNEAL_LR:
agent.POLICY_SCHEDULER.last_epoch = get_item('iteration')
agent.VALUE_SCHEDULER.last_epoch = get_item('iteration')
load_state_dict(agent.POLICY_ADAM, names['policy_opt'])
load_state_dict(agent.val_opt, names['val_opt'])
agent.envs = ckpts.get_pickle(get_item('envs'))
return agent, agent_params
@staticmethod
def agent_from_params(params, store=None):
'''
Construct a trainer object given a dictionary of hyperparameters.
Trainer is in charge of sampling trajectories, updating policy network,
updating value network, and logging.
Inputs:
- params, dictionary of required hyperparameters
- store, a cox.Store object if logging is enabled
Outputs:
- A Trainer object for training a PPO/TRPO agent
'''
if params['history_length'] > 0:
agent_policy = CtsLSTMPolicy
if params['use_lstm_val']:
agent_value = ValueLSTMNet
else:
agent_value = value_net_with_name(params['value_net_type'])
else:
agent_policy = policy_net_with_name(params['policy_net_type'])
agent_value = value_net_with_name(params['value_net_type'])
advanced_logging = params['advanced_logging'] and store is not None
log_every = params['log_every'] if store is not None else 0
if params['cpu']:
torch.set_num_threads(1)
p = Trainer(agent_policy, agent_value, params, store, log_every=log_every,
advanced_logging=advanced_logging)
return p
| 69,824 | 47.557024 | 181 | py |
ATLA_robust_RL | ATLA_robust_RL-main/src/policy_gradients/models.py | import torch.nn as nn
import math
import functools
import torch as ch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from .torch_utils import *
'''
Neural network models for estimating value and policy functions
Contains:
- Initialization utilities
- Value Network(s)
- Policy Network(s)
- Retrieval Function
'''
########################
### INITIALIZATION UTILITY FUNCTIONS:
# initialize_weights
########################
# Default architecture constants shared by the networks below.
HIDDEN_SIZES = (64, 64)
ACTIVATION = nn.Tanh
STD = 2**0.5

def initialize_weights(mod, initialization_type, scale=STD):
    '''
    Weight initializer for the models.
    Inputs: A model, Returns: none, initializes the parameters
    '''
    for param in mod.parameters():
        tensor = param.data
        # Matrices (ndim >= 2) get the structured init; vectors (biases) are zeroed.
        is_matrix = tensor.dim() >= 2
        if initialization_type == "normal":
            tensor.normal_(0.01)
        elif initialization_type == "xavier":
            if is_matrix:
                nn.init.xavier_uniform_(tensor)
            else:
                tensor.zero_()
        elif initialization_type == "orthogonal":
            if is_matrix:
                orthogonal_init(tensor, gain=scale)
            else:
                tensor.zero_()
        else:
            raise ValueError("Need a valid initialization key")
########################
### INITIALIZATION UTILITY FUNCTIONS:
# Generic Value network, Value network MLP
########################
class ValueDenseNet(nn.Module):
    '''
    An example value network, with support for arbitrarily many
    fully connected hidden layers (by default 2 * 128-neuron layers),
    maps a state of size (state_dim) -> a scalar value.
    '''
    def __init__(self, state_dim, init=None, hidden_sizes=(64, 64), activation=None):
        '''
        Initializes the value network.
        Inputs:
        - state_dim, the input dimension of the network (i.e dimension of state)
        - hidden_sizes, an iterable of integers, each of which represents the size
          of a hidden layer in the neural network.
        Returns: Initialized Value network
        '''
        super().__init__()
        # Resolve the nonlinearity: a string names one; otherwise default to tanh.
        if isinstance(activation, str):
            self.activation = activation_with_name(activation)()
        else:
            self.activation = ACTIVATION()
        self.affine_layers = nn.ModuleList()
        in_features = state_dim
        for width in hidden_sizes:
            layer = nn.Linear(in_features, width)
            if init is not None:
                initialize_weights(layer, init)
            self.affine_layers.append(layer)
            in_features = width
        # Scalar value head.
        self.final = nn.Linear(in_features, 1)
        if init is not None:
            initialize_weights(self.final, init, scale=1.0)

    def initialize(self, init="orthogonal"):
        '''Re-initialize every layer with the given scheme.'''
        for layer in self.affine_layers:
            initialize_weights(layer, init)
        initialize_weights(self.final, init, scale=1.0)

    def forward(self, x):
        '''
        Performs inference using the value network.
        Inputs:
        - x, the state passed in from the agent
        Returns:
        - The scalar (float) value of that state, as estimated by the net
        '''
        hidden = x
        for layer in self.affine_layers:
            hidden = self.activation(layer(hidden))
        return self.final(hidden)

    def get_value(self, x):
        '''Alias for forward(), kept for interface parity with the LSTM value net.'''
        return self(x)

    def reset(self):
        '''No-op: a feed-forward net keeps no per-episode state.'''
        return

    # MLPs maintain no history, so pausing/continuing are no-ops.
    def pause_history(self):
        return

    def continue_history(self):
        return
def pack_history(features, not_dones):
    '''
    Split a flat rollout into per-episode sequences and pack them for an RNN.

    Inputs:
    - features, tensor of shape (N, state_dim) concatenating several episodes
    - not_dones, tensor of shape (N,); a 0 marks the final step of an episode

    Returns:
    - a PackedSequence (batch_first layout) with one entry per episode
    '''
    # Indices where not_dones == 0, i.e. episode terminations.
    nnz = ch.nonzero(1.0 - not_dones, as_tuple=False).view(-1).cpu().numpy()
    all_pieces = []
    lengths = []
    start = 0
    # Bug fix: `end` was previously only assigned inside the loop, raising
    # NameError when no episode ended inside the batch (not_dones all ones).
    end = 0
    for i in nnz:
        end = i + 1
        all_pieces.append(features[start:end, :])
        lengths.append(end - start)
        start = end
    # The trailing (possibly unfinished) episode, unless the batch ended
    # exactly on an episode boundary.
    if end != features.size(0):
        all_pieces.append(features[end:, :])
        lengths.append(features.size(0) - end)
    # Pad to a rectangular batch, then pack (episodes may be unsorted by length).
    padded = pad_sequence(all_pieces, batch_first=True)
    packed = pack_padded_sequence(padded, lengths, batch_first=True, enforce_sorted=False)
    return packed
def unpack_history(padded_pieces, lengths):
    '''
    Inverse of padding: concatenate the valid prefix of every padded episode.

    Inputs:
    - padded_pieces, tensor of shape (batch, time, hidden)
    - lengths, tensor of shape (batch,) holding each episode's true length

    Returns a tensor of shape (sum(lengths), hidden).
    '''
    pieces = [padded_pieces[idx, :n, :] for idx, n in enumerate(lengths.cpu().numpy())]
    return ch.cat(pieces, dim=0)
class ValueLSTMNet(nn.Module):
    '''
    An example value network, with support for arbitrarily many
    fully connected hidden layers (by default 2 * 128-neuron layers),
    maps a state of size (state_dim) -> a scalar value.
    '''
    def __init__(self, state_dim, init=None, hidden_sizes=(64, 64), activation=None):
        '''
        Initializes the value network.
        Inputs:
        - state_dim, the input dimension of the network (i.e dimension of state)
        - hidden_sizes, an iterable of integers, each of which represents the size
        of a hidden layer in the neural network.
        Returns: Initialized Value network
        '''
        print('Using LSTM for value function!!')
        super().__init__()
        self.hidden_sizes = hidden_sizes
        # Linear embedding of the raw state before the recurrent core.
        self.embedding_layer = nn.Linear(state_dim, self.hidden_sizes[0])
        # NOTE(review): called unconditionally — initialize_weights raises
        # ValueError for init=None, so a valid init key appears required; confirm.
        initialize_weights(self.embedding_layer, init, scale=0.01)
        self.lstm = nn.LSTM(input_size=self.hidden_sizes[0], hidden_size=self.hidden_sizes[1], num_layers=1, batch_first=True)
        # Scalar value head on top of the LSTM output.
        self.final = nn.Linear(self.hidden_sizes[-1], 1)
        if init is not None:
            initialize_weights(self.final, init, scale=1.0)
        # LSTM hidden states. Only used in inference mode when a batch size of 1 is used.
        self.hidden = [ch.zeros(1, 1, self.hidden_sizes[1]),
                       ch.zeros(1, 1, self.hidden_sizes[1])]
        # When paused, single-step forwards do not update the saved hidden state
        # (used while an attack probes the model).
        self.paused = False
    def initialize(self, init="orthogonal"):
        # NOTE(review): this class defines no self.affine_layers, so calling
        # initialize() would raise AttributeError — looks like a leftover from
        # ValueDenseNet; confirm it is never called on this class.
        for l in self.affine_layers:
            initialize_weights(l, init)
        initialize_weights(self.final, init, scale=1.0)
    def forward(self, states, not_dones=None):
        """Estimate state values; the input layout depends on not_dones.

        - not_dones given: states is (1, N, state_dim) containing several
          concatenated episodes; returns values of shape (1, N, 1).
        - not_dones None and states is (1, state_dim): single inference step
          using (and, unless paused, updating) the saved hidden state.
        """
        if not_dones is not None: # we get a full batch of states, we split them into episodes based on not_dones
            assert states.size(0) == 1 and states.size(1) != 1 and states.ndim == 3 # input dimension must be in shape (1, N, state_dim)
            # New shape: (N, state_dim)
            states = states.squeeze(0)
            features = self.embedding_layer(states)
            # New shape: (N, )
            not_dones = not_dones.squeeze(0)
            # Pack states into episodes according to not_dones
            packed_features = pack_history(features, not_dones)
            # Run LSTM
            outputs, _ = self.lstm(packed_features)
            # pad output results
            padded, lengths = pad_packed_sequence(outputs, batch_first=True)
            # concatenate output to a single array (N, hidden_dim)
            hidden = unpack_history(padded, lengths)
            """
            hidden = F.relu(features)
            """
            # final output, apply linear transformation on hidden output.
            value = self.final(hidden)
            """
            print(states.size(), not_dones.size())
            print(padded.size())
            print(hidden.size())
            print(lengths)
            print(value.size())
            import traceback; traceback.print_stack()
            input()
            """
            # add back the extra dimension. Shape (1, N, 1)
            return value.unsqueeze(0)
        elif states.ndim == 2 and states.size(0) == 1:
            # We get a state with batch shape 1. This is only possible in inference and attack mode.
            # embedding has shape (1, 1, hidden_dim)
            embedding = self.embedding_layer(states).unsqueeze(1)
            # Use saved hidden states
            _, hidden = self.lstm(embedding, self.hidden)
            # hidden dimension: (1, 1, hidden_size)
            # NOTE(review): this result is unused; self.final is applied again below.
            output = self.final(hidden[0])
            # save hidden state (skipped while paused, e.g. during attack probing).
            if not self.paused:
                self.hidden[0] = hidden[0]
                self.hidden[1] = hidden[1]
            # squeeze the time dimension, return shape (1, action_dim)
            value = self.final(hidden[0]).squeeze(1)
            return value
        else:
            raise NotImplementedError
            # NOTE(review): everything below is unreachable dead code kept from
            # an earlier (N, time, state_dim) code path.
            # state: (N, time, state_dim)
            embeddings = self.embedding_layer(states)
            # Run LSTM, output (N, time, hidden_dim)
            # outputs = F.relu(embeddings)
            outputs, _ = self.lstm(embeddings)
            # final output (N, time, 1)
            value = self.final(outputs)
            # add back the extra dimension. Shape (1, N, 1)
            return value
    def multi_forward(self, x, hidden):
        """Run a batch of sequences through the net with an explicit hidden state.

        x is (batch, time_step, state_dim); returns (values, new_hidden) where
        values is (batch, time_step, 1).
        """
        embeddings = self.embedding_layer(x)
        # print('embeddings', embeddings.size())
        # Run LSTM with packed sequence
        outputs, hidden = self.lstm(embeddings, hidden)
        # desired outputs dimension: (batch, time_step, hidden_size)
        # print('outputs', outputs.size())
        """
        outputs = F.relu(embeddings)
        """
        # print('unpacked_outputs', outputs.size())
        # value has size (batch, time_step, action_dim)
        value = self.final(outputs)
        # print('value', value.size())
        return value, hidden
    def get_value(self, *args):
        # Alias for forward(), matching the feed-forward value net interface.
        return self(*args)
    # Reset LSTM hidden states.
    def reset(self):
        # LSTM hidden states.
        self.hidden = [ch.zeros(1, 1, self.hidden_sizes[1]),
                       ch.zeros(1, 1, self.hidden_sizes[1])]
    def pause_history(self):
        # Stop persisting hidden-state updates (single-step mode only).
        self.paused = True
    def continue_history(self):
        # Resume persisting hidden-state updates.
        self.paused = False
########################
### POLICY NETWORKS
# Discrete and Continuous Policy Examples
########################
'''
A policy network can be any class which is initialized
with a state_dim and action_dim, as well as optional named arguments.
Must provide:
- A __call__ override (or forward, for nn.Module):
* returns a tensor parameterizing a distribution, given a
BATCH_SIZE x state_dim tensor representing shape
- A function calc_kl(p, q):
* takes in two batches tensors which parameterize probability
distributions (of the same form as the output from __call__),
and returns the KL(p||q) tensor of length BATCH_SIZE
- A function entropies(p):
* takes in a batch of tensors parameterizing distributions in
the same way and returns the entropy of each element in the
batch as a tensor
- A function sample(p):
* takes in a batch of tensors parameterizing distributions in
the same way as above and returns a batch of actions to be
performed
- A function get_likelihoods(p, actions):
* takes in a batch of parameterizing tensors (as above) and an
equal-length batch of actions, and returns a batch of probabilities
indicating how likely each action was according to p.
'''
class DiscPolicy(nn.Module):
'''
A discrete policy using a fully connected neural network.
The parameterizing tensor is a categorical distribution over actions
'''
def __init__(self, state_dim, action_dim, init, hidden_sizes=HIDDEN_SIZES, time_in_state=False, share_weights=False):
'''
Initializes the network with the state dimensionality and # actions
Inputs:
- state_dim, dimensionality of the state vector
- action_dim, # of possible discrete actions
- hidden_sizes, an iterable of length #layers,
hidden_sizes[i] = number of neurons in layer i
- time_in_state, a boolean indicating whether the time is
encoded in the state vector
'''
super().__init__()
self.activation = ACTIVATION()
self.time_in_state = time_in_state
self.discrete = True
self.affine_layers = nn.ModuleList()
prev_size = state_dim
for i in hidden_sizes:
lin = nn.Linear(prev_size, i)
initialize_weights(lin, init)
self.affine_layers.append(lin)
prev_size = i
self.final = nn.Linear(prev_size, action_dim)
# For the case where we want to share parameters
# between the policy and value networks
self.share_weights = share_weights
if share_weights:
if time_in_state:
self.final_value = nn.Linear(prev_size + 1, 1)
else:
self.final_value = nn.Linear(prev_size, 1)
initialize_weights(self.final_value, init, scale=1.0)
def forward(self, x):
'''
Outputs the categorical distribution (via softmax)
by feeding the state through the neural network
'''
# If the time is in the state, discard it
if self.time_in_state:
x = x[:,:-1]
for affine in self.affine_layers:
x = self.activation(affine(x))
probs = F.softmax(self.final(x))
return probs
def calc_kl(self, p, q, get_mean=True): # TODO: does not return a list
'''
Calculates E KL(p||q):
E[sum p(x) log(p(x)/q(x))]
Inputs:
- p, first probability distribution (NUM_SAMPLES, NUM_ACTIONS)
- q, second probability distribution (NUM_SAMPLES, NUM_ACTIONS)
Returns:
- Empirical KL from p to q
'''
p, q = p.squeeze(), q.squeeze()
assert shape_equal_cmp(p, q)
kl = (p * (ch.log(p) - ch.log(q))).sum(-1)
return kl
def entropies(self, p):
'''
p is probs of shape (batch_size, action_space). return mean entropy
across the batch of states
'''
entropies = (p * ch.log(p)).sum(dim=1)
return entropies
def get_loglikelihood(self, p, actions):
'''
Inputs:
- p, batch of probability tensors
- actions, the actions taken
'''
try:
dist = ch.distributions.categorical.Categorical(p)
return dist.log_prob(actions)
except Exception as e:
raise ValueError("Numerical error")
def sample(self, probs):
'''
given probs, return: actions sampled from P(.|s_i), and their
probabilities
- s: (batch_size, state_dim)
Returns actions:
- actions: shape (batch_size,)
'''
dist = ch.distributions.categorical.Categorical(probs)
actions = dist.sample()
return actions.long()
def get_value(self, x):
# If the time is in the state, discard it
assert self.share_weights, "Must be sharing weights to use get_value"
t = None
if self.time_in_state:
t = x[...,-1:]
x = x[...,:-1]
for affine in self.affine_layers:
x = self.activation(affine(x))
if self.time_in_state:
return self.final_value(ch.cat((x, t), -1))
else:
return self.final_value(x)
class CtsPolicy(nn.Module):
'''
A continuous policy using a fully connected neural network.
The parameterizing tensor is a mean and standard deviation vector,
which parameterize a gaussian distribution.
'''
def __init__(self, state_dim, action_dim, init, hidden_sizes=HIDDEN_SIZES,
time_in_state=False, share_weights=False, activation=None, use_merged_bias=False):
super().__init__()
if isinstance(activation, str):
self.activation = activation_with_name(activation)()
else:
# Default to tanh.
self.activation = ACTIVATION()
print('Using activation function', self.activation)
self.action_dim = action_dim
self.discrete = False
self.time_in_state = time_in_state
self.use_merged_bias = use_merged_bias
self.affine_layers = nn.ModuleList()
prev_size = state_dim
for i in hidden_sizes:
if use_merged_bias:
# Use an extra dimension for weight perturbation, simulating bias.
lin = nn.Linear(prev_size + 1, i, bias=False)
else:
lin = nn.Linear(prev_size, i, bias=True)
initialize_weights(lin, init)
self.affine_layers.append(lin)
prev_size = i
if use_merged_bias:
self.final_mean = nn.Linear(prev_size + 1, action_dim, bias=False)
else:
self.final_mean = nn.Linear(prev_size, action_dim, bias=True)
initialize_weights(self.final_mean, init, scale=0.01)
# For the case where we want to share parameters
# between the policy and value networks
self.share_weights = share_weights
if share_weights:
assert not use_merged_bias
if time_in_state:
self.final_value = nn.Linear(prev_size + 1, 1)
else:
self.final_value = nn.Linear(prev_size, 1)
initialize_weights(self.final_value, init, scale=1.0)
stdev_init = ch.zeros(action_dim)
self.log_stdev = ch.nn.Parameter(stdev_init)
def forward(self, x):
# If the time is in the state, discard it
if self.time_in_state:
x = x[:,:-1]
for affine in self.affine_layers:
if self.use_merged_bias:
# Generate an extra "one" for each element, which acts as a bias.
bias_padding = ch.ones(x.size(0),1)
x = ch.cat((x, bias_padding), dim=1)
else:
pass
x = self.activation(affine(x))
if self.use_merged_bias:
bias_padding = ch.ones(x.size(0),1)
x = ch.cat((x, bias_padding), dim=1)
means = self.final_mean(x)
std = ch.exp(self.log_stdev)
return means, std
def get_value(self, x):
assert self.share_weights, "Must be sharing weights to use get_value"
# If the time is in the state, discard it
t = None
if self.time_in_state:
t = x[...,-1:]
x = x[...,:-1]
for affine in self.affine_layers:
x = self.activation(affine(x))
if self.time_in_state:
return self.final_value(ch.cat((x, t), -1))
else:
return self.final_value(x)
def sample(self, p):
'''
Given prob dist (mean, var), return: actions sampled from p_i, and their
probabilities. p is tuple (means, var). means shape
(batch_size, action_space), var (action_space,), here are batch_size many
prboability distributions you're sampling from
Returns tuple (actions, probs):
- actions: shape (batch_size, action_dim)
- probs: shape (batch_size, action_dim)
'''
means, std = p
return (means + ch.randn_like(means)*std).detach()
def get_loglikelihood(self, p, actions):
try:
mean, std = p
nll = 0.5 * ((actions - mean) / std).pow(2).sum(-1) \
+ 0.5 * np.log(2.0 * np.pi) * actions.shape[-1] \
+ self.log_stdev.sum(-1)
return -nll
except Exception as e:
raise ValueError("Numerical error")
    def calc_kl(self, p, q):
        '''
        Get the expected KL distance between two sets of gaussians over states -
        gaussians p and q where p and q are each tuples (mean, var)
        - In other words calculates E KL(p||q): E[sum p(x) log(p(x)/q(x))]
        - From https://stats.stackexchange.com/a/60699
        '''
        p_mean, p_std = p
        q_mean, q_std = q
        # Diagonal covariances: element-wise squared standard deviations.
        p_var, q_var = p_std.pow(2), q_std.pow(2)
        # means: (batch, action_dim); variances: (action_dim,)
        assert shape_equal([-1, self.action_dim], p_mean, q_mean)
        assert shape_equal([self.action_dim], p_var, q_var)
        d = q_mean.shape[1]
        diff = q_mean - p_mean
        # log(det Sigma_q / det Sigma_p) for diagonal covariances.
        log_quot_frac = ch.log(q_var).sum() - ch.log(p_var).sum()
        # tr(Sigma_q^{-1} Sigma_p)
        tr = (p_var / q_var).sum()
        # (mu_q - mu_p)' Sigma_q^{-1} (mu_q - mu_p), per batch row.
        quadratic = ((diff / q_var) * diff).sum(dim=1)
        # Closed-form KL divergence between multivariate Gaussians.
        kl_sum = 0.5 * (log_quot_frac - d + tr + quadratic)
        assert kl_sum.shape == (p_mean.shape[0],)
        return kl_sum
    def entropies(self, p):
        '''
        Get entropies over the probability distributions given by p
        p_i = (mean, var), p mean is shape (batch_size, action_space),
        p var is shape (action_space,)
        '''
        _, std = p
        # `determinant` is an external helper; presumably it returns the
        # determinant of the diagonal std matrix (product of stds) — TODO confirm.
        detp = determinant(std)
        d = std.shape[0]
        # Gaussian differential entropy: log det(Sigma^{1/2}) + d/2 (1 + log 2*pi).
        entropies = ch.log(detp) + .5 * (d * (1. + math.log(2 * math.pi)))
        return entropies
def reset(self):
return
def pause_history(self):
return
def continue_history(self):
return
class CtsLSTMPolicy(CtsPolicy):
    '''
    A continuous policy whose trunk is an LSTM over per-episode histories.
    The parameterizing tensor is a mean and standard deviation vector,
    which parameterize a gaussian distribution.
    '''
    def __init__(self, state_dim, action_dim, init, hidden_sizes=HIDDEN_SIZES,
            time_in_state=False, share_weights=False, activation=None, use_merged_bias=False):
        print('Using LSTM policy!!')
        # These options are unsupported for the recurrent variant.
        assert share_weights is False
        assert use_merged_bias is False
        assert time_in_state is False
        super().__init__(state_dim, action_dim, init, hidden_sizes, time_in_state, share_weights, activation, use_merged_bias)
        self.hidden_sizes = hidden_sizes
        self.action_dim = action_dim
        self.discrete = False
        self.time_in_state = time_in_state
        self.use_merged_bias = use_merged_bias
        self.share_weights = share_weights
        # When True, forward() does not overwrite the cached hidden state.
        self.paused = False
        # Linear state embedding feeding the LSTM.
        self.embedding_layer = nn.Linear(state_dim, self.hidden_sizes[0])
        initialize_weights(self.embedding_layer, init, scale=0.01)
        self.lstm = nn.LSTM(input_size=self.hidden_sizes[0], hidden_size=self.hidden_sizes[1], num_layers=1, batch_first=True)
        self.output_layer = nn.Linear(self.hidden_sizes[-1], action_dim)
        initialize_weights(self.output_layer, init, scale=1.0)
        # State-independent log standard deviation of the Gaussian.
        stdev_init = ch.zeros(action_dim)
        self.log_stdev = ch.nn.Parameter(stdev_init)
        # LSTM hidden states.
        self.hidden = [ch.zeros(1, 1, self.hidden_sizes[1]),
                       ch.zeros(1, 1, self.hidden_sizes[1])]
    def forward(self, x, not_dones=None):
        # Training path: a flat batch of states spanning several episodes.
        if isinstance(x, ch.Tensor) and x.size(0) != 1:
            # We are given a batch of states. We need not_dones to split them into episodes.
            assert not_dones is not None
            # input dimension must be in shape (N, state_dim)
            # not_dones has shape: (N, )
            # features shape: (N, hidden_dim)
            features = self.embedding_layer(x)
            # Pack states into episodes according to not_dones
            packed_features = pack_history(features, not_dones)
            # Run LSTM
            outputs, _ = self.lstm(packed_features)
            # pad output results
            padded, lengths = pad_packed_sequence(outputs, batch_first=True)
            # concate output to a single array (N, hidden_dim)
            hidden = unpack_history(padded, lengths)
            # Disabled debug path (bypasses the LSTM); kept as a bare string
            # so it has no runtime effect:
            """
            hidden = F.relu(features)
            """
            # final output, apply linear transformation on hidden output.
            means = self.output_layer(hidden)
            std = ch.exp(self.log_stdev)
            return means, std
        # Inference path: one state at a time, stateful hidden caching.
        if isinstance(x, ch.Tensor) and x.ndim == 2: # inference mode, state input one by one. No time dimension.
            assert not_dones is None
            # it must have batch size 1.
            assert x.size(0) == 1
            # input x dimension: (1, time_slice, state_dim)
            # We use torch.nn.utils.rnn.pack_padded_sequence() as input.
            embedding = self.embedding_layer(x).unsqueeze(0)
            # embedding dimension: (batch, time_slice, hidden_dim)
            _, hidden = self.lstm(embedding, self.hidden)
            # _, hidden = self.lstm(embedding)
            # Disabled debug path (bypasses the LSTM):
            """
            hidden = F.relu(embedding)
            hidden = [hidden, hidden]
            """
            # hidden dimension: (1, 1, hidden_size)
            output = self.output_layer(hidden[0])
            # save hidden state (skipped while history is paused).
            if not self.paused:
                self.hidden[0] = hidden[0]
                self.hidden[1] = hidden[1]
            means = output.squeeze(0) # remove the extra dimension.
            std = ch.exp(self.log_stdev)
            return means, std
        else: # with time dimension, used for training LSTM.
            raise ValueError(f'Unsupported input {x} to LSTM policy')
    def multi_forward(self, x, hidden=None):
        # Sequence forward with an explicit (optional) hidden state;
        # x presumably has shape (batch, time_step, state_dim) — TODO confirm.
        embeddings = self.embedding_layer(x)
        # print('embeddings', embeddings.size())
        # Run LSTM with packed sequence
        outputs, hidden = self.lstm(embeddings, hidden)
        # desired outputs dimension: (batch, time_step, hidden_size)
        # print('outputs', outputs.size())
        # Disabled debug path:
        """
        outputs = F.relu(embeddings)
        """
        # print('unpacked_outputs', outputs.size())
        # means has size (batch, time_step, action_dim)
        means = self.output_layer(outputs)
        # print('means', means.size())
        # std is still time and history independent.
        std = ch.exp(self.log_stdev)
        return means, std, hidden
    # Reset LSTM hidden states.
    def reset(self):
        # LSTM hidden states.
        self.hidden = [ch.zeros(1, 1, self.hidden_sizes[1]),
                       ch.zeros(1, 1, self.hidden_sizes[1])]
    def pause_history(self):
        # Freeze the cached hidden state (forward() will not update it).
        self.paused = True
    def continue_history(self):
        # Resume updating the cached hidden state.
        self.paused = False
class CtsPolicyLarger(CtsPolicy):
    """CtsPolicy variant with a larger (400, 300) trunk and tanh-squashed means.

    The time_in_state / share_weights / activation / use_merged_bias arguments
    are accepted for interface compatibility but overridden with fixed values.
    """
    def __init__(self, state_dim, action_dim, init,
            time_in_state=False, share_weights=False, activation=None, use_merged_bias=False):
        super().__init__(state_dim, action_dim, init, hidden_sizes=[400, 300],
                         time_in_state=False, share_weights=False,
                         activation='relu', use_merged_bias=False)
    def forward(self, x):
        """Return (tanh(mean), std) so the mean action is bounded in (-1, 1)."""
        raw_mean, std = super().forward(x)
        return ch.tanh(raw_mean), std
class CtsPolicySAC(CtsPolicy):
    """CtsPolicy variant with a (256, 256) trunk and tanh-squashed means.

    The time_in_state / share_weights / activation / use_merged_bias arguments
    are accepted for interface compatibility but overridden with fixed values.
    """
    def __init__(self, state_dim, action_dim, init,
            time_in_state=False, share_weights=False, activation=None, use_merged_bias=False):
        super().__init__(state_dim, action_dim, init, hidden_sizes=[256, 256],
                         time_in_state=False, share_weights=False,
                         activation='relu', use_merged_bias=False)
    def forward(self, x):
        """Return (tanh(mean), std) so the mean action is bounded in (-1, 1)."""
        raw_mean, std = super().forward(x)
        return ch.tanh(raw_mean), std
## Retrieving networks
# Make sure to add newly created networks to these dictionaries!
# Registry mapping config-file names to policy-network classes.
POLICY_NETS = {
    "DiscPolicy": DiscPolicy,
    "CtsPolicy": CtsPolicy,
    "CtsPolicyLarger": CtsPolicyLarger,
    "CtsPolicySAC": CtsPolicySAC,
}
# Registry mapping config-file names to value-network classes.
VALUE_NETS = {
    "ValueNet": ValueDenseNet,
}
def partialclass(cls, *args, **kwds):
    """Return a subclass of `cls` whose __init__ pre-binds *args/**kwds.

    Analogous to functools.partial, but for class constructors: instances of
    the returned class are constructed as cls(*args, ..., **kwds).
    """
    bound_init = functools.partialmethod(cls.__init__, *args, **kwds)
    class NewCls(cls):
        __init__ = bound_init
    return NewCls
# Registry of activation constructors. The "leaky*" entries pre-bind the
# negative slope via partialclass so every value can be instantiated with
# no arguments.
ACTIVATIONS = {
    "tanh": nn.Tanh,
    "relu": nn.ReLU,
    "leaky": nn.LeakyReLU,
    "leaky0.05": partialclass(nn.LeakyReLU, negative_slope=0.05),
    "leaky0.1": partialclass(nn.LeakyReLU, negative_slope=0.1),
    "hardtanh": nn.Hardtanh,
}
def activation_with_name(name):
    """Look up an activation class by registry key (KeyError if unknown)."""
    activation_cls = ACTIVATIONS[name]
    return activation_cls
def policy_net_with_name(name):
    """Look up a policy-network class by registry key (KeyError if unknown)."""
    policy_cls = POLICY_NETS[name]
    return policy_cls
def value_net_with_name(name):
    """Look up a value-network class by registry key (KeyError if unknown)."""
    value_cls = VALUE_NETS[name]
    return value_cls
| 27,931 | 34.902314 | 137 | py |
GaPT | GaPT-main/tools/math_utils.py | from typing import Tuple
import numba as nb
import numpy as np
import torch as t
import copy
dtype_torch = t.float64
@nb.jit(nopython=True)
def nb_balance_ss(F: np.ndarray, iters: int) -> np.ndarray:
    # Iterative diagonal balancing of F (numba nopython-compiled).
    # For each dimension i, compute the 2-norms of the off-diagonal parts of
    # column i (c) and row i (r), then rescale by f = sqrt(r / c) so the two
    # norms are equalized. Returns the accumulated per-dimension scales d.
    # NOTE: F is mutated in place; callers should pass a private copy
    # (balance_ss does a deepcopy before calling).
    dim = F.shape[0]
    dtype = F.dtype
    d = np.ones((dim,), dtype=dtype)
    for k in range(iters):
        for i in range(dim):
            # Off-diagonal column norm.
            tmp = np.copy(F[:, i])
            tmp[i] = 0.
            c = np.linalg.norm(tmp, 2)
            # Off-diagonal row norm.
            tmp2 = np.copy(F[i, :])
            tmp2[i] = 0.
            r = np.linalg.norm(tmp2, 2)
            f = np.sqrt(r / c)
            d[i] *= f
            F[:, i] *= f
            F[i, :] /= f
    return d
def balance_ss(F: t.Tensor, L: t.Tensor, H: t.Tensor, q: t.Tensor, n_iter: int = 5) -> Tuple[t.Tensor, ...]:
    """Balance a state-space model (F, L, H, q) for numerical stability.

    A diagonal similarity transform (computed by nb_balance_ss) equalizes the
    row/column norms of F; L and H are then rescaled to unit maximum
    magnitude, with the spectral density q absorbing the squared scale
    factors so the model's input/output behavior is preserved.

    Parameters
    ----------
    F : t.Tensor
        State (drift) matrix.
    L : t.Tensor
        Noise-input matrix.
    H : t.Tensor
        Measurement matrix.
    q : t.Tensor
        Spectral density.
    n_iter : int
        Number of balancing sweeps.

    Returns
    -------
    F, L, H, q : t.Tensor
        The balanced model.

    References
    ----------
    https://arxiv.org/pdf/1401.5766.pdf
    """
    # nb_balance_ss mutates its argument in place, so hand it a private copy.
    scale = t.tensor(nb_balance_ss(F=copy.deepcopy(F.numpy()), iters=n_iter))
    # Fix the shape explicitly so it is known at compilation time: (dim,).
    scale = t.reshape(scale, (F.shape[0],))
    # Apply the diagonal similarity transform D^{-1} F D and its effect on L, H.
    F = F * scale[None, :] / scale[:, None]
    L = L / scale[:, None]
    H = H * scale[None, :]
    # Normalize L to unit max magnitude; push the factor into q.
    l_max = t.max(t.abs(L))
    L = L / l_max
    q = (l_max ** 2) * q
    # Normalize H likewise.
    h_max = t.max(t.abs(H))
    H = H / h_max
    q = (h_max ** 2) * q
    return F, L, H, q
def solve_lyap_vec(F: t.Tensor, L: t.Tensor, Q: t.Tensor) -> t.Tensor:
    """Vectorized continuous Lyapunov equation solver.

    Solves F P + P F' + L Q L' = 0 by vectorization:
    (I kron F + F kron I) vec(P) = -vec(L Q L'), then symmetrizes.

    Parameters
    ----------
    F : t.Tensor
        Square state matrix, shape (dim, dim).
    L : t.Tensor
        Noise-input matrix.
    Q : t.Tensor
        Process-noise spectral density.

    Returns
    -------
    Pinf : t.Tensor
        Steady-state covariance, shape (dim, dim).
    """
    dim = F.shape[0]
    # Build the identity in F's own dtype (was the module-wide float64
    # default) so the solver also works for float32 inputs; behavior is
    # unchanged for the float64 models used in this package.
    eye = t.eye(dim, dtype=F.dtype)
    vec_op = t.kron(eye, F) + t.kron(F, eye)
    rhs = t.matmul(L, t.matmul(Q, L.t()))
    Pinf = t.reshape(t.linalg.solve(vec_op, t.reshape(rhs, (-1, 1))), (dim, dim))
    # Symmetrize; the minus sign comes from moving L Q L' to the right side.
    Pinf = -0.5 * (Pinf + Pinf.t())
    return Pinf
| 2,402 | 20.845455 | 109 | py |
GaPT | GaPT-main/base/regressor/base_regressor.py | import abc
import gpytorch
import torch
import numpy as np
import logging
import os
from tqdm import tqdm
from base.gpytgp.gppyt_model import GPYTModel
from base.data.data_loader import GPDataLoader
from base.gpytgp.gppyt_trainer import GPYTrainer
from torch import optim
from tools.misc import siso_binary_search, distance_3d
class GPRegressor(metaclass=abc.ABCMeta):
    """Abstract GP regressor combining a GPyTorch model with an LTI-SDE
    (state-space) formulation of the same kernel, so predictions can be made
    either in batch (GPyTorch) or sequentially via a Kalman filter.

    Subclasses must implement `_create_sde_model` to build the state-space
    representation (`_pssgp_cov`, `_kf`, `_order`, ...).
    """
    def __init__(self, kernel: gpytorch.kernels.Kernel,
                 likelihood: gpytorch.likelihoods.Likelihood, reg_name: str, input_dim: int = 1):
        self.__name__ = reg_name  # Name of the Regressor
        # Kernel and likelihood used by the GPyTorch regressor.
        self._gpt_kernel = kernel
        self._gpt_likelihood = likelihood
        # Model creation is deferred so pre-trained parameters can be loaded.
        self._gpt_model = None  # Gpytorch kernel model implementation
        self._pssgp_cov = None  # LTI-SDE kernel model implementation
        self._kf = None  # Kalman's filter for regression and smoothing
        self._order = None  # Order of the RBF approximation for (P)SSGP
        self._balancing_iter = None  # Number of balancing steps for the resulting SDE to make it more stable
        # Current Kalman covariance matrix (set by _reset_filter*).
        self._covar_P = None
        self._is_ready = False
        # dimension of input
        self.input_dim = input_dim
    # TODO: MAKE IT PUBLIC NOT PRIVATE
    def _reset_filter(self):
        """Reset the Kalman covariance to the SDE's stationary covariance P0."""
        if self._is_ready:
            p_inf = self._pssgp_cov.get_sde()[0].numpy()
            self._covar_P = p_inf
        else:
            logging.warning('{}: unable to set P0 of the (Kalman Filter),'
                            'the model has not been initialized.'.format(self.__name__))
    def _reset_filter_miso(self):
        """Reset the MISO Kalman covariance: block-diagonal P0, one block per input."""
        if self._is_ready:
            p_inf = self._pssgp_cov.get_sde()[0].numpy()
            self._covar_P = np.zeros((p_inf.shape[0] * self.input_dim, p_inf.shape[0] * self.input_dim))
            for i in range(self.input_dim):
                self._covar_P[p_inf.shape[0] * i:p_inf.shape[0] * (i + 1)
                , p_inf.shape[0] * i:p_inf.shape[0] * (i + 1)] = p_inf
        else:
            logging.warning('{}: unable to set P0 of the (Kalman Filter),'
                            'the model has not been initialized.'.format(self.__name__))
    def train_hyperparams(self, train_dataloader: GPDataLoader, gp_training_conf, verbose=False):
        """Fit the GPyTorch model's hyperparameters, then build the SDE model.

        After training, the regressor is marked ready and the Kalman filter
        covariance is reset to the stationary covariance.
        """
        logging.debug('Training of the {} initialized'.format(self.__name__))
        x_train, y_train, _ = next(iter(train_dataloader))
        self._gpt_model = GPYTModel(likelihood=self._gpt_likelihood, kernel=self._gpt_kernel,
                                    train_x=x_train, train_y=y_train)
        # Training configuration.
        training_conf = gp_training_conf
        # Marginal log-likelihood loss.
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gpt_likelihood, self._gpt_model)
        # Optimizer resolved by name from torch.optim.
        optimizer_clss = getattr(optim, training_conf['optimizer']["type"])
        optimizer = optimizer_clss(params=self._gpt_model.parameters(), **training_conf['optimizer']["args"])
        # Trainer creation.
        trainer = GPYTrainer(loss=mll, gp_model=self._gpt_model, likelihood=self._gpt_likelihood,
                             train_loader=train_dataloader, optimizer=optimizer, config=training_conf, verbose=verbose)
        # Model training.
        trainer.train()
        # Put the model and the likelihood in evaluation mode after training.
        self._gpt_model.eval()
        self._gpt_likelihood.eval()
        # Build the LTI-SDE model from the trained hyperparameters.
        self._create_sde_model()
        # Finalizing.
        self._is_ready = True
        self._reset_filter()
    def predict_gpyt(self, x_input: torch.Tensor) -> (np.array, np.array, np.array):
        """Batch GP prediction; returns (mean, lower, upper) confidence bounds."""
        with torch.no_grad(), gpytorch.settings.fast_pred_var():
            observed_pred = self._gpt_likelihood(self._gpt_model(x_input))
            mean = observed_pred.mean
            lower, upper = observed_pred.confidence_region()
        return mean, lower, upper
    def predict_kf_mean_cov(self, x_training: np.array, y_training: np.array, z_mean, z_std, x_test: np.array,
                            Qgain=1.0, Rgain=1.0):
        """Sequential (SISO) Kalman-filter prediction over x_test.

        For each test point, the filter is run over nearby training samples
        (located via binary search, rewound by 10 steps) before predicting.
        Outputs are de-normalized with z_mean/z_std; the mean is clamped to
        the training-target range. Returns (y_predict, y_lower, y_upper).
        """
        if self._is_ready:
            # Reset the filter.
            self._reset_filter()
            # Init values
            len_data = x_test.shape[0]
            xt = np.zeros((self._order, 1))  # set the init value = 0
            y_predict = np.zeros(len_data)
            y_lower = np.zeros(len_data)
            y_upper = np.zeros(len_data)
            # get the starting index on the training dataset
            xt[0, :] = 0.0
            count = -1
            curr_x = 0
            for ix in tqdm(range(0, x_test.shape[0]), desc="KF predicting", colour='magenta', leave=False):
                in_data = x_test[ix]
                if x_training[0] > in_data:
                    xt[0, :] = y_training[0]
                if x_training[x_training.shape[0] - 1] < in_data:
                    count = x_training.shape[0] - 1
                else:
                    count = siso_binary_search(x_training, in_data)
                # Rewind a few samples so the filter warms up before the test point.
                count -= 10
                if count < 0:
                    count = 1
                curr_x = x_training[count - 1]
                while x_training[count] < in_data and count < x_training.shape[0] - 1:
                    dx = x_training[count] - curr_x
                    x_new, P_new = self._kf.predict(xt, self._covar_P, dx, Qgain_factor=Qgain)
                    xt, self._covar_P = self._kf.update(y_training[count], x_new, P_new, Rgain_factor=Rgain)
                    curr_x = x_training[count]
                    count += 1
                dx = in_data - curr_x
                x_new, P_new = self._kf.predict(xt, self._covar_P, dx, Qgain_factor=Qgain)
                # De-normalize; clamp the mean to the observed target range.
                y_predict[ix] = z_mean + max(min(y_training), min(max(y_training), x_new[0])) * z_std
                y_lower[ix] = (z_mean + x_new[0] * z_std) - (P_new[0, 0] * z_std) / 2
                y_upper[ix] = (z_mean + x_new[0] * z_std) + (P_new[0, 0] * z_std) / 2
            return y_predict, y_lower, y_upper
        else:
            msg = '{}: The LTI-MODEL for the class must be created or loaded before using it!'.format(self.__name__)
            logging.error(msg)
            raise Exception(msg)
    def predict_kf_miso(self, x_training: np.array, y_training: np.array, z_mean, z_std, x_test: np.array,
                        reversed: bool, Qgain=1.0, Rgain=1.0):
        """Sequential (MISO, 3-D input) Kalman-filter prediction over x_test.

        The nearest training sample is found by exhaustive 3-D distance
        search, rewound by 20 steps for filter warm-up. When `reversed` is
        True the de-normalized output sign is flipped (empirically the raw
        output is mirrored with respect to the ground truth).
        Returns (y_predict, y_lower, y_upper).
        """
        if self._is_ready:
            self._reset_filter_miso()
            len_data = x_test.shape[0]
            xt = np.zeros((self._order * self.input_dim, 1))
            xt[0, :] = 0.0
            y_predict = np.zeros(len_data)
            y_lower = np.zeros(len_data)
            y_upper = np.zeros(len_data)
            for ix in tqdm(range(1, x_test.shape[0]), desc="KF predicting", colour='magenta', leave=False):
                in_data = x_test[ix]
                count = 0
                cost = 999999999999
                # Linear scan for the nearest training point in 3-D.
                for ix_tr in range(0, x_training.shape[0]):
                    distance = distance_3d(x_training[ix_tr], in_data)
                    if distance < cost:
                        cost = distance
                        count = ix_tr
                # Rewind for filter warm-up.
                count -= 20
                if count < 0:
                    count = 1
                curr_x = x_training[count - 1]
                while distance_3d(x_training[count], in_data) > cost and count < x_training.shape[0] - 1:
                    dx = x_training[count] - curr_x
                    x_new, P_new = self._kf.predict(xt, self._covar_P, dx)
                    xt, self._covar_P = self._kf.update(y_training[count], x_new, P_new)
                    curr_x = x_training[count]
                    count += 1
                dx = in_data - curr_x
                # TODO: WE ARE USING THE -1 TRICK SINCE WE DISCOVERED THAT THE OUTPUT IS REVERSED
                # WITH RESPECT TO THE GT
                x_new, P_new = self._kf.predict(xt, self._covar_P, dx)
                if not reversed:
                    y_predict[ix] = z_mean + max(min(y_training), min(max(y_training), x_new[0])) * z_std
                    y_lower[ix] = (z_mean + x_new[0] * z_std) - (P_new[0, 0] * z_std) / 2
                    y_upper[ix] = (z_mean + x_new[0] * z_std) + (P_new[0, 0] * z_std) / 2
                else:
                    y_predict[ix] = z_mean - max(min(y_training), min(max(y_training), x_new[0])) * z_std
                    y_lower[ix] = (z_mean - x_new[0] * z_std) - (P_new[0, 0] * z_std) / 2
                    y_upper[ix] = (z_mean - x_new[0] * z_std) + (P_new[0, 0] * z_std) / 2
            return y_predict, y_lower, y_upper
        else:
            msg = '{}: The LTI-MODEL for the class must be created or loaded before using it!'.format(self.__name__)
            logging.error(msg)
            raise Exception(msg)
    def get_gpt_models(self):
        """Return (kernel, likelihood, model); raises if not trained/loaded."""
        if self._is_ready:
            return self._gpt_kernel, self._gpt_likelihood, self._gpt_model
        else:
            msg = '{}: Model must be loaded or trained before getting it.'.format(self.__name__)
            logging.error(msg)
            # Bug fix: the message was logged but a bare `raise Exception`
            # discarded it; attach it like the other methods in this class.
            raise Exception(msg)
    def dump_model(self, saving_path: str):
        """Save the GPyTorch model's state_dict to `<saving_path>/<name>.pth`."""
        if self._is_ready:
            try:
                model_name = ''.join([self.__name__, '.pth'])  # Create the name for the model
                save_path = os.path.join(saving_path, model_name)  # Create the saving path
                torch.save(self._gpt_model.state_dict(), save_path)  # Save the model
            except Exception as e:
                logging.warning('An error occur while saving the model: {}, '
                                'the error is: \n\t> {}'.format(self.__name__, e))
        else:
            logging.warning('Unable to save the model {}: it is not instantiated.'.format(self.__name__))
    def load_model(self, model_path: str):
        """Load a saved state_dict, rebuild the SDE model and reset the filter."""
        if os.path.exists(model_path):
            state_dict = torch.load(model_path)  # Load the state_dictionary
            self._gpt_model.load_state_dict(state_dict)  # Load the state_dictionary into the model
            self._gpt_model.eval()
            self._gpt_likelihood.eval()
            self._create_sde_model()  # Create the sde model
            self._is_ready = True  # Set the regressor ready to work
            self._reset_filter()  # Reset the previous stored values in the P matrix
        else:
            msg = 'The model file {} not found '.format(model_path)
            logging.error(msg)
            raise FileNotFoundError(msg)
    @abc.abstractmethod
    def _create_sde_model(self):
        """Build the LTI-SDE representation (subclass responsibility)."""
        raise NotImplementedError
| 10,898 | 44.224066 | 119 | py |
GaPT | GaPT-main/base/gapt/kalman/k_filters.py | """
the model used in the kalman Filter:
x[k+1] = Ax[k] + Gu[k] + w[k]
z[k] = Hx[k] + v[n]
where
x is the state vector, y the measurement, w the process noise and v the measurement noise.
The kalman filter assumes that w and v are zero-mean, independent random variables with known
variances:
- E[ww'] = Q
- E[vv'] = R
Note: symbol " ' " in this description means transposed.
A -> State matrix
G -> Input Matrix
H -> Observation Matrix
"""
import numpy as np
import torch as t
def expm_t(mat):
    """Matrix exponential of a NumPy array, computed via torch.

    Round-trips through a tensor so torch.linalg.matrix_exp does the work,
    then converts back to a NumPy array.
    """
    return t.linalg.matrix_exp(t.tensor(mat)).numpy()
def expm_f(A):
    """Cheap third-order Taylor approximation of the matrix exponential.

    Returns I + A + A^2/2 + A^3/6. As a crude divergence guard, the identity
    matrix is returned instead whenever the result's trace exceeds 50.
    """
    identity = np.eye(A.shape[0])
    A_sq = np.matmul(A, A)
    approx = identity + A + 0.5 * A_sq + np.matmul(A_sq, A) / 6
    if np.trace(approx) > 50:
        return identity
    return approx
class KalmanSISO:
    """Kalman filter for a single-input single-output LTI SDE.

    The model is x[k+1] = A x[k] + w[k], z[k] = H x[k] + v[k] with
    E[ww'] = Q and E[vv'] = R; A is obtained by (approximately)
    exponentiating the continuous-time drift F over each time step.
    """
    def __init__(self, F, L, H, Q, R, **kwargs):
        self.F = F
        self.L = L
        self.H = H
        self.Q = Q
        self.R = R
        # State dimension of the companion-form model (required kwarg).
        self.order = kwargs.pop('order')
    def calc_qs(self, dt, Qgain_factor):
        """Discretize the process noise over a step of size dt.

        Uses the matrix-fraction construction on the 2n x 2n block matrix
        [[F, L Q L'], [0, -F']]; the dimensions must agree with F, hence
        the mandatory `order`. This is a task-specific heuristic for the
        GSSM + Kalman-filter pipeline.
        """
        n = self.order
        step = np.linalg.norm(dt)
        block = np.zeros((n * 2, n * 2))
        block[:n, :n] = self.F
        block[n:n * 2, n:n * 2] = -self.F.T
        block[:n, n:n * 2] = np.matmul(self.L, self.L.T) * self.Q
        block = expm_f(block * step)
        Qd = np.matmul(block[:n, n:n * 2], expm_f(self.F * step).T)
        return Qd * Qgain_factor
    def predict(self, x_k, P, delta_t, Qgain_factor=1.0):
        """Time update: A = exp(F dt); x <- A x; P <- A P A' + Qd."""
        Ad = expm_f(self.F * delta_t)
        x_pred = np.matmul(Ad, x_k)
        P_pred = np.matmul(np.matmul(Ad, P), Ad.T) + self.calc_qs(delta_t, Qgain_factor=Qgain_factor)
        return x_pred, P_pred
    def update(self, z, x_pred, P_pred, Rgain_factor=1.0):
        """Measurement update: condition the prediction on observation z."""
        residual, S = self._innovation(x_pred, z, P_pred, Rgain_factor)
        return self._innovation_update(x_pred, P_pred, residual, S)
    def _innovation(self, x_pred, z, P_pred, Rgain_factor):
        """Innovation nu = z - H x_pred and its covariance S = R + H P H'."""
        residual = z - np.matmul(self.H, x_pred)
        S = np.asmatrix((self.R * Rgain_factor) + np.matmul(np.matmul(self.H, P_pred), self.H.T))
        return residual, S
    def _innovation_update(self, x_pred, P_pred, nu, S):
        """Fold the innovation into state and covariance.

        K = P H' S^{-1}; x <- x + K nu; P <- P - K S K'.
        """
        gain = np.matmul(np.matmul(P_pred, self.H.T), np.linalg.inv(S))
        x_new = x_pred + gain * nu
        P_new = P_pred - np.matmul(np.matmul(gain, S), gain.T)
        return x_new, P_new
class KalmanMISO:
    # Kalman filter for a multi-input single-output model: `repeat` copies of
    # the same order-`order` SDE are stacked block-diagonally, and one scalar
    # measurement combines their outputs through a weighted H row.
    def __init__(self, F, L, H, Q, R, order: int, repeat: int, **kwargs):
        self.F = F
        self.Q = Q
        self.R = R
        self.L = L
        self.repeat = repeat
        self.order = order
        # Build the stacked measurement row: each per-input H block is scaled
        # by a fixed weight. NOTE(review): avg_weight has exactly 5 entries,
        # so this indexing assumes repeat <= 5 — confirm with callers.
        new_H = np.zeros((1, H.shape[1] * self.repeat))
        # new_R = np.zeros((R.shape[0]*self.repeat,R.shape[1]*self.repeat))
        avg_weight = [0.6, 0.05, 0.05, 0.05, 0.05]
        for i in range(self.repeat):
            new_H[0, H.shape[1] * i:H.shape[1] * (i + 1)] = H * avg_weight[i]
            # new_R[R.shape[0]*i:R.shape[0]*(i+1),R.shape[1]*i:R.shape[1]*(i+1)] = R
        self.H = new_H
    def calc_qs(self, dt):
        # Discretized process noise, one block per input, via the
        # matrix-fraction construction (see KalmanSISO.calc_qs).
        Qs = np.zeros((self.order * 2, self.order * 2))
        Qs[:self.order, :self.order] = self.F
        Qs[self.order:self.order * 2, self.order:self.order * 2] = -self.F.T
        Qs[:self.order, self.order:self.order * 2] = np.matmul(self.L, self.L.T) * self.Q
        Q_total = np.zeros((self.order * self.repeat, self.order * self.repeat))
        for i in range(self.repeat):
            # NOTE(review): both branches assign the same constant; perhaps
            # different per-input scalings were intended — confirm.
            if i == 0:
                c = 0.01
            else:
                c = 0.01
            Qs2 = expm_f(Qs * np.linalg.norm(dt[i]) * c)
            Q_total[self.order * i:self.order * (i + 1), self.order * i:self.order * (i + 1)] = \
                np.matmul(Qs2[:self.order, self.order:self.order * 2], expm_f(self.F * np.linalg.norm(dt[i])).T)
        # The 0.7 factor is an empirical gain — TODO confirm its origin.
        return Q_total * 0.7
    def predict(self, x_k, P, delta_t):
        """
        F = exp(A*dt)`
        x_pred = F*x
        P_pred = F*P*F' + Q
        """
        # Block-diagonal transition: each input block is exponentiated with
        # its own time step delta_t[i].
        F_total = np.zeros((self.F.shape[0] * self.repeat, self.F.shape[1] * self.repeat))
        for i in range(self.repeat):
            F = expm_f(self.F * np.linalg.norm(delta_t[i]))
            F_total[F.shape[0] * i:F.shape[0] * (i + 1), F.shape[1] * i:F.shape[1] * (i + 1)] = F
        x_pred = np.matmul(F_total, x_k)
        P_pred = np.matmul(np.matmul(F_total, P), np.transpose(F_total)) + self.calc_qs(delta_t)
        return x_pred, P_pred
    def _innovation(self, x_pred, z, P_pred, H_custom=None):
        """
        nu = z - H*x_pred  (Innovation) -> It represents the error between the estimate and the measurement
        S = R + H*P_pred*H'  (Innovation Covariance) -> Needed to evaluate the new K
        """
        # H_custom lets a caller override the built-in weighted H row.
        if H_custom is None:
            nu = z - np.matmul(self.H, x_pred)
            S = np.asmatrix(self.R + np.matmul(np.matmul(self.H, P_pred), np.transpose(self.H)))
        else:
            nu = z - np.matmul(H_custom, x_pred)
            S = np.asmatrix(self.R + np.matmul(np.matmul(H_custom, P_pred), np.transpose(H_custom)))
        return nu, S
    def _innovation_update(self, x_pred, P_pred, nu, S, H_custom=None):
        """
        K = P_pred*H' * inv(S)  (kalman Gain) -> equivalent to K = (P_k * H) / (H*P*H' + R )
        x_new = x_pred + K*Nu  (New State)
        P_new = P_pred - K*S*K'  (New Covariance)
        """
        if H_custom is None:
            K = np.matmul(np.matmul(P_pred, np.transpose(self.H)), np.linalg.inv(S))
            x_new = x_pred + K * nu
            P_new = P_pred - np.matmul(np.matmul(K, S), np.transpose(K))
        else:
            K = np.matmul(np.matmul(P_pred, np.transpose(H_custom)), np.linalg.inv(S))
            x_new = x_pred + K * nu
            P_new = P_pred - np.matmul(np.matmul(K, S), np.transpose(K))
        return x_new, P_new
    def update(self, z, x_pred, P_pred, H_custom=None):
        # Measurement update; returns (x_new, P_new).
        n, S = self._innovation(x_pred, z, P_pred, H_custom)
        return self._innovation_update(x_pred, P_pred, n, S, H_custom)
| 6,890 | 37.932203 | 112 | py |
GaPT | GaPT-main/base/gapt/kernels/base_kernel.py | import abc
import torch as t
from collections import namedtuple
# Container for the LTI SDE representation of a stationary kernel:
# stationary covariance P0, drift F, noise input L, measurement H, density Q.
ContinuousDiscreteModel = namedtuple("ContinuousDiscreteModel", ["P0", "F", "L", "H", "Q"])
class BaseKernel(metaclass=abc.ABCMeta):
    """Abstract base for kernels that admit an LTI SDE (state-space) form."""
    def __init__(self, t0: float = 0., **_kwargs):
        """Store the reference time and default tensor dtype.

        Parameters
        ----------
        t0 : float, optional
            Reference (initial) time of the process.
        """
        self.t0 = t0
        self.dtype = t.float64
    @abc.abstractmethod
    def get_sde(self) -> ContinuousDiscreteModel:
        """Build the linear time-invariant continuous-discrete system
        associated with this stationary kernel.

        Returns
        -------
        ContinuousDiscreteModel
            The associated LTI model (P0, F, L, H, Q).
        """
| 715 | 24.571429 | 113 | py |
GaPT | GaPT-main/base/gapt/kernels/periodic/k_periodic.py | import numpy as np
import torch as t
import math
from typing import Tuple, Union, List
from scipy.special import factorial, comb
from base.gapt.kernels.base_kernel import BaseKernel, ContinuousDiscreteModel
class Periodic(BaseKernel):
    """
    The periodic family of kernels. The canonical form (based on the
    SquaredExponential kernels) can be found in Equation (47) of
    D.J.C.MacKay. Introduction to Gaussian processes. In C.M.Bishop, editor,
    Neural Networks and Machine Learning, pages 133--165. Springer, 1998.
    The following implementation is inspired by the procedure explained by Arno Solin and
    Simo sarkka: 'Explicit Link Between Periodic Covariance Functions
    and State Space Models'
    https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.647.357&rep=rep1&type=pdf
    """
    def __init__(self, lengthscale_base,
                 lengthscale_per,
                 period_per: Union[float, List[float]] = 1.0,
                 variance: float = 1.0,
                 **kwargs):
        super().__init__(t0=0.)
        # Truncation order N of the harmonic expansion (default 6).
        self._order = kwargs.pop('order', 6)
        self.lengthscale_base = np.float64(lengthscale_base)
        self.lengthscale_per = np.float64(lengthscale_per)
        self.variance = np.float64(variance)
        self.period_per = np.float64(period_per)
    def _get_offline_coeffs(self) -> Tuple[t.tensor, t.tensor, t.tensor]:
        """
        Get coefficients which are independent of parameters (ell, sigma, and period). That are, fixed.
        Returns
        -------
        b: np.ndarray
        K: np.ndarray
        div_facto_K: np.ndarray
        """
        N = self._order
        r = np.arange(0, N + 1)
        # (N+1)x(N+1) index grids over harmonic/series indices.
        J, K = np.meshgrid(r, r)
        div_facto_K = 1 / factorial(K)
        # Get b(K, J): series coefficients of the harmonic expansion (Solin &
        # Sarkka); nonzero only for J <= K with K - J even.
        b = 2 * comb(K, np.floor((K - J) / 2) * (J <= K)) / \
            (1 + (J == 0)) * (J <= K) * (np.mod(K - J, 2) == 0)
        # convert to Tensors
        b = t.tensor(b, dtype=self.dtype)
        K = t.tensor(K, dtype=self.dtype)
        div_facto_K = t.tensor(div_facto_K, dtype=self.dtype)
        return b, K, div_facto_K
    def get_sde(self) -> ContinuousDiscreteModel:
        # Angular frequency of the periodic component.
        w0 = 2.0 * math.pi / self.period_per
        lengthscales = self.lengthscale_base + self.lengthscale_per
        N = self._order
        # Prepare offline fixed coefficients
        b, K, div_facto_K = self._get_offline_coeffs()
        # F: block for each harmonic is a damped rotation at frequency j*w0;
        # the kron with diag(0..N) scales the frequency per harmonic.
        op_F = t.tensor([[-self.lengthscale_base, -w0], [w0, -self.lengthscale_base]], dtype=self.dtype)
        op_diag = t.arange(0, N + 1).to(self.dtype) * t.eye(N + 1, dtype=self.dtype)
        F = t.kron(op_diag, op_F)
        # L: identity of the full state dimension 2*(N+1).
        L = t.eye(2*(N+1), dtype=self.dtype)
        # Pinf: per-harmonic stationary variances q_j^2.
        # NOTE(review): Pinf and Q below use eye(N)-sized blocks (total size
        # N*(N+1)) while F/L/H have size 2*(N+1); the dimensions appear
        # inconsistent — confirm whether t.eye(2) was intended instead.
        q2_aT = b * lengthscales ** (-2 * K) * div_facto_K * \
                t.exp(-t.tensor(lengthscales, dtype=self.dtype) ** (-2)) * 2 ** (-K) * self.variance
        q2T = t.sum(q2_aT, dim=0) * t.eye(N + 1, dtype=self.dtype)
        Pinf = t.kron(q2T, t.eye(N, dtype=self.dtype))
        # Q: as F, is computed considering that we use the Matern to generate the quasi periodic version
        # Brownian motion ( from Matern32)
        d = 2
        qq = t.sum(q2_aT, dim=0)[0] * t.eye(N, dtype=self.dtype)
        lamda = math.sqrt(2 * d - 1) / t.tensor(lengthscales, dtype=self.dtype)
        # Matern (d=2, i.e. 3/2) white-noise spectral density.
        mat_brown_n = t.tensor((2 * lamda) ** (2 * d - 1) * self.variance *
                               math.factorial(d - 1) ** 2 / math.factorial(2 * d - 2), dtype=self.dtype)
        Q = t.kron(mat_brown_n*t.eye(N+1), qq)
        # H: sums the first (cosine) component of every harmonic block.
        H = t.kron(t.ones((1, N + 1), dtype=self.dtype), t.tensor([[1, 0]], dtype=self.dtype))
        return ContinuousDiscreteModel(Pinf, F, L, H, Q)
| 3,700 | 37.957895 | 104 | py |
GaPT | GaPT-main/base/gapt/kernels/rbf/k_rbf.py | import numpy as np
import torch
import torch as t
import math
from typing import Tuple
from tools.math_utils import solve_lyap_vec, balance_ss
from base.gapt.kernels.base_kernel import BaseKernel, ContinuousDiscreteModel
class RBF(BaseKernel):
    """
    The radial basis function (RBF) or squared exponential kernels. The kernels equation is
    k(r) = σ² exp{-½ r²}
    where:
    r is the Euclidean distance between the input points, scaled by the lengthscales parameter ℓ.
    σ² is the variance parameter
    Functions drawn from a GP with this kernels are infinitely differentiable!
    The SDE form is an order-`order` Taylor/spectral approximation, since the
    exact RBF kernel has no finite-dimensional state-space representation.
    """
    # TODO: remember to update when mixingkernel classes are implemented
    def __init__(self, variance=1.0, lengthscales=1.0, t0: float = 0., **kwargs):
        super().__init__(t0=t0)
        self.variance = t.tensor(variance, dtype=self.dtype)
        self.lengthscales = t.tensor(lengthscales, dtype=self.dtype)
        # Order of the Taylor approximation of the spectral density.
        self._order = kwargs.pop('order', 6)
        # Number of balancing sweeps applied to the resulting SDE.
        self._balancing_iter = kwargs.pop('balancing_iter', 5)
    def get_unscaled_rbf_sde(self) -> Tuple[np.ndarray, ...]:
        """Get un-scaled RBF SDE.
        Returns
        -------
        F, L, H, Q : np.ndarray
            SDE coefficients.
        """
        order = self._order
        B = math.sqrt(2 * math.pi)
        # Taylor coefficients of exp(x^2/2) laid out on even powers only.
        A = np.zeros((2 * order + 1,), dtype=np.dtype(float))
        i = 0
        for k in range(order, -1, -1):
            A[i] = 0.5 ** k / math.factorial(k)
            i = i + 2
        # Spectral density scale at w = 0.
        q = B / np.polyval(A, 0)
        # Substitute w -> s/i to get a polynomial in the Laplace variable.
        LA = np.real(A / (1j ** np.arange(A.size - 1, -1, -1, dtype=np.dtype(float))))
        AR = np.roots(LA)
        GB = 1
        # Spectral factorization: keep only the stable (left half-plane)
        # roots to build a stable transfer function GB / GA(s).
        GA = np.poly(AR[np.real(AR) < 0])
        GA = GA / GA[-1]
        GB = GB / GA[0]
        GA = GA / GA[0]
        # Controllable companion-form realization of GB / GA(s).
        F = np.zeros((GA.size - 1, GA.size - 1), dtype=np.dtype(float))
        F[-1, :] = -GA[:0:-1]
        F[:-1, 1:] = np.eye(GA.size - 2, dtype=np.dtype(float))
        L = np.zeros((GA.size - 1, 1), dtype=np.dtype(float))
        L[-1, 0] = 1
        H = np.zeros((1, GA.size - 1), dtype=np.dtype(float))
        H[0, 0] = GB
        return F, L, H, q
    def get_sde(self) -> ContinuousDiscreteModel:
        # Build the unit-parameter SDE, then rescale for lengthscale/variance.
        F_, L_, H_, q_ = self.get_unscaled_rbf_sde()
        F = t.tensor(F_, dtype=self.dtype)
        L = t.tensor(L_, dtype=self.dtype)
        H = t.tensor(H_, dtype=self.dtype)
        q = t.tensor(q_, dtype=self.dtype)
        dim = F.shape[0]
        # Lengthscale scaling of the companion row and the output matrix.
        ell_vec = self.lengthscales ** t.arange(dim, 0, -1, dtype=self.dtype)
        update_indices = t.tensor([[dim - 1, k] for k in range(dim)]).to(torch.long)
        F[update_indices[:, 0], update_indices[:, 1]] = F[-1, :] / ell_vec
        H = H / (self.lengthscales ** dim)
        Q = self.variance * self.lengthscales * t.reshape(q, (1, 1))
        Fb, Lb, Hb, Qb = balance_ss(F, L, H, Q, n_iter=self._balancing_iter)
        # Fb, Lb, Hb, Qb = F, L, H, Q
        Pinf = solve_lyap_vec(Fb, Lb, Qb)
        # NOTE(review): the balanced Fb/Lb/Hb are returned with the
        # pre-balancing Q (Qb is only used for Pinf) — confirm intentional.
        return ContinuousDiscreteModel(Pinf, Fb, Lb, Hb, Q)
| 3,003 | 31.652174 | 99 | py |
GaPT | GaPT-main/base/gapt/kernels/matern/base_matern.py | from base.gapt.kernels.base_kernel import BaseKernel
import numpy as np
import torch as t
from scipy.special import binom
import math
from typing import Tuple
class Matern(BaseKernel):
    """
    This class inherit from the base class "BaseKernel"
    implementing the Matern kernels-based methods shared by the
    concrete Matern 3/2 and 5/2 kernels (companion-form LTI SDE).
    """
    def __init__(self, t0):
        super().__init__(t0=t0)
    def _get_transition_matrix(self, lamda: t.Tensor, d: int) -> t.Tensor:
        """
        Description
        ----------
        Method to calculate the F Matrix in the companion form of the LTI-SDE:
        ones on the superdiagonal, and the last row holds the characteristic
        polynomial coefficients -binom(d, k) * lamda^(d-k).
        """
        lamda = t.tensor(lamda.numpy())
        # Superdiagonal of ones (companion form).
        F = t.diag(t.ones((d - 1,), dtype=self.dtype), 1)
        binomial_coeffs = binom(d, np.arange(0, d, dtype=int)).astype(np.float64)
        binomial_coeffs = t.tensor(binomial_coeffs, dtype=self.dtype)
        # Powers lamda^d, lamda^(d-1), ..., lamda^1.
        lambda_powers = lamda ** np.arange(d, 0, -1, dtype=np.float64)
        # Fill the last row of the companion matrix.
        update_indices = t.tensor([[d - 1, k] for k in range(d)])
        F[update_indices[:, 0], update_indices[:, 1]] -= lambda_powers * binomial_coeffs
        return F
    def _get_brownian_cov(self, variance: t.Tensor, lamda: t.Tensor, d) -> t.Tensor:
        """
        Description
        ----------
        Method to calculate the spectral density q of the driving white noise:
        q = (2 lamda)^(2d-1) * variance * (d-1)!^2 / (2d-2)!  (as a 1x1 tensor)
        """
        q = (2 * lamda) ** (2 * d - 1) * variance * math.factorial(d - 1) ** 2 / math.factorial(2 * d - 2)
        return q * t.eye(1, dtype=self.dtype)
    def _get_matern_sde(self, variance: t.Tensor, lengthscales: t.Tensor, d: int) -> Tuple[t.Tensor, ...]:
        """
        Build the LTI SDE coefficients of a Matern kernel.
        Parameters
        ----------
        variance
        lengthscales
        d: int
            the exponent of the Matern kernels plus one half
            for instance Matern32 -> 2, this will be used as the dimension of the latent SSM
        Returns
        -------
        F, L, H, Q: tuple of t.Tensor
            Parameters for the LTI sde
        """
        # lamda = sqrt(2 nu) / ell with nu = d - 1/2.
        lamda = math.sqrt(2 * d - 1) / lengthscales
        F = self._get_transition_matrix(lamda, d)
        # Noise enters only the last state; the first state is observed.
        L = t.eye(d, dtype=self.dtype)[d-1, :].unsqueeze(1)
        H = t.eye(d, dtype=self.dtype)[:, 0].unsqueeze(1).t()
        Q = self._get_brownian_cov(variance, lamda, d)
        return F, L, H, Q
| 2,247 | 34.125 | 106 | py |
GaPT | GaPT-main/base/gapt/kernels/matern/k_matern32.py | import torch as t
import logging
from base.gapt.kernels.base_kernel import ContinuousDiscreteModel
from base.gapt.kernels.matern.base_matern import Matern
from tools.math_utils import solve_lyap_vec, balance_ss
class Matern32(Matern):
    """Matern 3/2 kernel: k(r) = σ² (1 + √3 r) exp{-√3 r}.

    r is the Euclidean distance scaled by the lengthscales parameter ℓ and
    σ² is the variance; draws from a GP with this kernel are once
    differentiable.
    """
    def __init__(self, variance=1.0, lengthscales=1.0, t0: float = 0., **kwargs):
        super().__init__(t0=t0)
        self.variance = t.tensor(variance, dtype=self.dtype)
        self.lengthscales = t.tensor(lengthscales, dtype=self.dtype)
        # Number of balancing sweeps applied to the SDE (0 disables balancing).
        self._balancing_iter = kwargs.pop('balancing_iter', 0)
    def get_sde(self) -> ContinuousDiscreteModel:
        """Return the LTI SDE (P0, F, L, H, Q) of the Matern 3/2 kernel,
        optionally balanced for numerical stability."""
        F, L, H, Q = self._get_matern_sde(self.variance, self.lengthscales, d=2)
        Q = t.reshape(Q, (1, 1))
        if self._balancing_iter < 0:
            err_msg = 'The value of argument balancing_iter {} must be integer >= 0'.format(self._balancing_iter)
            logging.error(err_msg)
            raise ValueError(err_msg)
        if self._balancing_iter == 0:
            # No balancing requested: solve Lyapunov on the raw model.
            P_infty = solve_lyap_vec(F, L, Q)
            return ContinuousDiscreteModel(P_infty, F, L, H, Q)
        Fb, Lb, Hb, Qb = balance_ss(F, L, H, Q, n_iter=self._balancing_iter)
        P_infty = solve_lyap_vec(Fb, Lb, Qb)
        return ContinuousDiscreteModel(P_infty, Fb, Lb, Hb, Qb)
| 1,693 | 40.317073 | 113 | py |
GaPT | GaPT-main/base/gapt/kernels/matern/k_matern52.py | import torch as t
import logging
from base.gapt.kernels.base_kernel import ContinuousDiscreteModel
from base.gapt.kernels.matern.base_matern import Matern
from tools.math_utils import solve_lyap_vec, balance_ss
class Matern52(Matern):
    """
    The Matern 5/2 kernels. Functions drawn from a GP with this kernels are
    twice differentiable. The kernels equation is

        k(r) = σ² (1 + √5r + 5/3r²) exp{-√5 r}

    where r is the Euclidean distance between the input points scaled by the
    lengthscales parameter ℓ, and σ² is the variance parameter.
    """

    def __init__(self, variance=1.0, lengthscales=1.0, t0: float = 0., **kwargs):
        super().__init__(t0=t0)
        self.variance = t.tensor(variance, dtype=self.dtype)
        self.lengthscales = t.tensor(lengthscales, dtype=self.dtype)
        # Number of balancing iterations applied to the state-space model
        # (0 disables balancing).
        self._balancing_iter = kwargs.pop('balancing_iter', 0)

    def get_sde(self) -> ContinuousDiscreteModel:
        """Return the continuous-discrete SSM equivalent of this kernel (d = 3)."""
        F, L, H, Q = self._get_matern_sde(self.variance, self.lengthscales, d=3)
        # NOTE(review): Matern32 reshapes Q to (1, 1) while this class uses
        # (1, 1, 1) — confirm downstream code really expects a 3-D Q here.
        Q = t.reshape(Q, (1, 1, 1))
        n_iter = self._balancing_iter
        if n_iter < 0:
            err_msg = 'The value of argument balancing_iter {} must be integer >= 0'.format(self._balancing_iter)
            logging.error(err_msg)
            raise ValueError(err_msg)
        if n_iter == 0:
            P_infty = solve_lyap_vec(F, L, Q)
            return ContinuousDiscreteModel(P_infty, F, L, H, Q)
        # Balance the realization before solving for the stationary covariance.
        F, L, H, Q = balance_ss(F, L, H, Q, n_iter=n_iter)
        P_infty = solve_lyap_vec(F, L, Q)
        return ContinuousDiscreteModel(P_infty, F, L, H, Q)
| 1,631 | 39.8 | 113 | py |
GaPT | GaPT-main/base/gapt/kernels/matern/k_matern12.py | import torch as t
from base.gapt.kernels.base_kernel import ContinuousDiscreteModel
from base.gapt.kernels.matern.base_matern import Matern
class Matern12(Matern):
    """
    The Matern 1/2 kernels. Functions drawn from a GP with this kernels are
    not differentiable anywhere. The kernels equation is

        k(r) = σ² exp{-r}

    where r is the Euclidean distance between the input points scaled by the
    lengthscales parameter ℓ, and σ² is the variance parameter.
    """

    # TODO: remember to update when mixingkernel classes are implemented
    def __init__(self, variance=1.0, lengthscales=1.0, t0: float = 0., **kwargs):
        super().__init__(t0=t0)
        self.variance = t.tensor(variance, dtype=self.dtype)
        self.lengthscales = t.tensor(lengthscales, dtype=self.dtype)

    def get_sde(self) -> ContinuousDiscreteModel:
        """Return the continuous-discrete SSM equivalent of this kernel (d = 1)."""
        F, L, H, Q = self._get_matern_sde(self.variance, self.lengthscales, 1)
        # For d = 1 the stationary covariance is just the kernel variance,
        # shaped as a (1, 1) matrix.
        P_infty = t.tensor((self.variance,), dtype=self.dtype).unsqueeze(1)
        return ContinuousDiscreteModel(P_infty, F, L, H, Q)
| 1,069 | 35.896552 | 98 | py |
GaPT | GaPT-main/base/gpytgp/gppyt_model.py | import gpytorch
import torch
class GPYTModel(gpytorch.models.ExactGP):
    """
    Exact GP regression model backed by GPyTorch.

    Parameters
    ----------
    train_x : torch.tensor
        N x D tensor of training features.
    train_y : torch.tensor
        N x M tensor of training labels.
    likelihood : gpytorch.likelihoods
        Observation likelihood.
    kernel : gpytorch.kernels
        Covariance module used as covar_module.
    """

    def __init__(self, train_x: torch.tensor, train_y: torch.tensor,
                 likelihood: gpytorch.likelihoods, kernel: gpytorch.kernels):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = kernel

    def forward(self, x):
        """Evaluate the GP at x and return it as a MultivariateNormal."""
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )
| 788 | 29.346154 | 77 | py |
GaPT | GaPT-main/base/gpytgp/gppyt_trainer.py | """
PYTORCH TRAINERS IMPLEMENTATIONS
"""
import torch
import logging
from tqdm import tqdm
class GPYTrainer:
    """
    Trainer that optimises the hyper-parameters of a Gaussian process
    implemented in GPytorch.
    """

    def __init__(self, loss, gp_model, likelihood, train_loader, optimizer, config, verbose=False):
        self.loss = loss
        self.model = gp_model
        self.likelihood = likelihood
        self.train_loader = train_loader
        self.optimizer = optimizer
        self.config = config
        self._verbose = verbose

    def _find_optimal_params(self):
        # Put both the model and the likelihood in training mode before
        # optimising the hyper-parameters.
        self.model.train()
        self.likelihood.train()

    def train(self):
        """
        Full training logic
        """
        self._find_optimal_params()
        n_epochs = self.config['epochs']
        for epoch in tqdm(range(0, n_epochs), desc="GPT training", leave=False):
            for data, label, _ in self.train_loader:
                # TODO: move data/label to CUDA once the installed PyTorch
                # build is compatible with the available CUDA version.
                self.optimizer.zero_grad()
                output = self.model(data)
                # Negative marginal log-likelihood: we minimise it.
                loss = -self.loss(output, label)
                loss.backward()
                if self._verbose:
                    logging.info("Iter {}/{} - Loss: {:.3f} noise: {:.3f}".format(epoch + 1, n_epochs, loss.item(),
                                                                                  self.model.likelihood.noise.item()))
                    print("Iter {}/{} - Loss: {:.3f} noise: {:.3f}".format(epoch + 1, n_epochs, loss.item(),
                                                                           self.model.likelihood.noise.item()))
                self.optimizer.step()
| 2,230 | 31.808824 | 119 | py |
GaPT | GaPT-main/base/data/data_loader.py | import logging
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from base.data.data_sampler import CustomSampler
from base.data.base_dataset import BaseDataset
class GPDataLoader(DataLoader):
    """
    Dataloader used to train the GPyTorch models inside the regressor classes.

    Instead of a validation-split percentage, the loader takes the number K of
    training points:

      * K < len(dataset): K indices are drawn uniformly over the index range
        and form the training sampler; every remaining index forms the
        validation sampler.
      * K >= len(dataset): both samplers iterate over every index (K is
        clamped to the dataset size, logging a debug message when K was
        larger).

    Parameters
    ----------
    dataset : BaseDataset
        Source dataset.
    k_training_points : int
        Number of points K sampled for training.
    num_workers : int
        Worker count forwarded to torch's DataLoader (untested beyond 1).
    collate_fn : callable
        Collate function; torch's default_collate by default.

    Use get_valid_dataloader() to obtain the matching validation dataloader.
    """

    def __init__(self, dataset: BaseDataset, k_training_points: int, num_workers=1, collate_fn=default_collate):
        # One batch covers the whole dataset; the samplers do the splitting.
        full_batch = len(dataset)
        self.k_points = k_training_points
        self.batch_idx = 0
        self.n_samples = len(dataset)
        self.shuffle = False
        self.training_sampler, self.testing_sampler = self._get_samplers()
        self.init_kwargs = {
            'dataset': dataset,
            'batch_size': full_batch,
            'shuffle': self.shuffle,
            'collate_fn': collate_fn,
            'num_workers': num_workers
        }
        super().__init__(sampler=self.training_sampler, **self.init_kwargs)

    def _get_samplers(self):
        """Build the (training, validation) samplers over the dataset indices."""
        all_ix = np.arange(self.n_samples)
        if self.k_points < self.n_samples:
            # Draw k positions uniformly over the index range, round them to
            # integers and sort: these become the training indices.
            # NOTE(review): rounding can yield duplicate indices, so the
            # training sampler may repeat samples — confirm this is intended.
            draws = np.random.uniform(size=self.k_points, low=all_ix[0], high=all_ix[-1])
            train_ix = np.sort(np.rint(draws).astype(int))
            training_sampler = CustomSampler(list(train_ix))
            # Validation receives every index not picked for training.
            valid_ix = np.delete(all_ix, train_ix)
            validation_sampler = CustomSampler(list(valid_ix))
            return training_sampler, validation_sampler
        if self.k_points > self.n_samples:
            msg = 'Number of points selected for training ({}) are more than the ' \
                  'samples in the dataset ({}), setting them equals to the entire dataset'\
                .format(self.k_points, self.n_samples)
            logging.debug(msg)
            self.k_points = self.n_samples
        # Training and validation both iterate the whole dataset.
        return CustomSampler(list(all_ix)), CustomSampler(list(all_ix))

    def get_valid_dataloader(self):
        """Return a DataLoader that iterates the validation sampler."""
        return DataLoader(sampler=self.testing_sampler, **self.init_kwargs)
| 4,460 | 43.168317 | 118 | py |
GaPT | GaPT-main/base/data/base_dataset.py | from torch.utils.data import Dataset
from abc import ABC, abstractmethod
import numpy as np
from tools.dsp_utils import SigFilter, Tools
import torch
class BaseDataset(Dataset, ABC):
    """
    Base class for all the Dataset. The format is compliant with the
    dataloader schemes commonly used in PyTorch.

    Subclasses must implement _init_dataset(), which populates:
      - X: sample tensor,
      - Y: label tensor,
      - timestamp: time values (this project targets time-series data,
        so the timestamp is part of every item).

    Parameters
    ----------
    dataset_label : str
        Human-readable name, stored in __name__.
    *args, **kwargs
        Optional arguments for concrete subclasses.
    """

    def __init__(self, dataset_label: str, *args, **kwargs):
        self.X = None
        self.Y = None
        self.timestamp = None
        self._init_dataset()
        self.__name__ = dataset_label
        super().__init__()

    def __len__(self):
        return len(self.X)

    # _init_dataset adjusts/loads the data according to the application.
    @abstractmethod
    def _init_dataset(self, **kwargs):
        """
        Populate the three tensors:
        - X (samples)
        - Y (labels)
        - timestamp (time values)
        """
        raise NotImplementedError

    def __getitem__(self, index):
        sample = self.X[index]
        label = self.Y[index]
        ts = self.timestamp[index]
        return sample, label, ts

    def select_subset(self, start_index, end_index):
        """
        Restrict the dataset to the slice [start_index, end_index).

        The dataset is always restored to its original content via
        _init_dataset() first, then sliced. Passing None for either bound
        keeps the full dataset. Bounds are clamped to the valid range, and
        end_index == -1 means "up to the end".
        """
        # Restore the original content before (re-)slicing; this was
        # previously duplicated in both branches.
        self._init_dataset()
        if start_index is None or end_index is None:
            return
        if end_index > len(self) or end_index == -1:
            end_index = len(self)
        if start_index < 0:
            start_index = 0
        self.X = self.X[start_index: end_index]
        self.Y = self.Y[start_index: end_index]
        self.timestamp = self.timestamp[start_index: end_index]
| 2,227 | 29.108108 | 109 | py |
GaPT | GaPT-main/base/data/data_sampler.py | from torch.utils.data import Sampler
from typing import Iterator, Sequence
class CustomSampler(Sampler[int]):
    r"""Samples elements from a given list of indices, without replacement.

    Iteration yields the indices exactly in the order given.

    Args:
        indices (sequence): a sequence of indices
        generator (Generator): kept for API compatibility with torch
            samplers; unused, since the iteration order is fixed.
    """
    indices: Sequence[int]

    def __init__(self, indices: Sequence[int], generator=None) -> None:
        self.indices = indices
        self.generator = generator

    def __iter__(self) -> Iterator[int]:
        # Idiomatic delegation instead of an index-based loop.
        yield from self.indices

    def __len__(self) -> int:
        return len(self.indices)
| 664 | 27.913043 | 75 | py |
GaPT | GaPT-main/experiments/timing/inference_time_miso_rt.py | """
The script will evaluate the computational time for one sample inference for both the GPyTorch and the
KF models.
MISO (multiple-input, single-output) configuration.
"""
import copy
import gc
import logging
import numpy
import time
import numpy as np
import pandas as pd
import torch.random
from tqdm import tqdm
import json
from pathlib import Path
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
from base.data.data_loader import GPDataLoader
from configs import OUTPUT_DIR
from configs.config_parsers import ArgParser, LoggerParser, MatplotlibParser
from model import datasets
from model import regressors
from tools.file_utils import Folder
##############################
#        LOAD CONFIG         #
##############################
# Parse the CLI / JSON configuration and unpack the sections used below.
parser = ArgParser()
config = parser.args
json_config = parser.json
dataset_config_train = config.DATASET
regressors_config = config.REGRESSORS
seeds_config = config.SEEDS
gpytorch_config = config.GPT_MODEL
session_id = config.SESSION_NAME
log_config = config.LOG
kpts_config = config.KPOINTS
measurmements_config = config.N_MEASUREMENTS
# SESSION NAME
session_name = session_id
# Fix the seed for numpy and pytorch so runs are reproducible.
np.random.seed(seeds_config["numpy"])
torch.manual_seed(seeds_config["torch"])
# OUTPUT FOLDERS
run_id = ''.join([session_name, '_RT_MISO', datetime.now().strftime(r'%m%d_%H%M%S')])
session_folder = Folder(OUTPUT_DIR, run_id)
folder_csv_out = Folder(session_folder.path, 'inf_time_benchmark')
folder_json_out = Folder(session_folder.path, 'configuration')
# LOGGING
LoggerParser.setup_logging(save_dir=session_folder.path)
logging.info("Session: {} started".format(session_name))
# MATPLOTLIB: apply the plotting style shared by all experiments.
matplt_conf = MatplotlibParser.get_matplot_conf()
plt.rcParams["figure.figsize"] = (matplt_conf["figsize"][0], matplt_conf["figsize"][1])
matplotlib.rc("font", **matplt_conf["font"])
matplotlib.rc("axes", **matplt_conf["axes"])
matplotlib.rc("figure", **matplt_conf["figure"])
matplotlib.rc("xtick.major", **matplt_conf["xtick"]["major"])
matplotlib.rc("ytick.major", **matplt_conf["ytick"]["major"])
matplotlib.rcParams["pdf.fonttype"] = matplt_conf["pdf_fonttype"]
matplotlib.rcParams["ps.fonttype"] = matplt_conf["ps_fonttype"]
matplot_colors = matplt_conf["colors"]
matplot_line_tick = matplt_conf["line_thickness"]
matplot_mark_size = matplt_conf["marker_size"]
matplot_line_style = matplt_conf["extra_linestyles"]
matplot_mark_style = matplt_conf["markers"]
#############################################
#             LOAD THE DATASET              #
#############################################
# TRAINING
# Get the parameters of the training dataset
dataset_args_tr = dataset_config_train["train"]["args"]
dataset_model_tr = dataset_config_train["train"]["class"]
dataset_class_tr = getattr(datasets, dataset_model_tr)
# Instantiate the training dataset
training_dataset = dataset_class_tr(**dataset_args_tr)
#############################################
#            NORMALIZE DATASET              #
#############################################
# Get the mean and std of the labels.
mean_y_tr = torch.mean(training_dataset.Y)
std_y_tr = torch.std(training_dataset.Y)
# Z-score every feature column.
# NOTE(review): columns after the first are divided by 10 * std — this extra
# attenuation of the non-time features is undocumented; confirm it matches
# the training pipeline.
for i in range(training_dataset.X[:, :].shape[1]):
    std_x_0_tr, mean_x_0_tr = torch.std_mean(training_dataset.X[:, i])
    if i > 0:
        std_x_0_tr = std_x_0_tr * 10
    training_dataset.X[:, i] = (training_dataset.X[:, i] - mean_x_0_tr) / std_x_0_tr
# Z-score the labels with the statistics computed above.
training_dataset.Y = (training_dataset.Y - mean_y_tr) / std_y_tr
#############################################
#             PREDICTIONS KF                #
#############################################
# TRAIN
# Extract the (normalized) training data as numpy arrays for the KF side.
x_tr_kal = training_dataset.X.numpy()
y_tr_kal = training_dataset.Y.numpy()
# Sort by the first column (presumably the timestamp — the sequential KF
# needs ordered inputs; confirm against the dataset class).
index_tr = np.argsort(x_tr_kal[:, 0])
# Indexes that undo the sort (map sorted order back to original order).
rollback_indexes_tr = np.empty_like(index_tr)
rollback_indexes_tr[index_tr] = np.arange(index_tr.size)
# Sort the training dataset used for the prediction matching.
x_tr_kal_sort = x_tr_kal[index_tr]
y_tr_kal_sort = y_tr_kal[index_tr]
##############################
#         K-PTS LIST         #
##############################
# Sizes (number of training points) at which inference time is measured.
pt_list = kpts_config["pt_list"]
k_training_pts_list = np.array(pt_list)
##############################
#         BENCHMARK          #
##############################
# For every regressor keep [name, model instance, (n_kpts x 2) timing matrix,
# SDE order]. Column 0 of the timing matrix holds GPyTorch times, column 1
# holds Kalman-filter times.
pred_list = []
# Get the list of model used for the test
regressor_list = regressors_config['used']
for regressor_name in regressor_list:
    model_class = getattr(regressors, regressor_name)
    model = model_class(id_model="", input_dim=len(training_dataset.x_column))
    model_order = None  # filled in after training, from the SDE state dimension
    model_name = model.__name__.split("_")[0]
    pred_list.append([model_name, model, np.zeros((len(k_training_pts_list), 2)), model_order])

for ix_pts in tqdm(range(0, len(k_training_pts_list)), desc="Iterating K pts.", colour="green", leave=True):
    k_pts = k_training_pts_list[ix_pts]
    # 1) Create a Dataloader sized for the current number of training points.
    dataloader = GPDataLoader(training_dataset, k_training_points=int(k_pts))
    # 2) Train the models and time the inference for both the GPyTorch and
    #    the KF implementation.
    for model_ix in tqdm(range(0, len(pred_list)), desc="Processing Models", colour="blue", leave=False):
        # Work on a fresh copy so each k_pts iteration starts untrained.
        model = copy.deepcopy(pred_list[model_ix][1])
        # a) Train the regressor with the dataloader
        model.train_hyperparams(dataloader, gpytorch_config)
        # b) Record the SDE state dimension of the trained model.
        pred_list[model_ix][3] = model._pssgp_cov.get_sde()[1].shape[
            0]  # TODO: ACCESS TO PRIVATE MEMBER, MAYBE CREATE A GETTER
        gpt_kernel, gpt_likelihood, gpt_model = model.get_gpt_models()
        # c) Extract the dataset points used for the benchmark
        x_test_pt = training_dataset.X[0:k_pts+1]
        y_test_pt = training_dataset.Y[0:k_pts+1]
        # Predict one sample at a time: with batched inputs PyTorch would
        # parallelize and the comparison with the sequential Kalman filter
        # would not be fair.
        # Perform multiple measurements, drop the min/max, store the mean.
        n_measurements = measurmements_config["n_measurments"]
        if n_measurements < 3:
            # At least 3 runs are needed because the fastest and slowest run
            # are discarded before averaging. (Fixed: the old message wrongly
            # claimed ">10" while the check accepts 3.)
            msg = "The number of measurements required to estimate the inference time must be >= 3. Received:{}" \
                .format(n_measurements)
            logging.error(msg)
            raise ValueError(msg)
        pred_time_gpt = numpy.zeros(n_measurements)
        pred_time_kf = numpy.zeros(n_measurements)
        for n_meas in tqdm(range(n_measurements),
                           desc=" GpyTorch: Performing multiple measurements", colour="purple", leave=False):
            # GPyTorch inference on a single point (the first test point).
            x_val = x_test_pt[0].unsqueeze(1).T
            t_now = time.time()
            with torch.no_grad():
                _ = gpt_likelihood(gpt_model(x_val))
            pred_time_gpt[n_meas] = time.time() - t_now
        del model._gpt_kernel, model._gpt_likelihood, model._gpt_model
        for n_meas in tqdm(range(n_measurements),
                           desc=" KF: Performing multiple measurements", colour="yellow", leave=False):
            # Kalman-filter inference on the same single test point.
            t_now = time.time()
            _, _, _ = model.predict_kf_miso(x_training=x_tr_kal_sort[0:k_pts+1],
                                            y_training=y_tr_kal_sort[0:k_pts+1],
                                            z_mean=mean_y_tr.numpy(),
                                            z_std=std_y_tr.numpy(),
                                            x_test=x_test_pt.numpy()[0],
                                            reversed=False)
            pred_time_kf[n_meas] = time.time() - t_now
        # Robust mean: drop the fastest and the slowest run before averaging.
        pred_time_gpt = np.delete(pred_time_gpt, [np.argmax(pred_time_gpt), np.argmin(pred_time_gpt)])
        pred_time_kf = np.delete(pred_time_kf, [np.argmax(pred_time_kf), np.argmin(pred_time_kf)])
        pred_list[model_ix][2][ix_pts][0] = np.mean(pred_time_gpt)
        pred_list[model_ix][2][ix_pts][1] = np.mean(pred_time_kf)
        del model, gpt_kernel, gpt_likelihood, gpt_model
        gc.collect()
    del dataloader
    gc.collect()
##############################
#          OUTPUTS           #
##############################
# - Create the dataframe: one row per K, one GPyTorch and one KF timing
#   column per model.
data_frame = pd.DataFrame(columns=['K_pts'])
data_frame['K_pts'] = k_training_pts_list
for model in pred_list:
    data_frame[model[0] + "_GPY"] = model[2][:, 0]
    data_frame[model[0] + "_KF"] = model[2][:, 1]
# - Save the dataframe as .csv
data_frame.to_csv(Path(folder_csv_out.path, 'results.csv'), sep=',')
# - Save the config file as .json
# NOTE(review): the file is named 'inference_time_arpl.json' although this is
# the MISO timing script — confirm the name should not reflect this variant.
with open(Path(folder_json_out.path, 'inference_time_arpl.json'), 'w') as outfile:
    json.dump(json_config, outfile)
| 8,998 | 36.032922 | 113 | py |
GaPT | GaPT-main/experiments/timing/inference_time_siso_rt.py | """
The script will evaluate the computational time for one sample inference for both the GPyTorch and the
KF models.
miso
"""
import copy
import gc
import logging
import numpy
import time
import numpy as np
import pandas as pd
import torch.random
from tqdm import tqdm
import json
from pathlib import Path
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
from base.data.data_loader import GPDataLoader
from configs import OUTPUT_DIR
from configs.config_parsers import ArgParser, LoggerParser, MatplotlibParser
from model import datasets
from model import regressors
from tools.file_utils import Folder
##############################
#        LOAD CONFIG         #
##############################
# Parse the CLI / JSON configuration and unpack the sections used below.
parser = ArgParser()
config = parser.args
json_config = parser.json
dataset_config_train = config.DATASET
regressors_config = config.REGRESSORS
seeds_config = config.SEEDS
gpytorch_config = config.GPT_MODEL
session_id = config.SESSION_NAME
log_config = config.LOG
kpts_config = config.KPOINTS
measurmements_config = config.N_MEASUREMENTS
# SESSION NAME
session_name = session_id
# Fix the seed for numpy and pytorch so runs are reproducible.
np.random.seed(seeds_config["numpy"])
torch.manual_seed(seeds_config["torch"])
# OUTPUT FOLDERS
run_id = ''.join([session_name, '_RT_SISO', datetime.now().strftime(r'%m%d_%H%M%S')])
session_folder = Folder(OUTPUT_DIR, run_id)
folder_csv_out = Folder(session_folder.path, 'inf_time_benchmark')
folder_json_out = Folder(session_folder.path, 'configuration')
# LOGGING
LoggerParser.setup_logging(save_dir=session_folder.path)
logging.info("Session: {} started".format(session_name))
# MATPLOTLIB: apply the plotting style shared by all experiments.
matplt_conf = MatplotlibParser.get_matplot_conf()
plt.rcParams["figure.figsize"] = (matplt_conf["figsize"][0], matplt_conf["figsize"][1])
matplotlib.rc("font", **matplt_conf["font"])
matplotlib.rc("axes", **matplt_conf["axes"])
matplotlib.rc("figure", **matplt_conf["figure"])
matplotlib.rc("xtick.major", **matplt_conf["xtick"]["major"])
matplotlib.rc("ytick.major", **matplt_conf["ytick"]["major"])
matplotlib.rcParams["pdf.fonttype"] = matplt_conf["pdf_fonttype"]
matplotlib.rcParams["ps.fonttype"] = matplt_conf["ps_fonttype"]
matplot_colors = matplt_conf["colors"]
matplot_line_tick = matplt_conf["line_thickness"]
matplot_mark_size = matplt_conf["marker_size"]
matplot_line_style = matplt_conf["extra_linestyles"]
matplot_mark_style = matplt_conf["markers"]
#############################################
#             LOAD THE DATASET              #
#############################################
# TRAINING
# Get the parameters of the training dataset
dataset_args_tr = dataset_config_train["train"]["args"]
dataset_model_tr = dataset_config_train["train"]["class"]
dataset_class_tr = getattr(datasets, dataset_model_tr)
# Instantiate the training dataset
training_dataset = dataset_class_tr(**dataset_args_tr)
#############################################
#            NORMALIZE DATASET              #
#############################################
# Get the mean and std of the labels.
mean_y_tr = torch.mean(training_dataset.Y)
std_y_tr = torch.std(training_dataset.Y)
# Z-score every feature column.
# NOTE(review): columns after the first are divided by 10 * std — this extra
# attenuation of the non-time features is undocumented; confirm it matches
# the training pipeline.
for i in range(training_dataset.X[:, :].shape[1]):
    std_x_0_tr, mean_x_0_tr = torch.std_mean(training_dataset.X[:, i])
    if i > 0:
        std_x_0_tr = std_x_0_tr * 10
    training_dataset.X[:, i] = (training_dataset.X[:, i] - mean_x_0_tr) / std_x_0_tr
# Z-score the labels with the statistics computed above.
training_dataset.Y = (training_dataset.Y - mean_y_tr) / std_y_tr
#############################################
#             PREDICTIONS KF                #
#############################################
# TRAIN
# Extract the (normalized) training data as numpy arrays for the KF side.
x_tr_kal = training_dataset.X.numpy()
y_tr_kal = training_dataset.Y.numpy()
# Sort by the first column (presumably the timestamp — the sequential KF
# needs ordered inputs; confirm against the dataset class).
index_tr = np.argsort(x_tr_kal[:, 0])
# Indexes that undo the sort (map sorted order back to original order).
rollback_indexes_tr = np.empty_like(index_tr)
rollback_indexes_tr[index_tr] = np.arange(index_tr.size)
# Sort the training dataset used for the prediction matching.
x_tr_kal_sort = x_tr_kal[index_tr]
y_tr_kal_sort = y_tr_kal[index_tr]
##############################
#         K-PTS LIST         #
##############################
# Sizes (number of training points) at which inference time is measured.
pt_list = kpts_config["pt_list"]
k_training_pts_list = np.array(pt_list)
##############################
#         BENCHMARK          #
##############################
# For every regressor keep [name, model instance, (n_kpts x 2) timing matrix,
# SDE order]. Column 0 of the timing matrix holds GPyTorch times, column 1
# holds Kalman-filter times.
pred_list = []
# Get the list of model used for the test
regressor_list = regressors_config['used']
for regressor_name in regressor_list:
    model_class = getattr(regressors, regressor_name)
    model = model_class(id_model="", input_dim=len(training_dataset.x_column))
    model_order = None  # filled in after training, from the SDE state dimension
    model_name = model.__name__.split("_")[0]
    pred_list.append([model_name, model, np.zeros((len(k_training_pts_list), 2)), model_order])

for ix_pts in tqdm(range(0, len(k_training_pts_list)), desc="Iterating K pts.", colour="green", leave=True):
    k_pts = k_training_pts_list[ix_pts]
    # 1) Create a Dataloader sized for the current number of training points.
    dataloader = GPDataLoader(training_dataset, k_training_points=int(k_pts))
    # 2) Train the models and time the inference for both the GPyTorch and
    #    the KF implementation.
    for model_ix in tqdm(range(0, len(pred_list)), desc="Processing Models", colour="blue", leave=False):
        # Work on a fresh copy so each k_pts iteration starts untrained.
        model = copy.deepcopy(pred_list[model_ix][1])
        # a) Train the regressor with the dataloader
        model.train_hyperparams(dataloader, gpytorch_config)
        # b) Record the SDE state dimension of the trained model.
        pred_list[model_ix][3] = model._pssgp_cov.get_sde()[1].shape[
            0]  # TODO: ACCESS TO PRIVATE MEMBER, MAYBE CREATE A GETTER
        gpt_kernel, gpt_likelihood, gpt_model = model.get_gpt_models()
        # c) Extract the dataset points used for the benchmark
        x_test_pt = training_dataset.X[0:k_pts+1]
        y_test_pt = training_dataset.Y[0:k_pts+1]
        # Predict one sample at a time: with batched inputs PyTorch would
        # parallelize and the comparison with the sequential Kalman filter
        # would not be fair.
        # Perform multiple measurements, drop the min/max, store the mean.
        n_measurements = measurmements_config["n_measurments"]
        if n_measurements < 3:
            # At least 3 runs are needed because the fastest and slowest run
            # are discarded before averaging. (Fixed: the old message wrongly
            # claimed ">10" while the check accepts 3.)
            msg = "The number of measurements required to estimate the inference time must be >= 3. Received:{}" \
                .format(n_measurements)
            logging.error(msg)
            raise ValueError(msg)
        pred_time_gpt = numpy.zeros(n_measurements)
        pred_time_kf = numpy.zeros(n_measurements)
        for n_meas in tqdm(range(n_measurements),
                           desc=" GpyTorch: Performing multiple measurements", colour="purple", leave=False):
            # GPyTorch inference on a single point (the first test point).
            x_val = x_test_pt[0].unsqueeze(1).T
            t_now = time.time()
            with torch.no_grad():
                _ = gpt_likelihood(gpt_model(x_val))
            pred_time_gpt[n_meas] = time.time() - t_now
        del model._gpt_kernel, model._gpt_likelihood, model._gpt_model
        for n_meas in tqdm(range(n_measurements),
                           desc=" KF: Performing multiple measurements", colour="yellow", leave=False):
            # Kalman-filter inference on the same single test point.
            t_now = time.time()
            _, _, _, = model.predict_kf_mean_cov(x_training=x_test_pt.squeeze().numpy(), y_training=y_test_pt.squeeze().numpy(),
                                                 x_test=x_test_pt[0].numpy(), Qgain=0.1, z_mean=1.0, z_std=1.0)
            pred_time_kf[n_meas] = time.time() - t_now
        # Robust mean: drop the fastest and the slowest run before averaging.
        pred_time_gpt = np.delete(pred_time_gpt, [np.argmax(pred_time_gpt), np.argmin(pred_time_gpt)])
        pred_time_kf = np.delete(pred_time_kf, [np.argmax(pred_time_kf), np.argmin(pred_time_kf)])
        pred_list[model_ix][2][ix_pts][0] = np.mean(pred_time_gpt)
        pred_list[model_ix][2][ix_pts][1] = np.mean(pred_time_kf)
        del model, gpt_kernel, gpt_likelihood, gpt_model
        gc.collect()
    del dataloader
    gc.collect()
##############################
#          OUTPUTS           #
##############################
# - Create the dataframe: one row per K, one GPyTorch and one KF timing
#   column per model.
data_frame = pd.DataFrame(columns=['K_pts'])
data_frame['K_pts'] = k_training_pts_list
for model in pred_list:
    data_frame[model[0] + "_GPY"] = model[2][:, 0]
    data_frame[model[0] + "_KF"] = model[2][:, 1]
# - Save the dataframe as .csv
data_frame.to_csv(Path(folder_csv_out.path, 'results.csv'), sep=',')
# - Save the config file as .json
# NOTE(review): the file is named 'inference_time_arpl.json' although this is
# the SISO timing script — confirm the name should not reflect this variant.
with open(Path(folder_json_out.path, 'inference_time_arpl.json'), 'w') as outfile:
    json.dump(json_config, outfile)
| 8,807 | 35.7 | 128 | py |
GaPT | GaPT-main/experiments/drag/residual_arpl_miso.py | """
Residual lenarning MISO
"""
import logging
import json
import numpy as np
import pandas as pd
from pathlib import Path
import torch
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
from configs import OUTPUT_DIR
from base.data.data_loader import GPDataLoader
from configs.config_parsers import ArgParser, LoggerParser, MatplotlibParser
from model import datasets
from model import regressors
from tools.file_utils import Folder
##############################
#        LOAD CONFIG         #
##############################
# Parse the experiment configuration (CLI + json) into its sub-sections.
parser = ArgParser()
config = parser.args
json_config = parser.json
dataset_config = config.DATASET
regressors_config = config.REGRESSORS
seeds_config = config.SEEDS
gpytorch_config = config.GPT_MODEL
session_id = config.SESSION_NAME
log_config = config.LOG
# SESSION NAME
session_name = session_id
# Fix the Seed for numpy and pytorch (reproducible sampling/training)
np.random.seed(seeds_config["numpy"])
torch.manual_seed(seeds_config["torch"])
# OUTPUT FOLDER AND LOGGING: one timestamped folder per run, with
# sub-folders for csv outputs, config dump, figures and the trained model.
run_id = ''.join([session_name, '_', datetime.now().strftime(r'%m%d_%H%M%S')])
session_folder = Folder(OUTPUT_DIR, run_id)
folder_csv_out = Folder(session_folder.path, 'data_out')
folder_json_out = Folder(session_folder.path, 'configuration')
folder_figures_out = Folder(session_folder.path, 'figures')
folder_model_out = Folder(session_folder.path, 'model')
# LOGGING
LoggerParser.setup_logging(save_dir=session_folder.path)
logging.info("Session: {} started".format(session_name))
# MATPLOTLIB global style (fonts, sizes, colors) from the shared config.
matplt_conf = MatplotlibParser.get_matplot_conf()
plt.rcParams["figure.figsize"] = (matplt_conf["figsize"][0], matplt_conf["figsize"][1])
matplotlib.rc("font", **matplt_conf["font"])
matplotlib.rc("axes", **matplt_conf["axes"])
matplotlib.rc("figure", **matplt_conf["figure"])
matplotlib.rcParams["pdf.fonttype"] = matplt_conf["pdf_fonttype"]
matplotlib.rcParams["ps.fonttype"] = matplt_conf["ps_fonttype"]
matplot_colors = matplt_conf["colors"]
matplot_line_tick = matplt_conf["line_thickness"]
matplot_mark_size = matplt_conf["marker_size"]
matplot_line_style = matplt_conf["extra_linestyles"]
matplot_mark_style = matplt_conf["markers"]
#############################################
#           LOAD THE DATASET                #
#############################################
# TRAINING
# Get the parameters of the training dataset
dataset_args_tr = dataset_config["train"]["args"]
dataset_model_tr = dataset_config["train"]["class"]
dataset_class_tr = getattr(datasets, dataset_model_tr)
# Instantiate the training dataset twice: one copy is normalized in place
# below, the other keeps the raw (unscaled) values for plotting/export.
training_dataset = dataset_class_tr(**dataset_args_tr)
training_unscaled = dataset_class_tr(**dataset_args_tr)
# TEST
# Get the parameters of the testing dataset
dataset_args_ts = dataset_config["test"]["args"]
dataset_model_ts = dataset_config["test"]["class"]
dataset_class_ts = getattr(datasets, dataset_model_ts)
# Instantiate the testing dataset (again: scaled + unscaled copies)
testing_dataset = dataset_class_ts(**dataset_args_ts)
testing_unscaled = dataset_class_ts(**dataset_args_ts)
#############################################
#           NORMALIZE DATASETS              #
#############################################
# Get the mean and std of the regression targets
mean_y_tr = torch.mean(training_dataset.Y)
std_y_tr = torch.std(training_dataset.Y)
mean_y_ts = torch.mean(testing_dataset.Y)
std_y_ts = torch.std(testing_dataset.Y)
# TRAINING: z-score every input column in place.
for i in range(training_dataset.X[:, :].shape[1]):
    std_x_0_tr, mean_x_0_tr = torch.std_mean(training_dataset.X[:, i])
    if i > 0:
        # Columns 1.. (motor rpms) are scaled with an inflated std (x10),
        # presumably to shrink their influence relative to v_y --
        # TODO(review): confirm the factor is intentional.
        std_x_0_tr = std_x_0_tr*10
    training_dataset.X[:, i] = (training_dataset.X[:, i] - mean_x_0_tr) / std_x_0_tr
training_dataset.Y = (training_dataset.Y - mean_y_tr) / std_y_tr
# TESTING: same per-column z-score, computed with TEST statistics and
# without the x10 inflation -- NOTE(review): asymmetric w.r.t. training.
for i in range(testing_dataset.X[:, :].shape[1]):
    std_x_0_ts, mean_x_0_ts = torch.std_mean(testing_dataset.X[:, i])
    testing_dataset.X[:, i] = (testing_dataset.X[:, i] - mean_x_0_ts) / std_x_0_ts
testing_dataset.Y = (testing_dataset.Y - mean_y_ts) / std_y_ts
#############################################
#           REGRESSOR TRAINING              #
#############################################
# Define the dataloader that sub-samples k training points for the GP.
k_points = gpytorch_config["k_training_points"]
train_dataloader = GPDataLoader(training_dataset, k_training_points=k_points)
# Create the model from the config file
regressor_name = regressors_config['used'][0]  # We use just one regressor
model_class = getattr(regressors, regressor_name)
model = model_class(id_model="Residual_Regressor", input_dim=len(training_dataset.x_column))
# Fit the GP hyperparameters with GPyTorch
model.train_hyperparams(train_dataloader=train_dataloader, gp_training_conf=gpytorch_config)
############################################
#           PREDICTION GPYTORCH            #
#############################################
# TRAINING: predict on normalized inputs, then map the mean back to
# physical units; the confidence bounds are only rescaled by the std.
gpt_pred_mean_tr, gpt_pred_lower_tr, gpt_pred_upper_tr = model.predict_gpyt(training_dataset.X)
gpt_pred_mean_tr = mean_y_tr + gpt_pred_mean_tr * std_y_tr
gpt_pred_lower_tr = gpt_pred_lower_tr * std_y_tr
gpt_pred_upper_tr = gpt_pred_upper_tr * std_y_tr
# TEST -- NOTE(review): de-normalization uses TRAINING statistics
# (mean_y_tr/std_y_tr) while the KF branch below uses the test ones;
# confirm which convention is intended.
gpt_pred_mean_ts, gpt_pred_lower_ts, gpt_pred_upper_ts = model.predict_gpyt(testing_dataset.X)
gpt_pred_mean_ts = mean_y_tr + gpt_pred_mean_ts * std_y_tr
gpt_pred_lower_ts = gpt_pred_lower_ts * std_y_tr
gpt_pred_upper_ts = gpt_pred_upper_ts * std_y_tr
#############################################
#             PREDICTIONS KF                #
#############################################
# Kalman-filter based GP prediction. The filter consumes the training
# points sorted along the first input dimension (v_y), so the sort
# indexes (and their inverse) are computed and kept for later export.
# TRAIN
# get the (normalized) training data as numpy arrays
x_tr_kal = training_dataset.X.numpy()
y_tr_kal = training_dataset.Y.numpy()
t_tr_kal = training_dataset.timestamp.numpy()
# Select the training subset used for the kalman filter
x_tr_kal = x_tr_kal[1200:4800]
y_tr_kal = y_tr_kal[1200:4800]
t_tr_kal = t_tr_kal[1200:4800]
# Sort the points by the first input column
index_tr = np.argsort(x_tr_kal[:, 0])
# Get the rollback indexes (undo the sort; exported later)
rollback_indexes_tr = np.empty_like(index_tr)
rollback_indexes_tr[index_tr] = np.arange(index_tr.size, dtype=int)
# Sort the training dataset used for the prediction matching
x_tr_kal_sort = x_tr_kal[index_tr]
y_tr_kal_sort = y_tr_kal[index_tr]
t_tr_kal_sort = t_tr_kal[index_tr]
# Predict on the (full) training inputs
kf_pred_mean_tr, kf_pred_lower_tr, kf_pred_upper_tr = model.predict_kf_miso(x_training=x_tr_kal_sort,
                                                                            y_training=y_tr_kal_sort,
                                                                            z_mean=mean_y_tr.numpy(),
                                                                            z_std=std_y_tr.numpy(),
                                                                            x_test=training_dataset.X.numpy(),
                                                                            reversed=False)
# TEST
# BUGFIX: evaluate at the test INPUTS (testing_dataset.X); the previous
# code passed the test targets (testing_dataset.Y) as x_test, which is
# inconsistent with the training-set call above.
kf_pred_mean_ts, kf_pred_lower_ts, kf_pred_upper_ts = model.predict_kf_miso(x_training=x_tr_kal_sort,
                                                                            y_training=y_tr_kal_sort,
                                                                            z_mean=mean_y_ts.numpy(),
                                                                            z_std=std_y_ts.numpy(),
                                                                            x_test=testing_dataset.X.numpy(),
                                                                            reversed=True)
#########################################
#           FIGURES                     #
#########################################
# Shared y-axis limits: pad the raw target range by 10 % on each side.
# NOTE(review): `min + 0.1*min` pads only when min < 0 (and max > 0);
# for a positive minimum it would crop instead -- confirm data range.
min_y_tr = torch.min(training_unscaled.Y)
max_y_tr = torch.max(training_unscaled.Y)
y_lim_tr = [min_y_tr + (0.1 * min_y_tr), max_y_tr + (0.1 * max_y_tr)]
min_y_ts = torch.min(testing_unscaled.Y)
max_y_ts = torch.max(testing_unscaled.Y)
y_lim_ts = [min_y_ts + (0.1 * min_y_ts), max_y_ts + (0.1 * max_y_ts)]
##############
#  DATASET   #
##############
# Dataloader over the UNSCALED training data, only to visualize which
# K points were sampled for GP training.
train_un_dataloader = GPDataLoader(training_unscaled, k_training_points=k_points)
x_train_gp, y_train_gp, ts_train_gp = next(iter(train_un_dataloader))
# One overview panel per input channel: ground truth (full dataset)
# versus the sampled training points. All panels share the same y
# quantity (the residual acceleration), so they are produced from a
# single description table instead of six copy-pasted figure sections.
# Entries: (title, x of full dataset, x of sampled points, x label,
#           use scientific x ticks, output file stem)
_panels = [
    (r"Sampled Pts: $time$ VS. $\tilde{a}_y$", training_unscaled.timestamp,
     ts_train_gp, r"$elapsed$ $time$ $\mathrm{[s]}$", False, "miso_training_t_vy"),
    (r"Sampled Pts: $v_y$ VS. $\tilde{a}_y$", training_unscaled.X[:, 0],
     x_train_gp[:, 0], r"$v_y$ $\mathrm{[m/s]}$", False, "miso_training_vy"),
    (r"Sampled Pts: Motor 1 VS. $\tilde{a}_y$", training_unscaled.X[:, 1],
     x_train_gp[:, 1], r"Motor 1 speed $\mathrm{[rpm]}$", True, "miso_training_u1"),
    (r"Sampled Pts: Motor 2 VS. $\tilde{a}_y$", training_unscaled.X[:, 2],
     x_train_gp[:, 2], r"Motor 2 speed $\mathrm{[rpm]}$", True, "miso_training_u2"),
    (r"Sampled Pts: Motor 3 VS. $\tilde{a}_y$", training_unscaled.X[:, 3],
     x_train_gp[:, 3], r"Motor 3 speed $\mathrm{[rpm]}$", True, "miso_training_u3"),
    (r"Sampled Pts: Motor 4 VS. $\tilde{a}_y$", training_unscaled.X[:, 4],
     x_train_gp[:, 4], r"Motor 4 speed $\mathrm{[rpm]}$", True, "miso_training_u4"),
]
for fig_title, x_full, x_sampled, x_label, sci_ticks, fig_name in _panels:
    fig, ax = plt.subplots(1, 1)
    ax.set_title(fig_title)
    ax.plot(x_full, training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
            linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
    ax.plot(x_sampled, y_train_gp, color=matplot_colors[1], marker=matplot_mark_style[7],
            linewidth=matplot_line_tick, markersize=matplot_mark_size-1, linestyle=matplot_line_style[4], label="Training")
    ax.set_xlabel(x_label)
    if sci_ticks:
        # rpm values are large -> scientific notation on the x axis
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
    ax.grid(alpha=0.3)
    ax.set_ylim(y_lim_tr)
    ax.legend(loc="best", markerscale=5, fontsize=15)
    fig.tight_layout()
    path = Path(folder_figures_out.path, fig_name + ".pdf")
    fig.savefig(path, format="pdf", bbox_inches="tight")
##############
#  TRAINING  #
##############
# Training-set predictions (KF and GPyTorch) vs. ground truth, plotted
# against the first input (v_y).
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Training dataset: $v_y$ VS. $\tilde{a}_y$")
ax.plot(training_unscaled.X[:, 0], training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(training_unscaled.X[:, 0], kf_pred_mean_tr, color=matplot_colors[3], marker=matplot_mark_style[7],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(training_unscaled.X[:, 0], gpt_pred_mean_tr, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.set_xlabel(r"$v_y$ $\mathrm{[m/s]}$")
ax.set_ylabel(r"$\tilde{a}_y^t$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_tr)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "miso_KF_pred_train_1" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
# Same predictions against elapsed time, with the KF confidence band.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Training dataset: $time$ VS. $\tilde{a}_y$")
ax.plot(training_unscaled.timestamp, training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(training_unscaled.timestamp, kf_pred_mean_tr, color=matplot_colors[3], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(training_unscaled.timestamp, gpt_pred_mean_tr, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.fill_between(training_unscaled.timestamp, kf_pred_upper_tr, kf_pred_lower_tr, alpha=0.5, color=matplot_colors[3])
ax.set_xlabel(r"$elapsed$ $time$ $\mathrm{[s]}$")
ax.set_ylabel(r"$\tilde{a}_y^t$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_tr)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "miso_KF_pred_train_2" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
##############
#    TEST    #
##############
# Test-set predictions (KF and GPyTorch) vs. ground truth, plotted
# against the first input (v_y).
fig, ax = plt.subplots(1, 1)
# BUGFIX: the title said "$time$" but this panel plots against v_y
# (see the x data and x label below, and the training counterpart).
ax.set_title(r"Testing dataset: $v_y$ VS. $\tilde{a}_y$")
ax.plot(testing_unscaled.X[:, 0], testing_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(testing_unscaled.X[:, 0], kf_pred_mean_ts, color=matplot_colors[3], marker=matplot_mark_style[7],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(testing_unscaled.X[:, 0], gpt_pred_mean_ts, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.set_xlabel(r"$v_y^t$ $\mathrm{[m/s]}$")
ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_ts)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "miso_KF_pred_test_1" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
# Same predictions against elapsed time, with the KF confidence band.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Testing dataset: $time$ VS. $\tilde{a}_y$")
ax.plot(testing_unscaled.timestamp, testing_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(testing_unscaled.timestamp, kf_pred_mean_ts, color=matplot_colors[3], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(testing_unscaled.timestamp, gpt_pred_mean_ts, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.fill_between(testing_unscaled.timestamp, kf_pred_upper_ts, kf_pred_lower_ts, alpha=0.5, color=matplot_colors[3])
ax.set_xlabel(r"$elapsed$ $time$ $\mathrm{[s]}$")
ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_ts)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "miso_KF_pred_test_2" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
##############################
#          OUTPUTS           #
##############################
# - Save the trained GPyTorch model
model.dump_model(folder_model_out.path)
# - Create the dataframes
# Training dataset: predictions + timestamp + inputs + target columns.
columns_tr = ["gpt_pred_mean_tr", "gpt_pred_lower_tr", "gpt_pred_upper_tr",
              "kf_pred_mean_tr", "kf_pred_lower_tr", "kf_pred_upper_tr"]
val_tr = [gpt_pred_mean_tr.numpy(), gpt_pred_lower_tr.numpy(), gpt_pred_upper_tr.numpy(),
          kf_pred_mean_tr, kf_pred_lower_tr, kf_pred_upper_tr]
columns_tr.append(training_unscaled.t_column)
val_tr.append(training_unscaled.timestamp.numpy())
for i in range(len(training_unscaled.x_column)):
    columns_tr.append(training_unscaled.x_column[i])
    val_tr.append(training_unscaled.X[:, i].squeeze().numpy())
columns_tr.append(training_unscaled.y_column)
val_tr.append(training_unscaled.Y.numpy())
# Training subset used for the update phase of the KF.
# NOTE(review): x_tr_kal_sort is 2-D here (one column per input) while
# the other entries are 1-D; np.vstack below requires matching widths --
# verify this export path actually runs for the MISO case.
columns_tr_used = ["t_tr_sort_scal_KF", "x_tr_sort_scal_KF", "y_tr_sort_scal_KF", 'desorting_indexes']
val_tr_used = [t_tr_kal_sort, x_tr_kal_sort, y_tr_kal_sort, rollback_indexes_tr]
# test dataset: same layout as the training export.
columns_ts = ["gpt_pred_mean_ts", "gpt_pred_lower_ts", "gpt_pred_upper_ts",
              "kf_pred_mean_ts", "kf_pred_lower_ts", "kf_pred_upper_ts"]
val_ts = [gpt_pred_mean_ts.numpy(), gpt_pred_lower_ts.numpy(), gpt_pred_upper_ts.numpy(),
          kf_pred_mean_ts, kf_pred_lower_ts, kf_pred_upper_ts]
columns_ts.append(testing_unscaled.t_column)
val_ts.append(testing_unscaled.timestamp.numpy())
for i in range(len(testing_unscaled.x_column)):
    columns_ts.append(testing_unscaled.x_column[i])
    val_ts.append(testing_unscaled.X[:, i].squeeze().numpy())
columns_ts.append(testing_unscaled.y_column)
val_ts.append(testing_unscaled.Y.numpy())
data_frame_tr = pd.DataFrame(columns=columns_tr)
data_tr = np.asarray(val_tr).T
data_frame_tr[columns_tr] = data_tr
# NOTE(review): reset_index returns a new frame; the result is discarded
# here (and in the two calls below).
data_frame_tr.reset_index(drop=True)
data_frame_tr_used = pd.DataFrame(columns=columns_tr_used)
data_tr_used = np.vstack(val_tr_used).T
data_frame_tr_used[columns_tr_used] = data_tr_used
data_frame_tr_used.reset_index(drop=True)
data_frame_ts = pd.DataFrame(columns=columns_ts)
data_ts = np.asarray(val_ts).T
data_frame_ts[columns_ts] = data_ts
data_frame_ts.reset_index(drop=True)
# - Save the dataframes as .csv
data_frame_tr.to_csv(Path(folder_csv_out.path, 'out_train.csv'), sep=',', index=False)
data_frame_ts.to_csv(Path(folder_csv_out.path, 'out_test.csv'), sep=',', index=False)
data_frame_tr_used.to_csv(Path(folder_csv_out.path, 'out_train_used.csv'), sep=',', index=False)
# - Save the config file as .json
with open(Path(folder_json_out.path, 'residual_arpl_miso_rpms.json'), 'w') as outfile:
    json.dump(json_config, outfile)
| 21,062 | 43.064854 | 119 | py |
GaPT | GaPT-main/experiments/drag/residual_arpl_siso.py | """
Residual learning SISO
"""
import logging
import json
import sys
import yaml
import math
import numpy as np
import pandas as pd
from pathlib import Path
import torch
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
from configs import OUTPUT_DIR
from base.data.data_loader import GPDataLoader
from configs.config_parsers import ArgParser, LoggerParser, MatplotlibParser
from model import datasets
from model import regressors
from tools.file_utils import Folder
from sklearn.metrics import mean_squared_error
##############################
#        LOAD CONFIG         #
##############################
# Parse the experiment configuration (CLI + json) into its sub-sections.
parser = ArgParser()
config = parser.args
json_config = parser.json
dataset_config_train = config.DATASET
regressors_config = config.REGRESSORS
seeds_config = config.SEEDS
gpytorch_config = config.GPT_MODEL
session_id = config.SESSION_NAME
log_config = config.LOG
# SESSION NAME
session_name = session_id
# Fix the Seed for numpy and pytorch (reproducible sampling/training)
np.random.seed(seeds_config["numpy"])
torch.manual_seed(seeds_config["torch"])
# OUTPUT FOLDER AND LOGGING: one timestamped folder per run, with
# sub-folders for csv outputs, config dump, figures, yaml and model.
run_id = ''.join([session_name, '_', datetime.now().strftime(r'%m%d_%H%M%S')])
session_folder = Folder(OUTPUT_DIR, run_id)
folder_csv_out = Folder(session_folder.path, 'data_out')
folder_json_out = Folder(session_folder.path, 'configuration')
folder_figures_out = Folder(session_folder.path, 'figures')
folder_yamlcpp_out = Folder(session_folder.path, 'yaml')
folder_model_out = Folder(session_folder.path, 'model')
# LOGGING
LoggerParser.setup_logging(save_dir=session_folder.path)
logging.info("Session: {} started".format(session_name))
# MATPLOTLIB global style (fonts, sizes, colors) from the shared config.
matplt_conf = MatplotlibParser.get_matplot_conf()
plt.rcParams["figure.figsize"] = (matplt_conf["figsize"][0], matplt_conf["figsize"][1])
matplotlib.rc("font", **matplt_conf["font"])
matplotlib.rc("axes", **matplt_conf["axes"])
matplotlib.rc("figure", **matplt_conf["figure"])
matplotlib.rc("xtick.major", **matplt_conf["xtick"]["major"])
matplotlib.rc("ytick.major", **matplt_conf["ytick"]["major"])
matplotlib.rcParams["pdf.fonttype"] = matplt_conf["pdf_fonttype"]
matplotlib.rcParams["ps.fonttype"] = matplt_conf["ps_fonttype"]
matplot_colors = matplt_conf["colors"]
matplot_line_tick = matplt_conf["line_thickness"]
matplot_mark_size = matplt_conf["marker_size"]
matplot_line_style = matplt_conf["extra_linestyles"]
matplot_mark_style = matplt_conf["markers"]
#############################################
#           LOAD THE DATASET                #
#############################################
# TRAINING
# Get the parameters of the training dataset
dataset_args_tr = dataset_config_train["train"]["args"]
dataset_model_tr = dataset_config_train["train"]["class"]
dataset_class_tr = getattr(datasets, dataset_model_tr)
# Instantiate the training dataset twice: one copy is normalized in place
# below, the other keeps the raw (unscaled) values for plotting/export.
training_dataset = dataset_class_tr(**dataset_args_tr)
training_unscaled = dataset_class_tr(**dataset_args_tr)
# TEST
# Get the parameters of the testing dataset
dataset_args_ts = dataset_config_train["test"]["args"]
dataset_model_ts = dataset_config_train["test"]["class"]
dataset_class_ts = getattr(datasets, dataset_model_ts)
# Instantiate the testing dataset (again: scaled + unscaled copies)
testing_dataset = dataset_class_ts(**dataset_args_ts)
testing_unscaled = dataset_class_ts(**dataset_args_ts)
#############################################
#           NORMALIZE DATASETS              #
#############################################
# Get the mean and std of the regression targets
mean_y_tr = torch.mean(training_dataset.Y)
std_y_tr = torch.std(training_dataset.Y)
mean_y_ts = torch.mean(testing_dataset.Y)
std_y_ts = torch.std(testing_dataset.Y)
# Z-score the single input column (v_y) and the target, each dataset
# with its own statistics.
# TRAINING
mean_vx_tr = torch.mean(training_dataset.X[:, 0], dim=0)
std_vx_tr = torch.std(training_dataset.X[:, 0], dim=0)
training_dataset.X[:, 0] = (training_dataset.X[:, 0] - mean_vx_tr) / std_vx_tr
training_dataset.Y = (training_dataset.Y - mean_y_tr) / std_y_tr
# TESTING
mean_vx_ts = torch.mean(testing_dataset.X[:, 0], dim=0)
std_vx_ts = torch.std(testing_dataset.X[:, 0], dim=0)
testing_dataset.X[:, 0] = (testing_dataset.X[:, 0] - mean_vx_ts) / std_vx_ts
testing_dataset.Y = (testing_dataset.Y - mean_y_ts) / std_y_ts
#############################################
#           REGRESSOR TRAINING              #
#############################################
# Define the dataloader that sub-samples k training points for the GP.
k_points = gpytorch_config["k_training_points"]
train_dataloader = GPDataLoader(training_dataset, k_training_points=k_points)
# Create the model from the config file
regressor_name = regressors_config['used'][0]  # We use just one regressor
model_class = getattr(regressors, regressor_name)
model = model_class(id_model="Residual_Regressor", input_dim=1)
# Fit the GP hyperparameters with GPyTorch
model.train_hyperparams(train_dataloader=train_dataloader, gp_training_conf=gpytorch_config)
#############################################
#           PREDICTION GPYTORCH             #
#############################################
# TRAINING: predict on normalized inputs, then map the mean back to
# physical units; the confidence bounds are only rescaled by the std.
gpt_pred_mean_tr, gpt_pred_lower_tr, gpt_pred_upper_tr = model.predict_gpyt(training_dataset.X)
gpt_pred_mean_tr = mean_y_tr + gpt_pred_mean_tr * std_y_tr
gpt_pred_lower_tr = gpt_pred_lower_tr * std_y_tr
gpt_pred_upper_tr = gpt_pred_upper_tr * std_y_tr
# TEST -- NOTE(review): de-normalization uses TRAINING statistics
# (mean_y_tr/std_y_tr) while the KF branch below uses the test ones.
gpt_pred_mean_ts, gpt_pred_lower_ts, gpt_pred_upper_ts = model.predict_gpyt(testing_dataset.X)
gpt_pred_mean_ts = mean_y_tr + gpt_pred_mean_ts * std_y_tr
gpt_pred_lower_ts = gpt_pred_lower_ts * std_y_tr
gpt_pred_upper_ts = gpt_pred_upper_ts * std_y_tr
#############################################
#             PREDICTIONS KF                #
#############################################
# in GpyTorch the data are stored and sorted. we do
# the same thing here -> We sort the data (speed
# and residual) according the values of the speed.
# TRAINING
# get the (normalized) training data as numpy arrays
x_tr_kal = training_dataset.X.squeeze().numpy()
y_tr_kal = training_dataset.Y.squeeze().numpy()
t_tr_kal = training_dataset.timestamp.numpy()
# Select the training subset used for the kalman filter
x_tr_kal = x_tr_kal[1200:2400]
y_tr_kal = y_tr_kal[1200:2400]
t_tr_kal = t_tr_kal[1200:2400]
# a) Get the indexes to sorting elements in the dataset (if used the regressor works as the same as Gpytorch)
sort_index_tr = np.argsort(x_tr_kal)
# b) Get the rollback indexes (undo the sort; exported later)
rollback_indexes_tr = np.empty_like(sort_index_tr)
rollback_indexes_tr[sort_index_tr] = np.arange(sort_index_tr.size, dtype=int)
x_tr_kal_sorted = x_tr_kal[sort_index_tr]
y_tr_kal_sorted = y_tr_kal[sort_index_tr]
t_tr_kal_sorted = t_tr_kal[sort_index_tr]
kf_pred_mean_tr, kf_pred_lower_tr, kf_pred_upper_tr = model.predict_kf_mean_cov(
    x_training=x_tr_kal_sorted, y_training=y_tr_kal_sorted,
    z_mean=mean_y_tr.numpy(),
    z_std=std_y_tr.numpy(),
    x_test=training_dataset.X.numpy(),
    Qgain=0.01)
# TEST -- NOTE(review): Qgain here is 0.1 vs. 0.01 for training;
# confirm the asymmetry is intentional.
x_ts_kal = testing_dataset.X.squeeze().numpy()
y_ts_kal = testing_dataset.Y.squeeze().numpy()
t_ts_kal = testing_dataset.timestamp.squeeze().numpy()
kf_pred_mean_ts, kf_pred_lower_ts, kf_pred_upper_ts = model.predict_kf_mean_cov(
    x_training=x_tr_kal_sorted, y_training=y_tr_kal_sorted,
    z_mean=mean_y_ts.numpy(),
    z_std=std_y_ts.numpy(),
    x_test=testing_dataset.X.numpy(), Qgain=0.1)
#########################################
#           FIGURES                     #
#########################################
# Shared y-axis limits: pad the raw target range by 10 % on each side.
min_y_tr = torch.min(training_unscaled.Y)
max_y_tr = torch.max(training_unscaled.Y)
y_lim_tr = [min_y_tr + (0.1 * min_y_tr), max_y_tr + (0.1 * max_y_tr)]
min_y_ts = torch.min(testing_unscaled.Y)
max_y_ts = torch.max(testing_unscaled.Y)
y_lim_ts = [min_y_ts + (0.1 * min_y_ts), max_y_ts + (0.1 * max_y_ts)]
##############
#  DATASET   #
##############
# Dataloader over the UNSCALED data, only to visualize the K sampled
# training points.  NOTE(review): this rebinds (shadows) the
# `train_dataloader` used for hyperparameter training above.
train_dataloader = GPDataLoader(training_unscaled, k_training_points=k_points)
# get the x and Y training just to display figures
x_train_gp, y_train_gp, ts_train_gp = next(iter(train_dataloader))
# Ground truth vs. the K sampled training points, against v_y.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Sampled Pts: $v_y$ VS. $\tilde{a}_y$")
ax.plot(training_unscaled.X, training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(x_train_gp, y_train_gp, color=matplot_colors[1], marker=matplot_mark_style[5],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="Training")
ax.set_xlabel(r"$v_y$ $\mathrm{[m/s]}$")
ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_tr)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "siso_training_data_1" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
# Same data against elapsed time.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Sampled Pts: $time$ VS. $\tilde{a}_y$")
ax.plot(training_unscaled.timestamp, training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(ts_train_gp, y_train_gp, color=matplot_colors[1], marker=matplot_mark_style[5],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="Training")
ax.set_xlabel(r"$elapsed$ $time$ $\mathrm{[s]}$")
# BUGFIX: the plotted quantity is the residual acceleration (Y), not
# v_y -- the label said "$v_y$ [m/s]" (cf. the panel above and the MISO
# counterpart of this figure).
ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_tr)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "siso_training_data_2" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
##############
#  TRAINING  #
##############
# Training-set predictions (KF and GPyTorch) vs. ground truth,
# plotted against v_y.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Training dataset: $v_y$ VS. $\tilde{a}_y$")
ax.plot(training_unscaled.X, training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(training_unscaled.X, kf_pred_mean_tr, color=matplot_colors[3], marker=matplot_mark_style[7],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(training_unscaled.X, gpt_pred_mean_tr, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.set_xlabel(r"$v_y$ $\mathrm{[m/s]}$")
ax.set_ylabel(r"$\tilde{a}_y^t$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_tr)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "siso_KF_pred_train_1" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
# Same predictions against elapsed time, with the KF confidence band.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Training dataset: $time$ VS. $\tilde{a}_y$")
ax.plot(training_unscaled.timestamp, training_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(training_unscaled.timestamp, kf_pred_mean_tr, color=matplot_colors[3], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(training_unscaled.timestamp, gpt_pred_mean_tr, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.fill_between(training_unscaled.timestamp, kf_pred_upper_tr, kf_pred_lower_tr, alpha=0.5, color=matplot_colors[3])
ax.set_xlabel(r"$elapsed$ $time$ $\mathrm{[s]}$")
ax.set_ylabel(r"$\tilde{a}_y^t$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_tr)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "siso_KF_pred_train_2" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
##############
#    TEST    #
##############
# Test-set predictions vs. ground truth, against v_y.
# NOTE(review): the title uses "$V_y$" (capital V), inconsistent with
# every other panel -- likely a typo in the original string.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Testing dataset: $V_y$ VS. $\tilde{a}_y$")
ax.plot(testing_unscaled.X, testing_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(testing_unscaled.X, kf_pred_mean_ts, color=matplot_colors[3], marker=matplot_mark_style[7],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(testing_unscaled.X, gpt_pred_mean_ts, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.set_xlabel(r"$v_y$ $\mathrm{[m/s]}$")
ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_ts)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "siso_KF_pred_test_1" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
# Same predictions against elapsed time, with the KF confidence band.
fig, ax = plt.subplots(1, 1)
ax.set_title(r"Testing dataset: $time$ VS. $\tilde{a}_y$")
ax.plot(testing_unscaled.timestamp, testing_unscaled.Y, color=matplot_colors[0], marker=matplot_mark_style[2],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[4], label="GT")
ax.plot(testing_unscaled.timestamp, kf_pred_mean_ts, color=matplot_colors[3], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{KF}}$")
ax.plot(testing_unscaled.timestamp, gpt_pred_mean_ts, color=matplot_colors[1], marker=matplot_mark_style[8],
        linewidth=matplot_line_tick, markersize=matplot_mark_size, linestyle=matplot_line_style[0],
        label=r"$\tilde{a}_{y_{GPT}}$")
ax.fill_between(testing_unscaled.timestamp, kf_pred_upper_ts, kf_pred_lower_ts, alpha=0.5, color=matplot_colors[3])
ax.set_xlabel(r"$elapsed$ $time$ $\mathrm{[s]}$")
ax.set_ylabel(r"$\tilde{a}_y$ $\mathrm{[m/s^2]}$")
ax.grid(alpha=0.3)
ax.set_ylim(y_lim_ts)
ax.legend(loc="best", markerscale=5, fontsize=15)
fig.tight_layout()
path = Path(folder_figures_out.path, "siso_KF_pred_test_2" + ".pdf")
fig.savefig(path, format="pdf", bbox_inches="tight")
##############################
#          OUTPUTS           #
##############################
# - Save Gptmodel
model.dump_model(folder_model_out.path)
# - Create the dataframe
# Column layout: timestamp, input, target, then GPT mean/lower/upper and
# KF mean/lower/upper for the training split.
columns_tr = [training_unscaled.t_column, training_unscaled.x_column[0], training_unscaled.y_column,
              "gpt_pred_mean_tr", "gpt_pred_lower_tr", "gpt_pred_upper_tr",
              "kf_pred_mean_tr", "kf_pred_lower_tr", "kf_pred_upper_tr"]
val_tr = [training_unscaled.timestamp.numpy(), training_unscaled.X.squeeze().numpy(),
          training_unscaled.Y.squeeze().numpy(),
          gpt_pred_mean_tr.numpy(), gpt_pred_lower_tr.numpy(), gpt_pred_upper_tr.numpy(),
          kf_pred_mean_tr, kf_pred_lower_tr, kf_pred_upper_tr]
# Data actually fed to the Kalman filter (sorted/scaled) plus the indexes
# needed to undo the sorting afterwards.
columns_tr_used = ["t_tr_sort_scal_KF", "x_tr_sort_scal_KF", "y_tr_sort_scal_KF", 'desorting_indexes']
val_tr_used = [t_tr_kal_sorted, x_tr_kal_sorted, y_tr_kal_sorted, rollback_indexes_tr]
# Same layout as columns_tr/val_tr, for the test split.
columns_ts = [testing_unscaled.t_column, testing_unscaled.x_column[0], testing_unscaled.y_column,
              "gpt_pred_mean_ts", "gpt_pred_lower_ts", "gpt_pred_upper_ts",
              "kf_pred_mean_ts", "kf_pred_lower_ts", "kf_pred_upper_ts"]
val_ts = [testing_unscaled.timestamp.numpy(), testing_unscaled.X.squeeze().numpy(),
          testing_unscaled.Y.squeeze().numpy(),
          gpt_pred_mean_ts.numpy(), gpt_pred_lower_ts.numpy(), gpt_pred_upper_ts.numpy(),
          kf_pred_mean_ts, kf_pred_lower_ts, kf_pred_upper_ts]
# Assemble the output DataFrames (train, train-as-used-by-KF, test).
data_frame_tr = pd.DataFrame(columns=columns_tr)
data_tr = np.vstack(val_tr).T
data_frame_tr[columns_tr] = data_tr
# BUGFIX: DataFrame.reset_index is NOT in-place; the original code discarded
# its return value, making these statements no-ops. Assign the result back so
# the intended clean RangeIndex is actually applied.
data_frame_tr = data_frame_tr.reset_index(drop=True)
# Sanity check: column 0 of the stacked matrix must still be the timestamps.
np.testing.assert_array_equal(data_tr[:, 0], val_tr[0][:], err_msg='', verbose=True)
data_frame_tr_used = pd.DataFrame(columns=columns_tr_used)
data_tr_used = np.vstack(val_tr_used).T
data_frame_tr_used[columns_tr_used] = data_tr_used
data_frame_tr_used = data_frame_tr_used.reset_index(drop=True)
data_frame_ts = pd.DataFrame(columns=columns_ts)
data_ts = np.vstack(val_ts).T
data_frame_ts[columns_ts] = data_ts
data_frame_ts = data_frame_ts.reset_index(drop=True)
np.testing.assert_array_equal(data_ts[:, 0], val_ts[0][:], err_msg='', verbose=True)
# - Save the dataframe as .csv
data_frame_tr.to_csv(Path(folder_csv_out.path, 'out_train.csv'), sep=',', index=False)
data_frame_tr_used.to_csv(Path(folder_csv_out.path, 'out_train_used.csv'), sep=',', index=False)
data_frame_ts.to_csv(Path(folder_csv_out.path, 'out_test.csv'), sep=',', index=False)
# - Save the config file as .json
with open(Path(folder_json_out.path, 'residual_arpl_siso.json'), 'w') as outfile:
    json.dump(json_config, outfile)
# - Create the yamlfile
# Export the discrete SDE model (P_inf, F, L, H, Q) as flat lists so an
# external (e.g. C++) runtime can rebuild the matrices from the YAML file.
discrete_model = model._pssgp_cov.get_sde()
P_inf = discrete_model[0].numpy().flatten().tolist()
F = discrete_model[1].numpy().flatten().tolist()
L = discrete_model[2].numpy().flatten().tolist()
H = discrete_model[3].numpy().flatten().tolist()
Q = (discrete_model[4].numpy()).flatten().tolist()
xtrain_yaml = x_tr_kal_sorted.flatten().tolist()
ytrain_yaml = y_tr_kal_sorted.flatten().tolist()
R = float(model._kf.R)  # observation-noise variance of the Kalman filter
# NOTE(review): `Order` is never used below -- the dict recomputes
# int(model._order) directly; consider removing one of the two.
Order = int(model._order)
# Mean/std of the unscaled training targets, needed to de-normalise outputs.
meanY_yaml = np.mean(training_unscaled.Y.numpy())
std_yaml = np.std(training_unscaled.Y.numpy())
dict_file = {"axis": "y_drag",
             "y_drag": {
                 "R": R,
                 "Order": int(model._order),
                 "x_fact": 1,
                 "P_inf": P_inf,
                 "F": F,
                 "L": L,
                 "H": H,
                 "Q": Q,
                 "xtrain": xtrain_yaml,
                 "ytrain": ytrain_yaml,
                 "meanY": float(meanY_yaml),
                 "std": float(std_yaml),
             }
             }
with open(Path(folder_yamlcpp_out.path, 'model_params.yaml'), 'w', ) as outfile:
    documents = yaml.dump(dict_file, outfile, )
| 18,215 | 40.4 | 117 | py |
GaPT | GaPT-main/model/datasets/arpl.py | import torch
import pandas as pd
from base.data.base_dataset import BaseDataset
class Arpl(BaseDataset):
    """ARPL (NYU) flight-data dataset backed by a pre-processed CSV file.

    The CSVs are generated from ROS bags by ``arpl_stand_alone_preprocess.py``
    and can contain, among others, these column groups:
      - linear velocities:           "v_x_t", "v_y_t", "v_z_t"
      - quaternions:                 "q_w_t", "q_x_t", "q_y_t", "q_z_t"
      - angular velocities:          "w_x_t", "w_y_t", "w_z_t"
      - motor forces:                "f_0_t", "f_1_t", "f_2_t", "f_3_t"
      - linear accelerations:        "vdot_x", "vdot_y", "vdot_z"
      - angular accelerations:       "wdot_x", "wdot_y", "wdot_z"
      - residual linear acceleration: "res_vdot_x", "res_vdot_y", "res_vdot_z"
      - nominal lin./ang. velocities: "vdot_nom", "wdot_nom"

    The regressors in this framework consume single vectors for samples and
    labels, so simply pass the desired column labels as arguments.  If special
    pre-processing is needed, subclass Arpl and implement
    ``_extra_operations``.
    """

    def __init__(self, alias: str, csv_path: str,
                 x_col_label: str, y_col_label: str, t_col_label: str, start_ix: int, end_ix: int):
        # Remember which CSV columns hold inputs, targets and timestamps;
        # _init_dataset() (invoked by the base class) reads them back.
        self.x_column = x_col_label
        self.y_column = y_col_label
        self.t_column = t_col_label
        self.csv_path = csv_path
        # Prefix the alias so datasets from different sources stay distinct.
        super().__init__(dataset_label=''.join(['ARPL_', alias]))
        self.select_subset(start_ix, end_ix)

    def _init_dataset(self):
        """Load the CSV and expose X / Y / timestamp as float tensors."""
        frame = pd.read_csv(self.csv_path)
        self.X = torch.tensor(frame[self.x_column].to_numpy(), dtype=torch.float)
        self.Y = torch.tensor(frame[self.y_column].to_numpy(), dtype=torch.float)
        self.timestamp = torch.tensor(frame[self.t_column].to_numpy(), dtype=torch.float)
| 2,153 | 44.829787 | 113 | py |
GaPT | GaPT-main/model/regressors/rbf.py | import gpytorch
from base.gapt.kalman import KalmanSISO, KalmanMISO
from base.gapt.kernels import RBF
from base.regressor import GPRegressor
import logging
class RBFModel(GPRegressor):
    """GP regressor with an RBF kernel and a state-space/Kalman twin.

    Training runs through GPyTorch (RBF kernel + Gaussian likelihood); the
    learnt hyper-parameters are then transferred to a state-space (PSSGP)
    approximation of the RBF kernel so predictions can go through a Kalman
    filter.
    """

    def __init__(self, id_model: str, input_dim: int = 1):
        # Unique name so multiple instances of the same model type coexist.
        reg_name = ''.join(['Rbf_', id_model])
        # Kernel and likelihood used by the GPyTorch regressor.
        rbf_kernel = gpytorch.kernels.RBFKernel()
        rbf_likelihood = gpytorch.likelihoods.GaussianLikelihood()
        super().__init__(kernel=rbf_kernel, likelihood=rbf_likelihood, reg_name=reg_name, input_dim=input_dim)
        # Approximation order and balancing iterations for the RBF-SDE kernel.
        self._order = 6  # Order of the RBF approximation for (P)SSGP
        self._balancing_iter = 5  # Balancing steps that make the resulting SDE more stable

    def _create_sde_model(self):
        """Build the SDE approximation and the matching Kalman filter.

        Reads the trained noise/lengthscale from the GPyTorch model, converts
        the kernel to its discrete state-space form (F, L, H, Q) and creates a
        SISO or MISO Kalman filter depending on ``input_dim``.

        Raises:
            ValueError: if ``input_dim`` is not a positive integer.
        """
        noise = self._gpt_likelihood.noise.detach().numpy().flatten()[0]
        lengthscale = self._gpt_model.covar_module.lengthscale.detach().numpy().flatten()[0]
        self._pssgp_cov = RBF(variance=1, lengthscales=lengthscale,
                              order=self._order, balancing_iter=self._balancing_iter)
        # get_sde() returns (P_inf, F, L, H, Q); the steady-state covariance
        # at index 0 is not needed by the Kalman constructors, so the unused
        # local that previously held it has been removed.
        discrete_model = self._pssgp_cov.get_sde()
        F = discrete_model[1].numpy()
        L = discrete_model[2].numpy()
        H = discrete_model[3].numpy()
        Q = discrete_model[4].numpy()
        if (self.input_dim == 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanSISO(F, L, H, Q, noise, order=self._order)
        elif (self.input_dim > 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanMISO(F, L, H, Q, noise, order=self._order, repeat=self.input_dim)
        else:
            msg = "The dimension input input_dim must be an integer > 0. Received value:{}".format(self.input_dim)
            logging.error(msg)
            raise ValueError(msg)
        self._is_ready = True
        self._reset_filter()
| 2,161 | 41.392157 | 114 | py |
GaPT | GaPT-main/model/regressors/materns.py | import gpytorch
from base.gapt.kalman import KalmanSISO, KalmanMISO
from base.gapt.kernels import Matern12, Matern32, Matern52
from base.regressor import GPRegressor
import logging
r"""
Below the classes for the Matérn family. According to the equation of the kernel and the implementation in Gpytorch
thr e smoothness parameters \nu define the three kernels:
- Matérn_12 : \nu = 1/2
- Matérn_32 : \nu = 3/2
- Matérn_52 : \nu = 5/2
Note: even if it is possible to set smaller values, the results would be are less smooth.
"""
class Matern12Model(GPRegressor):
    """GP regressor with a Matérn-1/2 kernel and a state-space/Kalman twin.

    Training runs through GPyTorch; the learnt hyper-parameters are then
    transferred to the exact state-space form of the Matérn-1/2 kernel so
    predictions can go through a Kalman filter.
    """

    def __init__(self, id_model: str, input_dim: int = 1):
        # Unique name so multiple instances of the same model type coexist.
        reg_name = ''.join(['Matern12_', id_model])
        # Kernel (nu = 1/2) and likelihood used by the GPyTorch regressor.
        nu_matern = 1 / 2
        matern12_kernel = gpytorch.kernels.MaternKernel(nu=nu_matern)
        matern12_likelihood = gpytorch.likelihoods.GaussianLikelihood()
        super().__init__(kernel=matern12_kernel, likelihood=matern12_likelihood, reg_name=reg_name, input_dim=input_dim)
        # State dimension and balancing iterations for the Matérn SDE.
        self._order = 1  # Fixed for this Matérn kernel; only used by the Kalman filter
        self._balancing_iter = 3  # Balancing steps that make the resulting SDE more stable

    def _create_sde_model(self):
        """Build the SDE form of the kernel and the matching Kalman filter.

        Raises:
            ValueError: if ``input_dim`` is not a positive integer.
        """
        noise = self._gpt_likelihood.noise.detach().numpy().flatten()[0]
        lengthscale = self._gpt_model.covar_module.lengthscale.detach().numpy().flatten()[0]
        self._pssgp_cov = Matern12(variance=1, lengthscales=lengthscale,
                                   balancing_iter=self._balancing_iter)
        # get_sde() returns (P_inf, F, L, H, Q); the steady-state covariance
        # at index 0 is not needed here (the unused local was removed).
        discrete_model = self._pssgp_cov.get_sde()
        F = discrete_model[1].numpy()
        L = discrete_model[2].numpy()
        H = discrete_model[3].numpy()
        Q = discrete_model[4].numpy()
        if (self.input_dim == 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanSISO(F, L, H, Q, noise, order=self._order)
        elif (self.input_dim > 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanMISO(F, L, H, Q, noise, order=self._order, repeat=self.input_dim)
        else:
            msg = "The dimension input input_dim must be an integer > 0. Received value:{}".format(self.input_dim)
            logging.error(msg)
            raise ValueError(msg)
        self._is_ready = True
        self._reset_filter()
class Matern32Model(GPRegressor):
    """GP regressor with a Matérn-3/2 kernel and a state-space/Kalman twin.

    Training runs through GPyTorch; the learnt hyper-parameters are then
    transferred to the exact state-space form of the Matérn-3/2 kernel so
    predictions can go through a Kalman filter.
    """

    def __init__(self, id_model: str, input_dim: int = 1):
        # Unique name so multiple instances of the same model type coexist.
        reg_name = ''.join(['Matern32_', id_model])
        # Kernel (nu = 3/2) and likelihood used by the GPyTorch regressor.
        nu_matern = 3 / 2
        matern32_kernel = gpytorch.kernels.MaternKernel(nu=nu_matern)
        matern32_likelihood = gpytorch.likelihoods.GaussianLikelihood()
        super().__init__(kernel=matern32_kernel, likelihood=matern32_likelihood, reg_name=reg_name, input_dim=input_dim)
        # State dimension and balancing iterations for the Matérn SDE.
        self._order = 2  # Fixed for this Matérn kernel; only used by the Kalman filter
        self._balancing_iter = 3  # Balancing steps that make the resulting SDE more stable

    def _create_sde_model(self):
        """Build the SDE form of the kernel and the matching Kalman filter.

        Raises:
            ValueError: if ``input_dim`` is not a positive integer.
        """
        noise = self._gpt_likelihood.noise.detach().numpy().flatten()[0]
        lengthscale = self._gpt_model.covar_module.lengthscale.detach().numpy().flatten()[0]
        self._pssgp_cov = Matern32(variance=1, lengthscales=lengthscale,
                                   balancing_iter=self._balancing_iter)
        # get_sde() returns (P_inf, F, L, H, Q); the steady-state covariance
        # at index 0 is not needed here (the unused local was removed).
        discrete_model = self._pssgp_cov.get_sde()
        F = discrete_model[1].numpy()
        L = discrete_model[2].numpy()
        H = discrete_model[3].numpy()
        Q = discrete_model[4].numpy()
        if (self.input_dim == 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanSISO(F, L, H, Q, noise, order=self._order)
        elif (self.input_dim > 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanMISO(F, L, H, Q, noise, order=self._order, repeat=self.input_dim)
        else:
            msg = "The dimension input input_dim must be an integer > 0. Received value:{}".format(self.input_dim)
            logging.error(msg)
            raise ValueError(msg)
        self._is_ready = True
        self._reset_filter()
class Matern52Model(GPRegressor):
    """GP regressor with a Matérn-5/2 kernel and a state-space/Kalman twin.

    Training runs through GPyTorch; the learnt hyper-parameters are then
    transferred to the exact state-space form of the Matérn-5/2 kernel so
    predictions can go through a Kalman filter.
    """

    def __init__(self, id_model: str, input_dim: int = 1):
        # Unique name so multiple instances of the same model type coexist.
        reg_name = ''.join(['Matern52_', id_model])
        # Kernel (nu = 5/2) and likelihood used by the GPyTorch regressor.
        nu_matern = 5 / 2
        matern52_kernel = gpytorch.kernels.MaternKernel(nu=nu_matern)
        matern52_likelihood = gpytorch.likelihoods.GaussianLikelihood()
        super().__init__(kernel=matern52_kernel, likelihood=matern52_likelihood, reg_name=reg_name, input_dim=input_dim)
        # State dimension and balancing iterations for the Matérn SDE.
        self._order = 3  # Fixed for this Matérn kernel; only used by the Kalman filter
        self._balancing_iter = 0  # Balancing steps that make the resulting SDE more stable

    def _create_sde_model(self):
        """Build the SDE form of the kernel and the matching Kalman filter.

        Raises:
            ValueError: if ``input_dim`` is not a positive integer.
        """
        noise = self._gpt_likelihood.noise.detach().numpy().flatten()[0]
        lengthscale = self._gpt_model.covar_module.lengthscale.detach().numpy().flatten()[0]
        self._pssgp_cov = Matern52(variance=1.0, lengthscales=lengthscale,
                                   balancing_iter=self._balancing_iter)
        # get_sde() returns (P_inf, F, L, H, Q); the steady-state covariance
        # at index 0 is not needed here (the unused local was removed).
        discrete_model = self._pssgp_cov.get_sde()
        F = discrete_model[1].numpy()
        L = discrete_model[2].numpy()
        H = discrete_model[3].numpy()
        Q = discrete_model[4].numpy()
        if (self.input_dim == 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanSISO(F, L, H, Q, noise, order=self._order)
        elif (self.input_dim > 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanMISO(F, L, H, Q, noise, order=self._order, repeat=self.input_dim)
        else:
            msg = "The dimension input input_dim must be an integer > 0. Received value:{}".format(self.input_dim)
            logging.error(msg)
            raise ValueError(msg)
        self._is_ready = True
        self._reset_filter()
| 6,874 | 46.743056 | 120 | py |
GaPT | GaPT-main/model/regressors/periodic.py | import logging
import gpytorch
from base.gapt.kalman import KalmanSISO, KalmanMISO
from base.gapt.kernels import Periodic
from base.regressor import GPRegressor
class PerRbfModel(GPRegressor):
    """GP regressor with a (scaled periodic) x (scaled RBF) product kernel
    and a state-space/Kalman twin.

    Training runs through GPyTorch; the learnt hyper-parameters are then
    transferred to the Periodic state-space kernel so predictions can go
    through a Kalman filter.
    """

    def __init__(self, id_model: str, input_dim: int = 1):
        # Unique name so multiple instances of the same model type coexist.
        reg_name = ''.join(['PerRbF_', id_model])
        # Product kernel (periodic x RBF) and likelihood for GPyTorch training.
        per_rbf_kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel()) * \
                         gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        # per_rbf_kernel = gpytorch.kernels.PeriodicKernel()
        rbf_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint=gpytorch.constraints.GreaterThan(1e-3))
        super().__init__(kernel=per_rbf_kernel, likelihood=rbf_likelihood, reg_name=reg_name, input_dim=input_dim)
        # Approximation order and balancing iterations for the periodic SDE.
        self._order = 2  # Order of the approximation for (P)SSGP
        self._balancing_iter = 2  # Balancing steps that make the resulting SDE more stable

    def _create_sde_model(self):
        """Build the periodic SDE approximation and the matching Kalman filter.

        Extracts the trained hyper-parameters from the GPyTorch product
        kernel, builds the Periodic state-space kernel and instantiates a
        SISO or MISO Kalman filter depending on ``input_dim``.

        Raises:
            ValueError: if ``input_dim`` is not a positive integer.
        """
        # Trained observation-noise variance.
        noise = self._gpt_likelihood.noise.detach().numpy().flatten()[0]
        # Periodic factor (kernel '0'): outputscale, lengthscale and period.
        outputscale_0 = self._gpt_model.covar_module.kernels._modules['0'].outputscale.detach().numpy().flatten()[0]
        lengthscale_0_unscaled = \
            self._gpt_model.covar_module.kernels._modules['0'].base_kernel.lengthscale.detach().numpy().flatten()[0]
        period_0_unscaled = \
            self._gpt_model.covar_module.kernels._modules['0'].base_kernel.period_length.detach().numpy().flatten()[0]
        # BUGFIX: these two assignments were crossed in the original code
        # (the period was derived from the lengthscale and vice versa).
        per_lengthscale = lengthscale_0_unscaled * outputscale_0
        per_period = period_0_unscaled * outputscale_0
        # RBF factor (kernel '1'): outputscale and lengthscale.
        outputscale_1 = self._gpt_model.covar_module.kernels._modules['1'].outputscale.detach().numpy().flatten()[0]
        lengthscale_1_unscaled = \
            self._gpt_model.covar_module.kernels._modules['1'].base_kernel.lengthscale.detach().numpy().flatten()[0]
        rbf_lengthscale = lengthscale_1_unscaled * outputscale_1
        # TODO: add logging for the extracted hyper-parameters.
        self._pssgp_cov = Periodic(lengthscale_base=rbf_lengthscale,
                                   lengthscale_per=per_lengthscale,
                                   period_per=per_period,
                                   order=self._order)
        # get_sde() returns (P_inf, F, L, H, Q); the steady-state covariance
        # at index 0 is not needed by the Kalman constructors below.
        discrete_model = self._pssgp_cov.get_sde()
        # The periodic state-space model has dimension 2 * (order + 1).
        self._order = 2 * (self._order + 1)
        F = discrete_model[1].numpy()
        L = discrete_model[2].numpy()
        H = discrete_model[3].numpy()
        Q = discrete_model[4].numpy()
        if (self.input_dim == 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanSISO(F, L, H, Q, noise, order=self._order)
        elif (self.input_dim > 1) and (isinstance(self.input_dim, int)):
            self._kf = KalmanMISO(F, L, H, Q, noise, order=self._order, repeat=self.input_dim)
        else:
            msg = "The dimension input input_dim must be an integer > 0. Received value:{}".format(self.input_dim)
            logging.error(msg)
            raise ValueError(msg)
        self._is_ready = True
        self._reset_filter()
| 3,446 | 49.691176 | 121 | py |
character-bert | character-bert-main/main.py | # Adapted from Huggingface's transformers library:
# https://github.com/allenai/allennlp/
""" Main script. """
import os
import logging
import argparse
import datetime
from collections import Counter
import torch
from torch.nn import CrossEntropyLoss
from transformers import (
BertConfig,
BertTokenizer,
BertForTokenClassification,
BertForSequenceClassification
)
from utils.character_cnn import CharacterIndexer
from modeling.character_bert import CharacterBertModel
from data import load_classification_dataset, load_sequence_labelling_dataset
from utils.misc import set_seed
from utils.data import retokenize, build_features
from utils.training import train, evaluate
from download import MODEL_TO_URL
# Models accepted by --embedding: every downloadable CharacterBERT/BERT
# variant plus the stock Hugging Face 'bert-base-uncased'.
AVAILABLE_MODELS = list(MODEL_TO_URL.keys()) + ['bert-base-uncased']
def parse_args():
    """Parse command line arguments and initialize the experiment.

    Sets up logging, selects CPU/GPU, seeds the RNGs and derives the
    timestamped output directory. Returns the populated argparse Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        type=str,
        required=True,
        choices=['classification', 'sequence_labelling'],
        help="The evaluation task."
    )
    parser.add_argument(
        "--embedding",
        type=str,
        required=True,
        choices=AVAILABLE_MODELS,
        help="The model to use."
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Whether to apply lowercasing during tokenization."
    )
    parser.add_argument(
        "--train_batch_size",
        type=int,
        default=1,
        help="Batch size to use for training."
    )
    parser.add_argument(
        "--eval_batch_size",
        type=int,
        default=1,
        help="Batch size to use for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of gradient accumulation steps."
    )
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=3,
        help="Number of training epochs."
    )
    parser.add_argument(
        "--validation_ratio",
        default=0.5, type=float, help="Proportion of training set to use as a validation set.")
    parser.add_argument(
        "--learning_rate",
        default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument(
        "--weight_decay",
        default=0.1, type=float, help="Weight decay if we apply some.")
    parser.add_argument(
        "--warmup_ratio",
        # BUGFIX: this is a ratio in [0, 1]; the original `type=int` made any
        # user-supplied fractional value fail (int("0.1") raises ValueError).
        default=0.1, type=float, help="Linear warmup over warmup_ratio*total_steps.")
    parser.add_argument(
        "--adam_epsilon",
        default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument(
        "--max_grad_norm",
        default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--do_train",
        action="store_true",
        help="Do training & validation."
    )
    parser.add_argument(
        "--do_predict",
        action="store_true",
        help="Do prediction on the test set."
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed."
    )
    args = parser.parse_args()

    # Timestamped output directory: results/<task>/<embedding>/<time>__seed-N
    args.start_time = datetime.datetime.now().strftime('%d-%m-%Y_%Hh%Mm%Ss')
    args.output_dir = os.path.join(
        'results',
        args.task,
        args.embedding,
        f'{args.start_time}__seed-{args.seed}')

    # --------------------------------- INIT ---------------------------------
    # Set up logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(filename)s - %(message)s",
        datefmt="%d/%m/%Y %H:%M:%S",
        level=logging.INFO)

    # Check for GPUs
    if torch.cuda.is_available():
        assert torch.cuda.device_count() == 1  # This script doesn't support multi-gpu
        args.device = torch.device("cuda")
        logging.info("Using GPU (`%s`)", torch.cuda.get_device_name(0))
    else:
        args.device = torch.device("cpu")
        logging.info("Using CPU")

    # Set random seed for reproducibility
    set_seed(seed_value=args.seed)
    return args
def main(args):
    """Run the end-to-end experiment: load data, build the model, train
    and/or evaluate according to ``args.do_train`` / ``args.do_predict``.
    """
    # --------------------------------- DATA ---------------------------------
    # Tokenizer: plain BERT models ship their own vocab; CharacterBERT models
    # reuse BERT's basic (whitespace/punctuation) tokenizer + a character indexer.
    logging.disable(logging.INFO)
    try:
        tokenizer = BertTokenizer.from_pretrained(
            os.path.join('pretrained-models', args.embedding),
            do_lower_case=args.do_lower_case)
    except OSError:
        # For CharacterBert models use BertTokenizer.basic_tokenizer for tokenization
        # and CharacterIndexer for indexing
        tokenizer = BertTokenizer.from_pretrained(
            os.path.join('pretrained-models', 'bert-base-uncased'),
            do_lower_case=args.do_lower_case)
        tokenizer = tokenizer.basic_tokenizer
        characters_indexer = CharacterIndexer()
    logging.disable(logging.NOTSET)
    tokenization_function = tokenizer.tokenize

    # Pre-processing: apply basic tokenization (both) then split into wordpieces (BERT only)
    data = {}
    for split in ['train', 'test']:
        if args.task == 'classification':
            func = load_classification_dataset
        elif args.task == 'sequence_labelling':
            func = load_sequence_labelling_dataset
        else:
            raise NotImplementedError
        data[split] = func(step=split, do_lower_case=args.do_lower_case)
        retokenize(data[split], tokenization_function)

    # Carve a validation set out of the front of the training data.
    logging.info('Splitting training data into train / validation sets...')
    data['validation'] = data['train'][:int(args.validation_ratio * len(data['train']))]
    data['train'] = data['train'][int(args.validation_ratio * len(data['train'])):]
    logging.info('New number of training sequences: %d', len(data['train']))
    logging.info('New number of validation sequences: %d', len(data['validation']))

    # Count target labels or classes
    if args.task == 'classification':
        counter_all = Counter(
            [example.label for example in data['train'] + data['validation'] + data['test']])
        counter = Counter(
            [example.label for example in data['train']])

        # Maximum sequence length is either 512 or maximum token sequence length + 3
        max_seq_length = min(
            512,
            3 + max(
                map(len, [
                    e.tokens_a if e.tokens_b is None else e.tokens_a + e.tokens_b
                    for e in data['train'] + data['validation'] + data['test']
                ])
            )
        )
    elif args.task == 'sequence_labelling':
        counter_all = Counter(
            [label
             for example in data['train'] + data['validation'] + data['test']
             for label in example.label_sequence])
        counter = Counter(
            [label
             for example in data['train']
             for label in example.label_sequence])

        # Maximum sequence length is either 512 or maximum token sequence length + 5
        max_seq_length = min(
            512,
            5 + max(
                map(len, [
                    e.token_sequence
                    for e in data['train'] + data['validation'] + data['test']
                ])
            )
        )
    else:
        raise NotImplementedError

    labels = sorted(counter_all.keys())
    num_labels = len(labels)
    logging.info("Goal: predict the following labels")
    for i, label in enumerate(labels):
        logging.info("* %s: %s (count: %s)", label, i, counter[label])

    # Input features: list[token indices] (BERT) or list[list[character indices]] (CharacterBERT)
    pad_token_id = None
    if 'character' not in args.embedding:
        pad_token_id = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
    pad_token_label_id = None
    if args.task == 'sequence_labelling':
        pad_token_label_id = CrossEntropyLoss().ignore_index

    dataset = {}
    logging.info("Maximum sequence length: %s", max_seq_length)
    for split in data:
        dataset[split] = build_features(
            args,
            split=split,
            tokenizer=tokenizer \
                if 'character' not in args.embedding \
                else characters_indexer,
            examples=data[split],
            labels=labels,
            pad_token_id=pad_token_id,
            pad_token_label_id=pad_token_label_id,
            max_seq_length=max_seq_length)

    del data  # Not used anymore

    # --------------------------------- MODEL ---------------------------------
    # Initialize model
    if args.task == 'classification':
        model = BertForSequenceClassification
    elif args.task == 'sequence_labelling':
        model = BertForTokenClassification
    else:
        raise NotImplementedError

    logging.info('Loading `%s` model...', args.embedding)
    logging.disable(logging.INFO)
    config = BertConfig.from_pretrained(
        os.path.join('pretrained-models', args.embedding),
        num_labels=num_labels)
    if 'character' not in args.embedding:
        model = model.from_pretrained(
            os.path.join('pretrained-models', args.embedding),
            config=config)
    else:
        # CharacterBERT: build the task head from config, then swap in the
        # pre-trained CharacterBERT encoder.
        model = model(config=config)
        model.bert = CharacterBertModel.from_pretrained(
            os.path.join('pretrained-models', args.embedding),
            config=config)
    logging.disable(logging.NOTSET)

    model.to(args.device)
    logging.info('Model:\n%s', model)

    # ------------------------------ TRAIN / EVAL ------------------------------
    # Log args
    logging.info('Using the following arguments for training:')
    for k, v in vars(args).items():
        logging.info("* %s: %s", k, v)

    # Training
    if args.do_train:
        global_step, train_loss, best_val_metric, best_val_epoch = train(
            args=args,
            dataset=dataset,
            model=model,
            tokenizer=tokenizer,
            labels=labels,
            pad_token_label_id=pad_token_label_id
        )
        logging.info("global_step = %s, average training loss = %s", global_step, train_loss)
        logging.info("Best performance: Epoch=%d, Value=%s", best_val_epoch, best_val_metric)

    # Evaluation on test data
    if args.do_predict:

        # Load best model
        if args.task == 'classification':
            model = BertForSequenceClassification
        elif args.task == 'sequence_labelling':
            model = BertForTokenClassification
        else:
            raise NotImplementedError

        logging.disable(logging.INFO)
        if 'character' not in args.embedding:
            model = model.from_pretrained(args.output_dir)
        else:
            state_dict = torch.load(
                os.path.join(args.output_dir, 'pytorch_model.bin'), map_location='cpu')
            model = model(config=config)
            model.bert = CharacterBertModel(config=config)
            model.load_state_dict(state_dict, strict=True)
        logging.disable(logging.NOTSET)
        model.to(args.device)

        # Compute predictions and metrics
        results, _ = evaluate(
            args=args,
            eval_dataset=dataset["test"],
            model=model, labels=labels,
            pad_token_label_id=pad_token_label_id
        )

        # Save metrics
        with open(os.path.join(args.output_dir, 'performance_on_test_set.txt'), 'w') as f:
            # BUGFIX: best_val_metric / best_val_epoch only exist when
            # training ran in this process; the original code raised a
            # NameError when --do_predict was used without --do_train.
            if args.do_train:
                f.write(f'best validation score: {best_val_metric}\n')
                f.write(f'best validation epoch: {best_val_epoch}\n')
            f.write('--- Performance on test set ---\n')
            for k, v in results.items():
                f.write(f'{k}: {v}\n')
if __name__ == "__main__":
main(parse_args())
| 11,712 | 32.755043 | 97 | py |
character-bert | character-bert-main/download.py | # Adapted from:
# https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
""" Download pre-trained models from Google drive. """
import os
import argparse
import tarfile
import logging
import requests
# Configure root logging once at import time so all messages share the same
# timestamped format.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(filename)s - %(message)s",
    datefmt="%d/%m/%Y %H:%M:%S",
    level=logging.INFO)

# Google Drive share links for each downloadable pre-trained model; the file
# id embedded in each URL is extracted by download_model().
MODEL_TO_URL = {
    'general_character_bert': 'https://drive.google.com/open?id=11-kSfIwSWrPno6A4VuNFWuQVYD8Bg_aZ',
    'medical_character_bert': 'https://drive.google.com/open?id=1LEnQHAqP9GxDYa0I3UrZ9YV2QhHKOh2m',
    'general_bert': 'https://drive.google.com/open?id=1fwgKG2BziBZr7aQMK58zkbpI0OxWRsof',
    'medical_bert': 'https://drive.google.com/open?id=1GmnXJFntcEfrRY4pVZpJpg7FH62m47HS',
}
def download_file_from_google_drive(id, destination):
    """Fetch a file from Google Drive and stream it to *destination*.

    Large files trigger Google's "can't scan for viruses" interstitial; in
    that case a confirmation token is read from the response cookies and the
    request is replayed with it.
    """
    base_url = "https://docs.google.com/uc?export=download"
    session = requests.Session()

    response = session.get(base_url, params={'id': id}, stream=True)
    confirm = get_confirm_token(response)
    if confirm:
        response = session.get(
            base_url, params={'id': id, 'confirm': confirm}, stream=True)

    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google's download-warning cookie value, or None if absent."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination):
    """Stream the response body to *destination* in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out:
        for chunk in response.iter_content(chunk_size):
            # Skip keep-alive chunks (empty byte strings).
            if chunk:
                out.write(chunk)
def download_model(name):
    """Download pre-trained model *name* into pretrained-models/<name>/.

    Skips the download when the target directory already exists. Plain BERT
    is fetched file-by-file from the Hugging Face S3 bucket; the other models
    come as a tar.xz archive from Google Drive, which is extracted and then
    deleted.
    """
    if os.path.exists(os.path.join('pretrained-models', name)):
        logging.info(f"Path {os.path.join('pretrained-models', name)} already exists.")
        logging.info(f'Skipped download of {name} model.')
    else:
        os.makedirs(os.path.join('pretrained-models', name), exist_ok=False)
        if name == 'bert-base-uncased':
            urls = {
                'model': 'https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin',
                'vocabulary': 'https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt',
                'config': 'https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json'
            }
            logging.info(f'Downloading {name} model (~420MB folder)')
            for _, url in urls.items():
                # e.g. ...-pytorch_model.bin -> local file name 'model.bin'... keep
                # the last dash-separated piece, matching the upstream layout.
                file_name = os.path.basename(url).split('-')[-1]
                file_destination = os.path.join('pretrained-models', name, file_name)
                response = requests.get(url)
                with open(file_destination, mode='wb') as f:
                    f.write(response.content)
        else:
            file_destination = os.path.join('pretrained-models', 'model.tar.xz')
            file_id = MODEL_TO_URL[name].split('id=')[-1]

            logging.info(f'Downloading {name} model (~200MB tar.xz archive)')
            download_file_from_google_drive(file_id, file_destination)

            logging.info('Extracting model from archive (~420MB folder)')
            # BUGFIX: use a context manager so the archive handle is always
            # closed, even if extraction fails.
            with tarfile.open(file_destination, "r:xz") as tar:
                tar.extractall(path=os.path.dirname(file_destination))

            logging.info('Removing archive')
            os.remove(file_destination)

        logging.info('Done.')
def main():
    """CLI entry point: download one pre-trained model, or all of them."""
    downloadable = list(MODEL_TO_URL.keys()) + ['bert-base-uncased']

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        choices=downloadable + ['all'],
        help="A keyword for downloading a specific pre-trained model"
    )
    args = parser.parse_args()

    if args.model == 'all':
        for model_name in downloadable:
            download_model(name=model_name)
    else:
        download_model(name=args.model)
if __name__ == "__main__":
main()
| 4,006 | 36.801887 | 115 | py |
character-bert | character-bert-main/utils/character_cnn.py | # Functions are imported/adapted from AllenAI's AllenNLP library:
# https://github.com/allenai/allennlp/
""" Indexer functions for ELMo-style character embeddings. """
from typing import Dict, List, Callable, Any
import torch
# Index reserved for padding in the character-id vocabulary.
PADDING_VALUE = 0
def _make_bos_eos(
character: int,
padding_character: int,
beginning_of_word_character: int,
end_of_word_character: int,
max_word_length: int):
char_ids = [padding_character] * max_word_length
char_ids[0] = beginning_of_word_character
char_ids[1] = character
char_ids[2] = end_of_word_character
return char_ids
def pad_sequence_to_length(
    sequence: List,
    desired_length: int,
    default_value: Callable[[], Any] = lambda: 0,
    padding_on_right: bool = True,
) -> List:
    """
    Truncate or pad *sequence* to exactly *desired_length* and return the
    result as a new list; the input is never modified.

    When ``padding_on_right`` is True the sequence keeps its prefix and is
    padded at the end; otherwise it keeps its suffix and is padded in front.
    ``default_value`` is called once and the returned object is repeated for
    every padding slot (so a mutable pad value is shared between slots).
    """
    if padding_on_right:
        trimmed = sequence[:desired_length]
    else:
        trimmed = sequence[-desired_length:]
    # A negative multiplier yields an empty list, so no padding is added when
    # the (trimmed) sequence already reaches the desired length.
    padding = [default_value()] * (desired_length - len(trimmed))
    return trimmed + padding if padding_on_right else padding + trimmed
class CharacterMapper:
    """
    Maps individual tokens to sequences of character ids.

    Each token becomes a fixed-length (50) list of ids: a begin-of-word
    marker, the token's UTF-8 bytes (ids 0-255), an end-of-word marker, then
    padding.  Every id is finally shifted by +1 so that 0 is free to act as
    the padding value in batched tensors.
    """
    max_word_length = 50

    # char ids 0-255 come from utf-8 encoding bytes;
    # ids 256-261 are reserved for the special markers below.
    beginning_of_sentence_character = 256  # <begin sentence>
    end_of_sentence_character = 257  # <end sentence>
    beginning_of_word_character = 258  # <begin word>
    end_of_word_character = 259  # <end word>
    padding_character = 260  # <padding>
    mask_character = 261  # <mask>

    # Pre-built character-id templates for the sentence-level special tokens.
    beginning_of_sentence_characters = _make_bos_eos(
        beginning_of_sentence_character,
        padding_character,
        beginning_of_word_character,
        end_of_word_character,
        max_word_length,
    )
    end_of_sentence_characters = _make_bos_eos(
        end_of_sentence_character,
        padding_character,
        beginning_of_word_character,
        end_of_word_character,
        max_word_length,
    )
    mask_characters = _make_bos_eos(
        mask_character,
        padding_character,
        beginning_of_word_character,
        end_of_word_character,
        max_word_length,
    )
    # [PAD] maps to all -1s, which become all 0s after the final +1 shift
    # in convert_word_to_char_ids.
    pad_characters = [PADDING_VALUE - 1] * max_word_length

    # Special-token surface forms follow BERT's vocabulary conventions.
    bos_token = "[CLS]"
    eos_token = "[SEP]"
    pad_token = "[PAD]"
    mask_token = "[MASK]"

    def __init__(self, tokens_to_add: Dict[str, int] = None) -> None:
        # Optional mapping of extra tokens to dedicated single character ids.
        self.tokens_to_add = tokens_to_add or {}

    def convert_word_to_char_ids(self, word: str) -> List[int]:
        """Return the (already +1-shifted) character-id sequence for one token."""
        if word in self.tokens_to_add:
            # User-registered token: encode as <bow>, custom id, <eow>.
            char_ids = [CharacterMapper.padding_character] * CharacterMapper.max_word_length
            char_ids[0] = CharacterMapper.beginning_of_word_character
            char_ids[1] = self.tokens_to_add[word]
            char_ids[2] = CharacterMapper.end_of_word_character
        elif word == CharacterMapper.bos_token:
            char_ids = CharacterMapper.beginning_of_sentence_characters
        elif word == CharacterMapper.eos_token:
            char_ids = CharacterMapper.end_of_sentence_characters
        elif word == CharacterMapper.mask_token:
            char_ids = CharacterMapper.mask_characters
        elif word == CharacterMapper.pad_token:
            char_ids = CharacterMapper.pad_characters
        else:
            # Regular token: raw UTF-8 bytes, truncated to leave room for the
            # begin/end-of-word markers (hence max_word_length - 2).
            word_encoded = word.encode("utf-8", "ignore")[
                : (CharacterMapper.max_word_length - 2)
            ]
            char_ids = [CharacterMapper.padding_character] * CharacterMapper.max_word_length
            char_ids[0] = CharacterMapper.beginning_of_word_character
            for k, chr_id in enumerate(word_encoded, start=1):
                char_ids[k] = chr_id
            char_ids[len(word_encoded) + 1] = CharacterMapper.end_of_word_character
        # Shift every id by +1 so that 0 is reserved for padding/masking.
        return [c + 1 for c in char_ids]

    def __eq__(self, other) -> bool:
        # Two mappers are equal iff they carry the same extra-token mapping.
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented
class CharacterIndexer:
    """Turns batches of token sequences into padded character-id tensors."""

    def __init__(self) -> None:
        self._mapper = CharacterMapper()

    def tokens_to_indices(self, tokens: List[str]) -> List[List[int]]:
        """Map every token of one sequence to its character-id list."""
        convert = self._mapper.convert_word_to_char_ids
        return [convert(token) for token in tokens]

    def _default_value_for_padding(self):
        # One all-padding "word" used to pad short sequences.
        return [PADDING_VALUE] * CharacterMapper.max_word_length

    def as_padded_tensor(self, batch: List[List[str]], as_tensor=True, maxlen=None) -> torch.Tensor:
        """Index a batch of token sequences and pad them to a common length."""
        if maxlen is None:
            maxlen = max(len(tokens) for tokens in batch)
        padded_batch = []
        for tokens in batch:
            indices = self.tokens_to_indices(tokens)
            padded_batch.append(
                pad_sequence_to_length(
                    indices, maxlen,
                    default_value=self._default_value_for_padding))
        if not as_tensor:
            return padded_batch
        return torch.LongTensor(padded_batch)
if __name__ == "__main__":
    # Smoke test: index a small ragged batch and print the padded tensor.
    inputs = [
        '[CLS] hi [PAD] [SEP]'.split(),
        '[CLS] hello , my [MASK] is hicham [SEP]'.split()
    ]
    output = CharacterIndexer().as_padded_tensor(inputs)
    print('input:', inputs)
    print('output.shape:', output.shape)
    print('output:', output)
| 5,864 | 33.910714 | 100 | py |
character-bert | character-bert-main/utils/misc.py | """ Miscellaneous utils. """
import random
import logging
import torch
import numpy as np
def set_seed(seed_value):
    """Seed every RNG used by the project: Python, NumPy, PyTorch (and CUDA)."""
    seeders = [random.seed, np.random.seed, torch.manual_seed]
    if torch.cuda.is_available():
        # Seed all visible GPUs, not just the current one.
        seeders.append(torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed_value)
    logging.info("Random seed: %d", seed_value)
| 391 | 23.5 | 50 | py |
character-bert | character-bert-main/utils/training.py | # Functions are adapted from Huggingface's transformers library:
# https://github.com/huggingface/transformers
""" Defines training and evaluation functions. """
import os
import logging
import datetime
import tqdm
import numpy as np
import sklearn.metrics as sklearn_metrics
import metrics.sequence_labelling as seqeval_metrics
import torch
from torch.utils.data import SequentialSampler, RandomSampler, DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from utils.misc import set_seed
def train(args, dataset, model, tokenizer, labels, pad_token_label_id):
    """
    Train `model` on `dataset['train']`, evaluating on `dataset['validation']`
    once per epoch and checkpointing to `args.output_dir` whenever validation
    F1 improves.

    Returns a tuple ``(global_step, mean_training_loss, best_f1, best_epoch)``.
    """
    train_dataset = dataset['train']
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size)
    # One optimizer step per `gradient_accumulation_steps` batches; metrics are
    # logged (and the model evaluated) once per epoch worth of optimizer steps.
    n_train_steps__single_epoch = len(train_dataloader) // args.gradient_accumulation_steps
    n_train_steps = n_train_steps__single_epoch * args.num_train_epochs
    args.logging_steps = n_train_steps__single_epoch
    # Prepare optimizer and schedule (linear warmup and decay).
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(args.warmup_ratio*n_train_steps),
        num_training_steps=n_train_steps
    )
    # Train!
    logging.info("***** Running training *****")
    logging.info(" Num examples = %d", len(train_dataset))
    logging.info(" Num Epochs = %d", args.num_train_epochs)
    logging.info(
        " Total train batch size (w. accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps
    )
    logging.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logging.info(" Total optimization steps = %d", n_train_steps)
    logging.info(" Using linear warmup (ratio=%s)", args.warmup_ratio)
    logging.info(" Using weight decay (value=%s)", args.weight_decay)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    tr_loss, logging_loss = 0.0, 0.0
    best_metric, best_epoch = -1.0, -1  # Init best -1 so that 0 > best
    model.zero_grad()
    train_iterator = tqdm.trange(epochs_trained, int(args.num_train_epochs), desc="Epoch")
    set_seed(seed_value=args.seed)  # Added here for reproductibility
    for num_epoch in train_iterator:
        epoch_iterator = tqdm.tqdm(train_dataloader, desc="Iteration")
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            # (always 0 here: checkpoint resumption is not wired up).
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            # Batch layout matches build_features: ids, mask, segments, labels.
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.gradient_accumulation_steps > 1:
                # Average the loss over accumulated mini-batches.
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if global_step % args.logging_steps == 0:
                    # Log metrics
                    # -- Only evaluate when single GPU otherwise metrics may not average well
                    results, _ = evaluate(
                        args=args,
                        eval_dataset=dataset["validation"],
                        model=model, labels=labels,
                        pad_token_label_id=pad_token_label_id
                    )
                    # NOTE(review): logging_loss is assigned but never read.
                    logging_loss = tr_loss
                    metric = results['f1']
                    if metric > best_metric:
                        best_metric = metric
                        best_epoch = num_epoch
                        # Save model checkpoint (best validation F1 so far).
                        if not os.path.exists(args.output_dir):
                            os.makedirs(args.output_dir)
                        model.save_pretrained(args.output_dir)
                        # CharacterBERT has no wordpiece vocab to serialize.
                        if 'character' not in args.embedding:
                            tokenizer.save_pretrained(args.output_dir)
                        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
                        logging.info("Saving model checkpoint to %s", args.output_dir)
                        #torch.save(optimizer.state_dict(), os.path.join(args.output_dir, "optimizer.pt"))
                        #torch.save(scheduler.state_dict(), os.path.join(args.output_dir, "scheduler.pt"))
                        #logging.info("Saving optimizer and scheduler states to %s", args.output_dir)
    return global_step, tr_loss / global_step, best_metric, best_epoch
def evaluate(args, eval_dataset, model, labels, pad_token_label_id):
    """
    Evaluate `model` on `eval_dataset` and return ``(results, preds_list)``
    where `results` holds loss/precision/recall/f1 (plus accuracy for
    classification) and `preds_list` the per-example predictions.
    """
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(
        eval_dataset,
        sampler=eval_sampler,
        batch_size=args.eval_batch_size)
    # Evaluate!
    logging.info("***** Running evaluation *****")
    logging.info(" Num examples = %d", len(eval_dataset))
    logging.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm.tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3]}
            outputs = model(**inputs)
            # Passing labels makes the model return (loss, logits, ...).
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        # Accumulate logits and gold labels across batches on the CPU.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    label_map = {i: label for i, label in enumerate(labels)}
    if args.task == 'classification':
        # One prediction per example; micro-averaged metrics.
        preds_list = np.argmax(preds, axis=1)
        results = {
            "loss": eval_loss,
            "precision": sklearn_metrics.precision_score(out_label_ids, preds_list, average='micro'),
            "recall": sklearn_metrics.recall_score(out_label_ids, preds_list, average='micro'),
            "f1": sklearn_metrics.f1_score(out_label_ids, preds_list, average='micro'),
            "accuracy": sklearn_metrics.accuracy_score(out_label_ids, preds_list),
        }
    else:
        # Sequence labelling: one prediction per token; positions marked with
        # pad_token_label_id ([CLS]/[SEP]/padding/wordpiece tails) are dropped
        # before computing entity-level (seqeval) metrics.
        preds = np.argmax(preds, axis=2)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "loss": eval_loss,
            "precision": seqeval_metrics.precision_score(out_label_list, preds_list),
            "recall": seqeval_metrics.recall_score(out_label_list, preds_list),
            "f1": seqeval_metrics.f1_score(out_label_list, preds_list),
        }
    logging.info("***** Eval results *****")
    for key in sorted(results.keys()):
        logging.info(" %s = %s", key, str(results[key]))
    return results, preds_list
| 8,849 | 39.227273 | 106 | py |
character-bert | character-bert-main/utils/data.py | # Functions are adapted from Huggingface's transformers library:
# https://github.com/huggingface/transformers
""" Helper functions. """
import random
import logging
from collections import namedtuple
import tqdm
import numpy as np
import torch
from torch.utils.data import TensorDataset
def retokenize(examples, tokenization_function):
    """
    Re-tokenize every example in place with `tokenization_function` (e.g. a
    wordpiece tokenizer).  For sequence-labelling examples, BIO tags are
    expanded so they stay aligned with the new (longer) token sequences.
    Mutates `examples`; returns nothing.
    """
    for i, example in tqdm.tqdm(enumerate(examples), desc='retokenizing examples...'):
        # Examples are namedtuples; dispatch on the type name.
        if type(example).__name__ == 'ClassificationExample':
            assert example.tokens_a
            if example.tokens_b is not None:
                assert example.tokens_b
            assert example.label
            # Retokenize each token and flatten the pieces.
            new_tokens_a = []
            for token_a in example.tokens_a:
                new_tokens_a.extend(tokenization_function(token_a))
            # Fall back to [''] so downstream asserts on non-emptiness hold.
            example = example._replace(tokens_a=new_tokens_a if new_tokens_a else [''])
            if example.tokens_b is not None:
                new_tokens_b = []
                for token_b in example.tokens_b:
                    new_tokens_b.extend(tokenization_function(token_b))
                example = example._replace(tokens_b=new_tokens_b if new_tokens_b else [''])
        elif type(example).__name__ == 'SequenceLabellingExample':
            tokens = example.token_sequence
            labels = example.label_sequence
            assert tokens
            assert len(tokens) == len(labels)
            new_tokens, new_labels = [], []
            for token, label in zip(tokens, labels):
                retokenized_token = tokenization_function(token)
                if retokenized_token != [token]:
                    if label != 'O':
                        label_pos = label[:2]
                        label_type = label.split('-')[-1]
                        # 'B-X' token split into pieces -> 'B-X' then 'I-X's;
                        # 'I-X' token split into pieces -> all 'I-X'.
                        if label_pos == 'B-':
                            new_label = [label] + (len(retokenized_token) - 1) * ['I-' + label_type]
                        elif label_pos == 'I-':
                            new_label = [label] * len(retokenized_token)
                        # NOTE(review): a non-O label whose prefix is neither
                        # 'B-' nor 'I-' leaves new_label unset (or stale from a
                        # previous iteration) -- assumes strict BIO tagging.
                    else:
                        new_label = [label] * len(retokenized_token)
                    new_tokens.extend(retokenized_token)
                    new_labels.extend(new_label)
                else:
                    # Tokenizer left the token unchanged: keep it as is.
                    new_tokens.append(token)
                    new_labels.append(label)
            if new_tokens:
                example = example._replace(token_sequence=new_tokens)
                example = example._replace(label_sequence=new_labels)
            else:
                # Degenerate case: everything tokenized to nothing.
                example = example._replace(token_sequence=[''])
                example = example._replace(label_sequence=['O'])
        # Write the updated namedtuple back into the input list.
        examples[i] = example
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_examples_to_features__classification(
        args, tokenizer, examples, labels,
        pad_token_id, pad_token_label_id, max_seq_length):
    """
    Convert classification examples into `ClassificationFeatures` namedtuples
    (input_ids, input_mask, segment_ids, label_id), each padded/truncated to
    `max_seq_length`.  Tokens follow the BERT convention:
    ``[CLS] a... [SEP]`` (segment 0) or ``[CLS] a... [SEP] b... [SEP]``
    (segments 0/1).
    """
    InputFeatures = namedtuple(
        'ClassificationFeatures',
        ['input_ids', 'input_mask', 'segment_ids', 'label_id'])
    label_map = {label : i for i, label in enumerate(labels)}
    features = []
    # NOTE(review): max_len (and seq_len below) are computed but never used.
    max_len = 0
    for (ex_index, example) in enumerate(examples):
        tokens_a = example.tokens_a
        tokens_b = None
        if example.tokens_b:
            tokens_b = example.tokens_b
            seq_len = len(tokens_a) + len(tokens_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            seq_len = len(tokens_a)
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        # Segment ids mark the first sequence with 0 and the (optional)
        # second sequence, including its trailing [SEP], with 1.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)
        # Tokenization of inputs: wordpiece vocab ids for stock BERT, padded
        # character-id tensors for CharacterBERT (already max_seq_length long).
        if args.embedding == 'bert-base-uncased':
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
        else:
            input_ids = tokenizer.as_padded_tensor([tokens], maxlen=max_seq_length)[0]
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(tokens)
        # Zero-pad up to the sequence length.  input_ids only needs explicit
        # padding in the wordpiece case (the character indexer pads itself).
        padding_length = max_seq_length - len(input_mask)
        if args.embedding == 'bert-base-uncased':
            input_ids += [pad_token_id] * padding_length
        input_mask += [0] * padding_length
        segment_ids += [0] * padding_length
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        # Log the first few examples for manual inspection.
        if ex_index < 3:
            logging.info("*** Example ***")
            logging.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logging.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id)
        )
    return features
def convert_examples_to_features__tagging(
        args, tokenizer, examples, labels,
        pad_token_id, pad_token_label_id, max_seq_length):
    """
    Convert sequence-labelling examples into `SequenceLabelligFeatures`
    namedtuples (input_ids, input_mask, segment_ids, label_ids), each
    padded/truncated to `max_seq_length`.  Positions that should not
    contribute to the loss ([CLS], [SEP], padding, wordpiece continuation
    pieces) get `pad_token_label_id`.
    """
    InputFeatures = namedtuple(
        'SequenceLabelligFeatures',
        ['input_ids', 'input_mask', 'segment_ids', 'label_ids'])
    label_map = {label: i for i, label in enumerate(labels)}
    data_iterator = tqdm.tqdm(enumerate(examples), total=len(examples))
    features = []
    for i, example in data_iterator:
        tokens = example.token_sequence
        # NOTE(review): this shadows the `labels` parameter (the label
        # vocabulary); harmless here since label_map was built above.
        labels = example.label_sequence
        #label_ids = [label_map[label] for label in labels]
        # Wordpiece continuation tokens ('##...') are excluded from the loss.
        label_ids = []
        for token, label in zip(tokens, labels):
            if token.startswith('##'):
                label_ids.append(pad_token_label_id)
            else:
                label_ids.append(label_map[label])
        # Account for [CLS] and [SEP] with "- 2"
        special_tokens_count = 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]
        # Single-sentence BERT input: [CLS] tokens... [SEP], all segment 0.
        # Handle [SEP]
        tokens += ["[SEP]"]
        label_ids += [pad_token_label_id]
        segment_ids = [0] * len(tokens)  # Only one sentence, so all segment ids are 0
        # Handle [CLS]
        tokens = ["[CLS]"] + tokens
        label_ids = [pad_token_label_id] + label_ids
        segment_ids = [0] + segment_ids
        # Tokenization of inputs: wordpiece vocab ids for stock BERT, padded
        # character-id tensors for CharacterBERT (already max_seq_length long).
        if args.embedding == 'bert-base-uncased':
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
        else:
            input_ids = tokenizer.as_padded_tensor([tokens], maxlen=max_seq_length)[0]
        # The mask has 1 for real tokens and 0 for padding tokens.
        # Only real tokens are attended to.
        input_mask = [1] * len(tokens)
        # Zero-pad up to the sequence length.  input_ids only needs explicit
        # padding in the wordpiece case (the character indexer pads itself).
        padding_length = max_seq_length - len(input_mask)
        if args.embedding == 'bert-base-uncased':
            input_ids += [pad_token_id] * padding_length
        input_mask += [0] * padding_length
        segment_ids += [0] * padding_length
        label_ids += [pad_token_label_id] * padding_length
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        # Log the first few examples for manual inspection.
        if i < 3:
            logging.info("*** Example ***")
            logging.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logging.info("labels: %s", " ".join([str(x) for x in labels]))
            logging.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
        features.append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_ids=label_ids)
        )
    return features
def build_features(
        args, split, tokenizer, examples, labels,
        pad_token_id, pad_token_label_id, max_seq_length):
    """Convert raw examples into a `TensorDataset` of padded input tensors.

    Note: `split` is accepted for API symmetry but not used here.
    """
    logging.info("Building features from data...")
    # Dispatch on the task: token-level tagging vs. sequence classification.
    if args.task == 'sequence_labelling':
        converter = convert_examples_to_features__tagging
    else:
        converter = convert_examples_to_features__classification
    features = converter(
        args, tokenizer, examples, labels,
        pad_token_id, pad_token_label_id, max_seq_length
    )
    # Convert to Tensors and build dataset.  Wordpiece ids are plain python
    # ints; CharacterBERT ids arrive as tensors, hence the `.tolist()`.
    if args.embedding == 'bert-base-uncased':
        id_rows = [f.input_ids for f in features]
    else:
        id_rows = [f.input_ids.tolist() for f in features]
    all_input_ids = torch.tensor(id_rows, dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    if args.task == 'sequence_labelling':
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    else:
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
| 13,028 | 41.029032 | 100 | py |
character-bert | character-bert-main/modeling/character_cnn.py | # Functions are imported/adapted from AllenAI's AllenNLP library:
# https://github.com/allenai/allennlp/
"""Defines the character embedding module (adapted from ELMo)"""
import json
from typing import Dict, Callable
import numpy
import torch
from utils.character_cnn import CharacterMapper, CharacterIndexer
class Highway(torch.nn.Module):
    """
    Stack of Highway layers (https://arxiv.org/abs/1505.00387).

    Each layer computes :math:`y = g * x + (1 - g) * f(A(x))` where
    :math:`A` is a linear map, :math:`f` an element-wise non-linearity and
    :math:`g = sigmoid(B(x))` a learned element-wise gate.  :math:`A` and
    :math:`B` are fused into one Linear whose output is chunked in two.

    # Parameters

    input_dim : `int`, required
        Dimensionality of :math:`x`; input shape is `(batch_size, ..., input_dim)`.
    num_layers : `int`, optional (default=`1`)
        Number of highway layers applied to the input.
    activation : `Callable[[torch.Tensor], torch.Tensor]`, optional (default=`torch.nn.functional.relu`)
        The non-linearity :math:`f`.
    """

    def __init__(
        self,
        input_dim: int,
        num_layers: int = 1,
        activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
    ) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._layers = torch.nn.ModuleList(
            [torch.nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
        )
        self._activation = activation
        # Bias the gate half of every fused projection towards 1 so that, at
        # initialization, each layer mostly carries its input through
        # unchanged.  The gate bias is the second half of each bias vector.
        for layer in self._layers:
            layer.bias[input_dim:].data.fill_(1)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        state = inputs
        for layer in self._layers:
            # First half of the projection feeds the non-linearity, second
            # half produces the gate logits (keep in sync with the bias init).
            transformed, gate_logits = layer(state).chunk(2, dim=-1)
            gate = torch.sigmoid(gate_logits)
            state = gate * state + (1 - gate) * self._activation(transformed)
        return state
class CharacterCNN(torch.nn.Module):
    """
    Computes context insensitive token representations from each token's characters.

    The ELMo-style pipeline: character-id embedding -> multiple width-1..7
    1D convolutions with max-pooling -> highway layers -> linear projection
    to `output_dim` (BERT's hidden size by default).
    """

    def __init__(self,
                 output_dim: int = 768,
                 requires_grad: bool = True) -> None:
        super().__init__()
        # Hyper-parameters mirror the original ELMo character encoder.
        self._options = {
            'char_cnn': {
                'activation': 'relu',
                'filters': [
                    [1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 1024]
                ],
                'n_highway': 2,
                'embedding': {'dim': 16},
                'n_characters': 262,
                'max_characters_per_token': 50
            }
        }
        self.output_dim = output_dim
        # When False, every parameter of this module is frozen.
        self.requires_grad = requires_grad
        self._init_weights()
        # Cache the arrays for use in forward -- +1 due to masking.
        self._beginning_of_sentence_characters = torch.from_numpy(
            numpy.array(CharacterMapper.beginning_of_sentence_characters) + 1
        )
        self._end_of_sentence_characters = torch.from_numpy(
            numpy.array(CharacterMapper.end_of_sentence_characters) + 1
        )

    def _init_weights(self):
        # Build all sub-modules (embedding table, convolutions, highway,
        # projection) in a fixed order.
        self._init_char_embedding()
        self._init_cnn_weights()
        self._init_highway()
        self._init_projection()

    def _init_char_embedding(self):
        # One extra row (index -1) for the padding character id.
        weights = numpy.zeros(
            (
                self._options["char_cnn"]["n_characters"] + 1,
                self._options["char_cnn"]["embedding"]["dim"]
            ),
            dtype="float32")
        weights[-1, :] *= 0.  # padding
        self._char_embedding_weights = torch.nn.Parameter(
            torch.FloatTensor(weights), requires_grad=self.requires_grad
        )

    def _init_cnn_weights(self):
        cnn_options = self._options["char_cnn"]
        filters = cnn_options["filters"]
        char_embed_dim = cnn_options["embedding"]["dim"]
        convolutions = []
        for i, (width, num) in enumerate(filters):
            conv = torch.nn.Conv1d(
                in_channels=char_embed_dim, out_channels=num,
                kernel_size=width, bias=True)
            conv.weight.requires_grad = self.requires_grad
            conv.bias.requires_grad = self.requires_grad
            convolutions.append(conv)
            # Register each conv so its parameters are tracked by the module.
            self.add_module("char_conv_{}".format(i), conv)
        self._convolutions = convolutions

    def _init_highway(self):
        # the highway layers have same dimensionality as the number of cnn filters
        cnn_options = self._options["char_cnn"]
        filters = cnn_options["filters"]
        n_filters = sum(f[1] for f in filters)
        n_highway = cnn_options["n_highway"]
        self._highways = Highway(n_filters, n_highway, activation=torch.nn.functional.relu)
        for k in range(n_highway):
            # The AllenNLP highway is one matrix multplication with concatenation of
            # transform and carry weights.
            self._highways._layers[k].weight.requires_grad = self.requires_grad
            self._highways._layers[k].bias.requires_grad = self.requires_grad

    def _init_projection(self):
        # Final linear map from the concatenated filter outputs to output_dim.
        cnn_options = self._options["char_cnn"]
        filters = cnn_options["filters"]
        n_filters = sum(f[1] for f in filters)
        self._projection = torch.nn.Linear(n_filters, self.output_dim, bias=True)
        self._projection.weight.requires_grad = self.requires_grad
        self._projection.bias.requires_grad = self.requires_grad

    def get_output_dim(self):
        """Dimensionality of the per-token embeddings produced by forward()."""
        return self.output_dim

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        inputs: ``torch.Tensor``
            Shape ``(batch_size, sequence_length, 50)`` of character ids representing the
            current batch.

        Returns
        -------
        embeddings: ``torch.Tensor``
            Shape ``(batch_size, sequence_length, embedding_dim)`` tensor with context
            insensitive token representations.
        """
        # Add BOS/EOS -- disabled here: [CLS]/[SEP] are already part of the
        # input, so the AllenNLP boundary-insertion call is commented out.
        # NOTE(review): mask_with_bos_eos is computed but never used.
        mask = ((inputs > 0).long().sum(dim=-1) > 0).long()
        #character_ids_with_bos_eos, mask_with_bos_eos = add_sentence_boundary_token_ids(
        #    inputs, mask, self._beginning_of_sentence_characters, self._end_of_sentence_characters
        #)
        character_ids_with_bos_eos, mask_with_bos_eos = inputs, mask
        # the character id embedding
        max_chars_per_token = self._options["char_cnn"]["max_characters_per_token"]
        # (batch_size * sequence_length, max_chars_per_token, embed_dim)
        character_embedding = torch.nn.functional.embedding(
            character_ids_with_bos_eos.view(-1, max_chars_per_token), self._char_embedding_weights
        )
        # run convolutions
        cnn_options = self._options["char_cnn"]
        if cnn_options["activation"] == "tanh":
            activation = torch.tanh
        elif cnn_options["activation"] == "relu":
            activation = torch.nn.functional.relu
        else:
            raise Exception("Unknown activation")
        # (batch_size * sequence_length, embed_dim, max_chars_per_token)
        character_embedding = torch.transpose(character_embedding, 1, 2)
        convs = []
        for i in range(len(self._convolutions)):
            conv = getattr(self, "char_conv_{}".format(i))
            convolved = conv(character_embedding)
            # max-pool over the character positions:
            # (batch_size * sequence_length, n_filters for this width)
            convolved, _ = torch.max(convolved, dim=-1)
            convolved = activation(convolved)
            convs.append(convolved)
        # (batch_size * sequence_length, n_filters)
        token_embedding = torch.cat(convs, dim=-1)
        # apply the highway layers (batch_size * sequence_length, n_filters)
        token_embedding = self._highways(token_embedding)
        # final projection  (batch_size * sequence_length, embedding_dim)
        token_embedding = self._projection(token_embedding)
        # reshape to (batch_size, sequence_length, embedding_dim)
        batch_size, sequence_length, _ = character_ids_with_bos_eos.size()
        return token_embedding.view(batch_size, sequence_length, -1)
if __name__ == "__main__":
    # Smoke test: embed two ragged sentences and print the output shape.
    model = CharacterCNN(output_dim=768)
    # Test with a batch of 2 sentences
    # NOTE(review): `mapper` is created but never used below.
    mapper = CharacterMapper()
    sentences = [
        '[CLS] hi , my name is Hicham [SEP]'.split(),
        '[CLS] hello Hicham [SEP]'.split()
    ]
    print('Input sequences:', sentences)
    indexer = CharacterIndexer()
    inputs = indexer.as_padded_tensor(sentences)
    print('Input shape:', inputs.shape)
    print('Output shape:', model.forward(inputs).shape)
| 9,562 | 37.560484 | 104 | py |
character-bert | character-bert-main/modeling/character_bert.py | # Functions are adapted from Huggingface's transformers library:
# https://github.com/allenai/allennlp/
""" Defines the main CharacterBERT PyTorch class. """
import torch
from torch import nn
from transformers.modeling_bert import BertPreTrainedModel, BertEncoder, BertPooler
from modeling.character_cnn import CharacterCNN
class BertCharacterEmbeddings(nn.Module):
    """ Construct the embeddings from char-cnn, position and token_type embeddings. """

    def __init__(self, config):
        super(BertCharacterEmbeddings, self).__init__()
        # This is the module that computes word embeddings from a token's characters
        self.word_embeddings = CharacterCNN(
            requires_grad=True,
            output_dim=config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        # input_ids carries an extra trailing axis of character ids,
        # i.e. (batch, seq_len, max_chars); slicing [:, :, 0] recovers a
        # (batch, seq_len) view used only for shape/device bookkeeping.
        seq_length = input_ids[:, :, 0].size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids[:, :, 0].device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids[:, :, 0])
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids[:, :, 0])
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        # Sum the three embedding kinds, then normalize and apply dropout,
        # mirroring the standard BERT embedding layer.
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class CharacterBertModel(BertPreTrainedModel):
""" BertModel using char-cnn embeddings instead of wordpiece embeddings. """
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Same layout as `BertModel`, except the embedding layer is replaced
        # by character-CNN-based embeddings.
        self.embeddings = BertCharacterEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def get_input_embeddings(self):
        # The character CNN plays the role of BERT's wordpiece embedding table.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        # Replace the character-CNN embedding module wholesale.
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer index -> list of heads."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
**kwargs
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids[:,:,0].size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(
attention_mask.dtype
) # causal and attention masks must have same type with pytorch version < 1.3
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
elif encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(
encoder_hidden_shape, encoder_attention_mask.shape
)
)
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids,
token_type_ids=token_type_ids
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
if __name__ == "__main__":
    # Demo entry point: fetch the pretrained checkpoint and log the architecture.
    import logging
    from download import download_model

    logging.basicConfig(level=logging.INFO)

    # Make sure the checkpoint is available locally before loading it.
    download_model('medical_character_bert')
    model_dir = "pretrained-models/medical_character_bert/"
    model = CharacterBertModel.from_pretrained(model_dir)
    logging.info('%s', model)
| 9,516 | 44.754808 | 122 | py |
client-superstaq | client-superstaq-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import os
import sys
from typing import List
# Make the repository root importable so autodoc can resolve the packages.
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "docs-superstaq"
copyright = "2023, ColdQuanta, Inc., DBA Infleqtion" # pylint: disable=redefined-builtin
author = "ColdQuanta, Inc., DBA Infleqtion"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
    "nbsphinx",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.mathjax", # math rendering in html
    "sphinx.ext.napoleon", # allows google- and numpy- style docstrings
    "IPython.sphinxext.ipython_console_highlighting",
]
# since our notebooks can involve network I/O (or even costing $), we don't want them to be
# run every time we build the docs. Instead, just use the pre-executed outputs.
nbsphinx_execute = "never"
# In addition, we set the mathjax path to v3, which allows \ket{} (and other commands) to render
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"
autosummary_generate = True
templates_path = ["_templates"]
# Using `modules` in index.rst gets the first package and ignores additional included packages.
# Listing out modules explicitly causes building docs to throw error looking for `modules.rst`,
# so add to excluded search patterns as per suggestion here: https://stackoverflow.com/a/15438962
exclude_patterns: List[str] = [
    "modules.rst",
    "setup.rst",
    "general_superstaq.check.rst",
    "cirq_superstaq.ops.rst",
]
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_theme_options = {
    "logo_only": True,
}
html_logo = "_static/logos/Superstaq_color.png"
html_css_files = [
    "css/docs-superstaq.css",
]
html_favicon = "_static/logos/Infleqtion_logo.png"
| 2,428 | 35.253731 | 97 | py |
focal_calibration | focal_calibration-main/evaluate.py | import os
import sys
import torch
import random
import argparse
from torch import nn
import matplotlib.pyplot as plt
import torch.backends.cudnn as cudnn
# Import dataloaders
import Data.cifar10 as cifar10
import Data.cifar100 as cifar100
import Data.tiny_imagenet as tiny_imagenet
# Import network architectures
from Net.resnet_tiny_imagenet import resnet50 as resnet50_ti
from Net.resnet import resnet50, resnet110
from Net.wide_resnet import wide_resnet_cifar
from Net.densenet import densenet121
# Import metrics to compute
from Metrics.metrics import test_classification_net_logits
from Metrics.metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss
# Import temperature scaling and NLL utilities
from temperature_scaling import ModelWithTemperature
# Dataset params
# Number of classes for each supported dataset.
dataset_num_classes = {
    'cifar10': 10,
    'cifar100': 100,
    'tiny_imagenet': 200
}
# Dataloader module for each supported dataset.
dataset_loader = {
    'cifar10': cifar10,
    'cifar100': cifar100,
    'tiny_imagenet': tiny_imagenet
}
# Mapping model name to model function
models = {
    'resnet50': resnet50,
    'resnet50_ti': resnet50_ti,
    'resnet110': resnet110,
    'wide_resnet': wide_resnet_cifar,
    'densenet121': densenet121
}
def parseArgs():
    """Build and parse the CLI arguments for calibration evaluation.

    Returns:
        argparse.Namespace carrying dataset/model/paths, batch sizes, bin count,
        GPU / data-augmentation flags and the temperature-scaling CV criterion.
    """
    arg_parser = argparse.ArgumentParser(
        description="Evaluating a single model on calibration metrics.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("--dataset", type=str, default='cifar10',
                            dest="dataset", help='dataset to test on')
    arg_parser.add_argument("--dataset-root", type=str, default='./',
                            dest="dataset_root", help='root path of the dataset (for tiny imagenet)')
    arg_parser.add_argument("--model-name", type=str, default=None,
                            dest="model_name", help='name of the model')
    arg_parser.add_argument("--model", type=str, default='resnet50', dest="model",
                            help='Model to test')
    arg_parser.add_argument("--save-path", type=str, default='./',
                            dest="save_loc", help='Path to import the model')
    arg_parser.add_argument("--saved_model_name", type=str,
                            default='resnet50_cross_entropy_350.model',
                            dest="saved_model_name", help="file name of the pre-trained model")
    arg_parser.add_argument("--num-bins", type=int, default=15, dest="num_bins",
                            help='Number of bins')
    # -g and -da are store_true switches whose default is forced to True just
    # below, so passing the flag is effectively a no-op (kept for compatibility).
    arg_parser.add_argument("-g", action="store_true", dest="gpu",
                            help="Use GPU")
    arg_parser.set_defaults(gpu=True)
    arg_parser.add_argument("-da", action="store_true", dest="data_aug",
                            help="Using data augmentation")
    arg_parser.set_defaults(data_aug=True)
    arg_parser.add_argument("-b", type=int, default=128,
                            dest="train_batch_size", help="Batch size")
    arg_parser.add_argument("-tb", type=int, default=128,
                            dest="test_batch_size", help="Test Batch size")
    arg_parser.add_argument("--cverror", type=str, default='ece',
                            dest="cross_validation_error",
                            help='Error function to do temp scaling')
    arg_parser.add_argument("-log", action="store_true", dest="log",
                            help="whether to print log data")
    return arg_parser.parse_args()
def get_logits_labels(data_loader, net):
    """Run `net` over every batch of `data_loader` (eval mode, no grad).

    Returns:
        (logits, labels): concatenated tensors, both moved to the GPU.
    """
    net.eval()
    all_logits, all_labels = [], []
    with torch.no_grad():
        for data, label in data_loader:
            data = data.cuda()
            all_logits.append(net(data))
            all_labels.append(label)
    return torch.cat(all_logits).cuda(), torch.cat(all_labels).cuda()
if __name__ == "__main__":
    # Checking if GPU is available
    cuda = False
    if (torch.cuda.is_available()):
        cuda = True
    # Setting additional parameters
    torch.manual_seed(1)
    device = torch.device("cuda" if cuda else "cpu")
    args = parseArgs()
    if args.model_name is None:
        args.model_name = args.model
    dataset = args.dataset
    dataset_root = args.dataset_root
    model_name = args.model_name
    save_loc = args.save_loc
    saved_model_name = args.saved_model_name
    num_bins = args.num_bins
    cross_validation_error = args.cross_validation_error
    # Taking input for the dataset
    num_classes = dataset_num_classes[dataset]
    # Tiny-ImageNet ships an explicit split; for CIFAR the validation set is
    # carved out of the training set with a fixed seed for reproducibility.
    # NOTE(review): for tiny_imagenet both "val" and "test" loaders read the
    # same 'val' split — confirm this is intended.
    if (args.dataset == 'tiny_imagenet'):
        val_loader = dataset_loader[args.dataset].get_data_loader(
            root=args.dataset_root,
            split='val',
            batch_size=args.test_batch_size,
            pin_memory=args.gpu)
        test_loader = dataset_loader[args.dataset].get_data_loader(
            root=args.dataset_root,
            split='val',
            batch_size=args.test_batch_size,
            pin_memory=args.gpu)
    else:
        _, val_loader = dataset_loader[args.dataset].get_train_valid_loader(
            batch_size=args.train_batch_size,
            augment=args.data_aug,
            random_seed=1,
            pin_memory=args.gpu
        )
        test_loader = dataset_loader[args.dataset].get_test_loader(
            batch_size=args.test_batch_size,
            pin_memory=args.gpu
        )
    # Build the network and restore the pre-trained weights.
    model = models[model_name]
    net = model(num_classes=num_classes, temp=1.0)
    net.cuda()
    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True
    net.load_state_dict(torch.load(args.save_loc + args.saved_model_name))
    # Accuracy/calibration criteria, all evaluated on GPU logits.
    nll_criterion = nn.CrossEntropyLoss().cuda()
    ece_criterion = ECELoss().cuda()
    adaece_criterion = AdaptiveECELoss().cuda()
    cece_criterion = ClasswiseECELoss().cuda()
    # Pre-temperature-scaling metrics (p_ prefix).
    logits, labels = get_logits_labels(test_loader, net)
    conf_matrix, p_accuracy, _, _, _ = test_classification_net_logits(logits, labels)
    p_ece = ece_criterion(logits, labels).item()
    p_adaece = adaece_criterion(logits, labels).item()
    p_cece = cece_criterion(logits, labels).item()
    p_nll = nll_criterion(logits, labels).item()
    # LaTeX-table-style row: error & NLL & ECE & AdaECE & classwise ECE.
    res_str = '{:s}&{:.4f}&{:.4f}&{:.4f}&{:.4f}&{:.4f}'.format(saved_model_name, 1-p_accuracy, p_nll, p_ece, p_adaece, p_cece)
    # Printing the required evaluation metrics
    if args.log:
        print (conf_matrix)
        print ('Test error: ' + str((1 - p_accuracy)))
        print ('Test NLL: ' + str(p_nll))
        print ('ECE: ' + str(p_ece))
        print ('AdaECE: ' + str(p_adaece))
        print ('Classwise ECE: ' + str(p_cece))
    # Temperature scaling: T is tuned on the validation set, then all metrics
    # are re-computed on the test set with the scaled model.
    scaled_model = ModelWithTemperature(net, args.log)
    scaled_model.set_temperature(val_loader, cross_validate=cross_validation_error)
    T_opt = scaled_model.get_temperature()
    logits, labels = get_logits_labels(test_loader, scaled_model)
    conf_matrix, accuracy, _, _, _ = test_classification_net_logits(logits, labels)
    ece = ece_criterion(logits, labels).item()
    adaece = adaece_criterion(logits, labels).item()
    cece = cece_criterion(logits, labels).item()
    nll = nll_criterion(logits, labels).item()
    res_str += '&{:.4f}({:.2f})&{:.4f}&{:.4f}&{:.4f}'.format(nll, T_opt, ece, adaece, cece)
    if args.log:
        print ('Optimal temperature: ' + str(T_opt))
        print (conf_matrix)
        print ('Test error: ' + str((1 - accuracy)))
        print ('Test NLL: ' + str(nll))
        print ('ECE: ' + str(ece))
        print ('AdaECE: ' + str(adaece))
        print ('Classwise ECE: ' + str(cece))
    # Test NLL & ECE & AdaECE & Classwise ECE
    print(res_str)
focal_calibration | focal_calibration-main/train_utils.py | '''
This module contains methods for training models with different loss functions.
'''
import torch
from torch.nn import functional as F
from torch import nn
from Losses.loss import cross_entropy, focal_loss, focal_loss_adaptive
from Losses.loss import mmce, mmce_weighted
from Losses.loss import brier_score
# Maps the --loss CLI names to the loss callables implemented in Losses.loss.
loss_function_dict = {
    'cross_entropy': cross_entropy,
    'focal_loss': focal_loss,
    'focal_loss_adaptive': focal_loss_adaptive,
    'mmce': mmce,
    'mmce_weighted': mmce_weighted,
    'brier_score': brier_score
}
def train_single_epoch(epoch,
                       model,
                       train_loader,
                       optimizer,
                       device,
                       loss_function='cross_entropy',
                       gamma=1.0,
                       lamda=1.0,
                       loss_mean=False):
    '''
    Util method for training a model for a single epoch.

    Args:
        epoch: current epoch index (used only for console logging).
        model: network to optimise; set to train mode here.
        train_loader: iterable of (data, labels) batches.
        optimizer: optimizer stepped once per batch.
        device: device the batches are moved to.
        loss_function: key into loss_function_dict.
        gamma, lamda: hyper-parameters forwarded to the loss callable.
        loss_mean: if True, divide the batch loss by the batch size.

    Returns:
        Average loss per sample over the whole epoch.
    '''
    log_interval = 10
    model.train()
    train_loss = 0
    num_samples = 0
    for batch_idx, (data, labels) in enumerate(train_loader):
        data = data.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        logits = model(data)
        # MMCE-based losses are scaled by the batch size before backprop
        # (presumably they return a per-batch mean — see Losses.loss).
        if ('mmce' in loss_function):
            loss = (len(data) * loss_function_dict[loss_function](logits, labels, gamma=gamma, lamda=lamda, device=device))
        else:
            loss = loss_function_dict[loss_function](logits, labels, gamma=gamma, lamda=lamda, device=device)
        if loss_mean:
            loss = loss / len(data)
        loss.backward()
        # clip_grad_norm was deprecated (and later removed from torch);
        # use the in-place variant, which has identical behaviour.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 2)
        train_loss += loss.item()
        optimizer.step()
        num_samples += len(data)
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader) * len(data),
                100. * batch_idx / len(train_loader),
                loss.item()))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / num_samples))
    return train_loss / num_samples
def test_single_epoch(epoch,
                      model,
                      test_val_loader,
                      device,
                      loss_function='cross_entropy',
                      gamma=1.0,
                      lamda=1.0):
    '''
    Util method for testing a model for a single epoch.

    Args:
        epoch: epoch index (unused except for symmetry with train_single_epoch).
        model: network to evaluate; set to eval mode here.
        test_val_loader: iterable of (data, labels) batches (val or test split).
        device: device the batches are moved to.
        loss_function: key into loss_function_dict.
        gamma, lamda: hyper-parameters forwarded to the loss callable.

    Returns:
        Average loss per sample over the loader.
    '''
    model.eval()
    loss = 0
    num_samples = 0
    with torch.no_grad():
        for i, (data, labels) in enumerate(test_val_loader):
            data = data.to(device)
            labels = labels.to(device)
            logits = model(data)
            # MMCE-based losses are scaled by the batch size, mirroring the
            # scaling applied during training.
            if ('mmce' in loss_function):
                loss += (len(data) * loss_function_dict[loss_function](logits, labels, gamma=gamma, lamda=lamda, device=device).item())
            else:
                loss += loss_function_dict[loss_function](logits, labels, gamma=gamma, lamda=lamda, device=device).item()
            num_samples += len(data)
    print('======> Test set loss: {:.4f}'.format(
        loss / num_samples))
    return loss / num_samples
focal_calibration | focal_calibration-main/temperature_scaling.py | '''
Code to perform temperature scaling. Adapted from https://github.com/gpleiss/temperature_scaling
'''
import torch
import numpy as np
from torch import nn, optim
from torch.nn import functional as F
from Metrics.metrics import ECELoss
class ModelWithTemperature(nn.Module):
    """
    A thin decorator, which wraps a model with temperature scaling
    model (nn.Module):
        A classification neural network
        NB: Output of the neural network should be the classification logits,
            NOT the softmax (or log softmax)!
    """
    def __init__(self, model, log=True):
        # `log` controls whether set_temperature prints before/after metrics.
        super(ModelWithTemperature, self).__init__()
        self.model = model
        self.temperature = 1.0
        self.log = log
    def forward(self, input):
        # Forward through the wrapped model, then rescale its logits.
        logits = self.model(input)
        return self.temperature_scale(logits)
    def temperature_scale(self, logits):
        """
        Perform temperature scaling on logits
        """
        # Expand temperature to match the size of logits
        return logits / self.temperature
    def set_temperature(self,
                        valid_loader,
                        cross_validate='ece'):
        """
        Tune the temperature of the model (using the validation set) with cross-validation on ECE or NLL.

        Performs a grid search over T in {0.1, 0.2, ..., 10.0} and keeps the
        value minimising the chosen criterion on `valid_loader`. Requires CUDA.
        """
        self.cuda()
        self.model.eval()
        nll_criterion = nn.CrossEntropyLoss().cuda()
        ece_criterion = ECELoss().cuda()
        # First: collect all the logits and labels for the validation set
        logits_list = []
        labels_list = []
        with torch.no_grad():
            for input, label in valid_loader:
                input = input.cuda()
                logits = self.model(input)
                logits_list.append(logits)
                labels_list.append(label)
        logits = torch.cat(logits_list).cuda()
        labels = torch.cat(labels_list).cuda()
        # Calculate NLL and ECE before temperature scaling
        before_temperature_nll = nll_criterion(logits, labels).item()
        before_temperature_ece = ece_criterion(logits, labels).item()
        if self.log:
            print('Before temperature - NLL: %.3f, ECE: %.3f' % (before_temperature_nll, before_temperature_ece))
        # Grid-search state: track the best temperature separately for NLL and ECE.
        nll_val = 10 ** 7
        ece_val = 10 ** 7
        T_opt_nll = 1.0
        T_opt_ece = 1.0
        T = 0.1
        for i in range(100):
            self.temperature = T
            self.cuda()
            after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
            after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
            if nll_val > after_temperature_nll:
                T_opt_nll = T
                nll_val = after_temperature_nll
            if ece_val > after_temperature_ece:
                T_opt_ece = T
                ece_val = after_temperature_ece
            T += 0.1
        # Keep the temperature selected by the requested criterion.
        if cross_validate == 'ece':
            self.temperature = T_opt_ece
        else:
            self.temperature = T_opt_nll
        self.cuda()
        # Calculate NLL and ECE after temperature scaling
        after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
        after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
        if self.log:
            print('Optimal temperature: %.3f' % self.temperature)
            print('After temperature - NLL: %.3f, ECE: %.3f' % (after_temperature_nll, after_temperature_ece))
        return self
def get_temperature(self):
return self.temperature | 3,591 | 33.209524 | 113 | py |
focal_calibration | focal_calibration-main/train.py | '''
Script for training models.
'''
from torch import optim
import torch
import torch.utils.data
import argparse
import torch.backends.cudnn as cudnn
import random
import json
import sys
# Import dataloaders
import Data.cifar10 as cifar10
import Data.cifar100 as cifar100
import Data.tiny_imagenet as tiny_imagenet
# Import network models
from Net.resnet import resnet50, resnet110
from Net.resnet_tiny_imagenet import resnet50 as resnet50_ti
from Net.wide_resnet import wide_resnet_cifar
from Net.densenet import densenet121
# Import loss functions
from Losses.loss import cross_entropy, focal_loss, focal_loss_adaptive
from Losses.loss import mmce, mmce_weighted
from Losses.loss import brier_score
# Import train and validation utilities
from train_utils import train_single_epoch, test_single_epoch
# Import validation metrics
from Metrics.metrics import test_classification_net
# Number of classes for each supported dataset.
dataset_num_classes = {
    'cifar10': 10,
    'cifar100': 100,
    'tiny_imagenet': 200
}
# Dataloader module for each supported dataset.
dataset_loader = {
    'cifar10': cifar10,
    'cifar100': cifar100,
    'tiny_imagenet': tiny_imagenet
}
# Model-name -> constructor mapping for the supported architectures.
models = {
    'resnet50': resnet50,
    'resnet50_ti': resnet50_ti,
    'resnet110': resnet110,
    'wide_resnet': wide_resnet_cifar,
    'densenet121': densenet121
}
def loss_function_save_name(loss_function,
                            scheduled=False,
                            gamma=1.0,
                            gamma1=1.0,
                            gamma2=1.0,
                            gamma3=1.0,
                            lamda=1.0):
    """Return the tag embedded in checkpoint/json file names for a loss setup.

    A gamma-scheduled focal loss encodes the three schedule gammas
    (gamma1..gamma3); otherwise the tag depends on the loss name and, where
    relevant, on `gamma` or `lamda`.
    """
    # Scheduled focal loss gets a dedicated tag built from the schedule gammas.
    # (`scheduled` may arrive as an int flag, hence the explicit == True check.)
    if loss_function == 'focal_loss' and scheduled == True:
        return f'focal_loss_scheduled_gamma_{gamma1}_{gamma2}_{gamma3}'
    tag_by_loss = {
        'cross_entropy': 'cross_entropy',
        'focal_loss': f'focal_loss_gamma_{gamma}',
        'focal_loss_adaptive': f'focal_loss_adaptive_gamma_{gamma}',
        'mmce': f'mmce_lamda_{lamda}',
        'mmce_weighted': f'mmce_weighted_lamda_{lamda}',
        'brier_score': 'brier_score'
    }
    return tag_by_loss[loss_function]
def parseArgs():
    """Build and parse the CLI arguments for calibration training.

    Returns:
        argparse.Namespace with dataset/model choices, optimiser settings,
        focal/MMCE hyper-parameters (plus an optional three-phase gamma
        schedule), checkpointing paths/intervals and LR milestones.
    """
    # Default values, surfaced as argparse defaults below.
    default_dataset = 'cifar10'
    dataset_root = './'
    train_batch_size = 128
    test_batch_size = 128
    learning_rate = 0.1
    momentum = 0.9
    optimiser = "sgd"
    loss = "cross_entropy"
    gamma = 1.0
    gamma2 = 1.0
    gamma3 = 1.0
    lamda = 1.0
    weight_decay = 5e-4
    log_interval = 50
    save_interval = 50
    save_loc = './'
    model_name = None
    saved_model_name = "resnet50_cross_entropy_350.model"
    load_loc = './'
    model = "resnet50"
    epoch = 350
    first_milestone = 150 #Milestone for change in lr
    second_milestone = 250 #Milestone for change in lr
    gamma_schedule_step1 = 100
    gamma_schedule_step2 = 250
    parser = argparse.ArgumentParser(
        description="Training for calibration.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--dataset", type=str, default=default_dataset,
                        dest="dataset", help='dataset to train on')
    parser.add_argument("--dataset-root", type=str, default=dataset_root,
                        dest="dataset_root", help='root path of the dataset (for tiny imagenet)')
    # NOTE: --data-aug and -g are store_true flags whose defaults are forced to
    # True via set_defaults, so passing them is effectively a no-op.
    parser.add_argument("--data-aug", action="store_true", dest="data_aug")
    parser.set_defaults(data_aug=True)
    parser.add_argument("-g", action="store_true", dest="gpu",
                        help="Use GPU")
    parser.set_defaults(gpu=True)
    parser.add_argument("--load", action="store_true", dest="load",
                        help="Load from pretrained model")
    parser.set_defaults(load=False)
    parser.add_argument("-b", type=int, default=train_batch_size,
                        dest="train_batch_size", help="Batch size")
    parser.add_argument("-tb", type=int, default=test_batch_size,
                        dest="test_batch_size", help="Test Batch size")
    parser.add_argument("-e", type=int, default=epoch, dest="epoch",
                        help='Number of training epochs')
    parser.add_argument("--lr", type=float, default=learning_rate,
                        dest="learning_rate", help='Learning rate')
    parser.add_argument("--mom", type=float, default=momentum,
                        dest="momentum", help='Momentum')
    parser.add_argument("--nesterov", action="store_true", dest="nesterov",
                        help="Whether to use nesterov momentum in SGD")
    parser.set_defaults(nesterov=False)
    parser.add_argument("--decay", type=float, default=weight_decay,
                        dest="weight_decay", help="Weight Decay")
    parser.add_argument("--opt", type=str, default=optimiser,
                        dest="optimiser",
                        help='Choice of optimisation algorithm')
    parser.add_argument("--loss", type=str, default=loss, dest="loss_function",
                        help="Loss function to be used for training")
    parser.add_argument("--loss-mean", action="store_true", dest="loss_mean",
                        help="whether to take mean of loss instead of sum to train")
    parser.set_defaults(loss_mean=False)
    parser.add_argument("--gamma", type=float, default=gamma,
                        dest="gamma", help="Gamma for focal components")
    parser.add_argument("--gamma2", type=float, default=gamma2,
                        dest="gamma2", help="Gamma for different focal components")
    parser.add_argument("--gamma3", type=float, default=gamma3,
                        dest="gamma3", help="Gamma for different focal components")
    parser.add_argument("--lamda", type=float, default=lamda,
                        dest="lamda", help="Regularization factor")
    parser.add_argument("--gamma-schedule", type=int, default=0,
                        dest="gamma_schedule", help="Schedule gamma or not")
    parser.add_argument("--gamma-schedule-step1", type=int, default=gamma_schedule_step1,
                        dest="gamma_schedule_step1", help="1st step for gamma schedule")
    parser.add_argument("--gamma-schedule-step2", type=int, default=gamma_schedule_step2,
                        dest="gamma_schedule_step2", help="2nd step for gamma schedule")
    parser.add_argument("--log-interval", type=int, default=log_interval,
                        dest="log_interval", help="Log Interval on Terminal")
    parser.add_argument("--save-interval", type=int, default=save_interval,
                        dest="save_interval", help="Save Interval on Terminal")
    parser.add_argument("--saved_model_name", type=str, default=saved_model_name,
                        dest="saved_model_name", help="file name of the pre-trained model")
    parser.add_argument("--save-path", type=str, default=save_loc,
                        dest="save_loc",
                        help='Path to export the model')
    parser.add_argument("--model-name", type=str, default=model_name,
                        dest="model_name",
                        help='name of the model')
    parser.add_argument("--load-path", type=str, default=load_loc,
                        dest="load_loc",
                        help='Path to load the model from')
    parser.add_argument("--model", type=str, default=model, dest="model",
                        help='Model to train')
    parser.add_argument("--first-milestone", type=int, default=first_milestone,
                        dest="first_milestone", help="First milestone to change lr")
    parser.add_argument("--second-milestone", type=int, default=second_milestone,
                        dest="second_milestone", help="Second milestone to change lr")
    return parser.parse_args()
if __name__ == "__main__":
    torch.manual_seed(1)
    args = parseArgs()
    # Use the GPU only when one is available AND requested.
    cuda = False
    if (torch.cuda.is_available() and args.gpu):
        cuda = True
    device = torch.device("cuda" if cuda else "cpu")
    print("CUDA set: " + str(cuda))
    num_classes = dataset_num_classes[args.dataset]
    # Choosing the model to train
    net = models[args.model](num_classes=num_classes)
    # Setting model name
    if args.model_name is None:
        args.model_name = args.model
    if args.gpu is True:
        net.cuda()
        net = torch.nn.DataParallel(
            net, device_ids=range(torch.cuda.device_count()))
        cudnn.benchmark = True
    start_epoch = 0
    num_epochs = args.epoch
    if args.load:
        # Resume: the starting epoch is encoded in the checkpoint file name
        # (the digits between the last '_' and '.model').
        net.load_state_dict(torch.load(args.save_loc + args.saved_model_name))
        start_epoch = int(args.saved_model_name[args.saved_model_name.rfind('_')+1:args.saved_model_name.rfind('.model')])
    if args.optimiser == "sgd":
        opt_params = net.parameters()
        optimizer = optim.SGD(opt_params,
                              lr=args.learning_rate,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay,
                              nesterov=args.nesterov)
    elif args.optimiser == "adam":
        opt_params = net.parameters()
        optimizer = optim.Adam(opt_params,
                               lr=args.learning_rate,
                               weight_decay=args.weight_decay)
    # Step schedule: lr is multiplied by 0.1 at each milestone.
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[args.first_milestone, args.second_milestone], gamma=0.1)
    if (args.dataset == 'tiny_imagenet'):
        train_loader = dataset_loader[args.dataset].get_data_loader(
            root=args.dataset_root,
            split='train',
            batch_size=args.train_batch_size,
            pin_memory=args.gpu)
        val_loader = dataset_loader[args.dataset].get_data_loader(
            root=args.dataset_root,
            split='val',
            batch_size=args.test_batch_size,
            pin_memory=args.gpu)
        test_loader = dataset_loader[args.dataset].get_data_loader(
            root=args.dataset_root,
            split='val',
            batch_size=args.test_batch_size,
            pin_memory=args.gpu)
    else:
        train_loader, val_loader = dataset_loader[args.dataset].get_train_valid_loader(
            batch_size=args.train_batch_size,
            augment=args.data_aug,
            random_seed=1,
            pin_memory=args.gpu
        )
        test_loader = dataset_loader[args.dataset].get_test_loader(
            batch_size=args.test_batch_size,
            pin_memory=args.gpu
        )
    # Per-epoch curves, dumped to json at every save interval.
    training_set_loss = {}
    val_set_loss = {}
    test_set_loss = {}
    val_set_err = {}
    # Fast-forward the scheduler when resuming from a checkpoint.
    for epoch in range(0, start_epoch):
        scheduler.step()
    best_val_acc = 0
    for epoch in range(start_epoch, num_epochs):
        scheduler.step()
        # Optional three-phase gamma schedule for the focal loss.
        if (args.loss_function == 'focal_loss' and args.gamma_schedule == 1):
            if (epoch < args.gamma_schedule_step1):
                gamma = args.gamma
            elif (epoch >= args.gamma_schedule_step1 and epoch < args.gamma_schedule_step2):
                gamma = args.gamma2
            else:
                gamma = args.gamma3
        else:
            gamma = args.gamma
        train_loss = train_single_epoch(epoch,
                                        net,
                                        train_loader,
                                        optimizer,
                                        device,
                                        loss_function=args.loss_function,
                                        gamma=gamma,
                                        lamda=args.lamda,
                                        loss_mean=args.loss_mean)
        val_loss = test_single_epoch(epoch,
                                     net,
                                     val_loader,
                                     device,
                                     loss_function=args.loss_function,
                                     gamma=gamma,
                                     lamda=args.lamda)
        # Bug fix: this call previously passed val_loader, so the recorded
        # "test" loss silently duplicated the validation loss.
        test_loss = test_single_epoch(epoch,
                                      net,
                                      test_loader,
                                      device,
                                      loss_function=args.loss_function,
                                      gamma=gamma,
                                      lamda=args.lamda)
        _, val_acc, _, _, _ = test_classification_net(net, val_loader, device)
        training_set_loss[epoch] = train_loss
        val_set_loss[epoch] = val_loss
        test_set_loss[epoch] = test_loss
        val_set_err[epoch] = 1 - val_acc
        # Keep a checkpoint of the model with the best validation accuracy so far.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            print('New best error: %.4f' % (1 - best_val_acc))
            save_name = args.save_loc + \
                args.model_name + '_' + \
                loss_function_save_name(args.loss_function, args.gamma_schedule, gamma, args.gamma, args.gamma2, args.gamma3, args.lamda) + \
                '_best_' + \
                str(epoch + 1) + '.model'
            torch.save(net.state_dict(), save_name)
        # Periodic checkpoint plus loss/error curves dumped as json.
        if (epoch + 1) % args.save_interval == 0:
            save_name = args.save_loc + \
                args.model_name + '_' + \
                loss_function_save_name(args.loss_function, args.gamma_schedule, gamma, args.gamma, args.gamma2, args.gamma3, args.lamda) + \
                '_' + str(epoch + 1) + '.model'
            torch.save(net.state_dict(), save_name)
            with open(save_name[:save_name.rfind('_')] + '_train_loss.json', 'a') as f:
                json.dump(training_set_loss, f)
            with open(save_name[:save_name.rfind('_')] + '_val_loss.json', 'a') as fv:
                json.dump(val_set_loss, fv)
            with open(save_name[:save_name.rfind('_')] + '_test_loss.json', 'a') as ft:
                json.dump(test_set_loss, ft)
            with open(save_name[:save_name.rfind('_')] + '_val_error.json', 'a') as ft:
                json.dump(val_set_err, ft)
| 13,922 | 39.008621 | 149 | py |
focal_calibration | focal_calibration-main/Data/cifar100.py | """
Create train, valid, test iterators for CIFAR-100.
Train set size: 45000
Val set size: 5000
Test set size: 10000
"""
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(batch_size,
                           augment,
                           random_seed,
                           valid_size=0.1,
                           shuffle=True,
                           num_workers=4,
                           pin_memory=False,
                           get_val_temp=0):
    """
    Build train and validation iterators over the CIFAR-100 training split.

    Params
    ------
    - batch_size: samples per batch.
    - augment: apply random crop + horizontal flip to the train split only.
    - random_seed: seed used when shuffling the train/val index split.
    - valid_size: fraction of the training set held out for validation
      (float in [0, 1]).
    - shuffle: shuffle indices before splitting.
    - num_workers: subprocesses used for data loading.
    - pin_memory: copy tensors into CUDA pinned memory (set True on GPU).
    - get_val_temp: if > 0, carve that fraction out of the validation split
      into a third loader (intended for temperature scaling).

    Returns
    -------
    (train_loader, valid_loader), or
    (train_loader, valid_loader, valid_temp_loader) when get_val_temp > 0.
    """
    assert (valid_size >= 0) and (valid_size <= 1), \
        "[!] valid_size should be in the range [0, 1]."

    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )

    # Validation images are only normalized, never augmented.
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    if augment:
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

    # Both datasets point at the same underlying train split; they only
    # differ in the transform applied.
    data_dir = './data'
    train_dataset = datasets.CIFAR100(
        root=data_dir, train=True,
        download=True, transform=train_transform,
    )
    valid_dataset = datasets.CIFAR100(
        root=data_dir, train=True,
        download=False, transform=valid_transform,
    )

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split_at = int(np.floor(valid_size * num_train))
    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_idx, valid_idx = indices[split_at:], indices[:split_at]

    if get_val_temp > 0:
        valid_temp_dataset = datasets.CIFAR100(
            root=data_dir, train=True,
            download=False, transform=valid_transform,
        )
        # Take a fraction of the validation indices for temperature tuning.
        temp_at = int(np.floor(get_val_temp * split_at))
        valid_idx, valid_temp_idx = valid_idx[temp_at:], valid_idx[:temp_at]
        valid_temp_loader = torch.utils.data.DataLoader(
            valid_temp_dataset, batch_size=batch_size,
            sampler=SubsetRandomSampler(valid_temp_idx),
            num_workers=num_workers, pin_memory=pin_memory,
        )

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size,
        sampler=SubsetRandomSampler(train_idx),
        num_workers=num_workers, pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=batch_size,
        sampler=SubsetRandomSampler(valid_idx),
        num_workers=num_workers, pin_memory=pin_memory,
    )

    if get_val_temp > 0:
        return (train_loader, valid_loader, valid_temp_loader)
    return (train_loader, valid_loader)
def get_test_loader(batch_size,
                    shuffle=True,
                    num_workers=4,
                    pin_memory=False):
    """
    Utility function for loading and returning a multi-process
    test iterator over the CIFAR-100 dataset.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Params
    ------
    - batch_size: how many samples per batch to load.
    - shuffle: whether to shuffle the dataset after every epoch.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.

    Returns
    -------
    - data_loader: test set iterator.
    """
    # Fix: use the same per-channel statistics as the train/valid transforms
    # in get_train_valid_loader above. The previous values
    # (mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) are ImageNet
    # statistics, so test inputs were normalized differently from the inputs
    # the model was trained on (the companion cifar10.py already uses the
    # CIFAR statistics for its test loader).
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )

    # define transform
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    data_dir = './data'
    dataset = datasets.CIFAR100(
        root=data_dir, train=False,
        download=True, transform=transform,
    )

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle,
        num_workers=num_workers, pin_memory=pin_memory,
    )

    return data_loader
| 5,443 | 31.795181 | 82 | py |
focal_calibration | focal_calibration-main/Data/cifar10.py | """
Create train, valid, test iterators for CIFAR-10.
Train set size: 45000
Val set size: 5000
Test set size: 10000
"""
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(batch_size,
                           augment,
                           random_seed,
                           valid_size=0.1,
                           shuffle=True,
                           num_workers=4,
                           pin_memory=False,
                           get_val_temp=0):
    """
    Build train and validation iterators over the CIFAR-10 training split.

    Params
    ------
    - batch_size: samples per batch.
    - augment: apply random crop + horizontal flip to the train split only.
    - random_seed: seed used when shuffling the train/val index split.
    - valid_size: fraction of the training set held out for validation
      (float in [0, 1]).
    - shuffle: shuffle indices before splitting.
    - num_workers: subprocesses used for data loading.
    - pin_memory: copy tensors into CUDA pinned memory (set True on GPU).
    - get_val_temp: if > 0, carve that fraction out of the validation split
      into a third loader (intended for temperature scaling).

    Returns
    -------
    (train_loader, valid_loader), or
    (train_loader, valid_loader, valid_temp_loader) when get_val_temp > 0.
    """
    assert (valid_size >= 0) and (valid_size <= 1), \
        "[!] valid_size should be in the range [0, 1]."

    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )

    # Validation images are only normalized, never augmented.
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    if augment:
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

    # Both datasets point at the same underlying train split; they only
    # differ in the transform applied.
    data_dir = './data'
    train_dataset = datasets.CIFAR10(
        root=data_dir, train=True,
        download=True, transform=train_transform,
    )
    valid_dataset = datasets.CIFAR10(
        root=data_dir, train=True,
        download=False, transform=valid_transform,
    )

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split_at = int(np.floor(valid_size * num_train))
    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_idx, valid_idx = indices[split_at:], indices[:split_at]

    if get_val_temp > 0:
        valid_temp_dataset = datasets.CIFAR10(
            root=data_dir, train=True,
            download=False, transform=valid_transform,
        )
        # Take a fraction of the validation indices for temperature tuning.
        temp_at = int(np.floor(get_val_temp * split_at))
        valid_idx, valid_temp_idx = valid_idx[temp_at:], valid_idx[:temp_at]
        valid_temp_loader = torch.utils.data.DataLoader(
            valid_temp_dataset, batch_size=batch_size,
            sampler=SubsetRandomSampler(valid_temp_idx),
            num_workers=num_workers, pin_memory=pin_memory,
        )

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size,
        sampler=SubsetRandomSampler(train_idx),
        num_workers=num_workers, pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=batch_size,
        sampler=SubsetRandomSampler(valid_idx),
        num_workers=num_workers, pin_memory=pin_memory,
    )

    if get_val_temp > 0:
        return (train_loader, valid_loader, valid_temp_loader)
    return (train_loader, valid_loader)
def get_test_loader(batch_size,
                    shuffle=True,
                    num_workers=4,
                    pin_memory=False):
    """
    Build a multi-process iterator over the CIFAR-10 test split.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Params
    ------
    - batch_size: how many samples per batch to load.
    - shuffle: whether to shuffle the dataset after every epoch.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: copy tensors into CUDA pinned memory (set True on GPU).

    Returns
    -------
    - data_loader: test set iterator.
    """
    # Same per-channel statistics as the training transforms above.
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    data_dir = './data'
    dataset = datasets.CIFAR10(
        root=data_dir, train=False,
        download=True, transform=transform,
    )

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle,
        num_workers=num_workers, pin_memory=pin_memory,
    )
    return data_loader
| 5,395 | 31.70303 | 82 | py |
focal_calibration | focal_calibration-main/Data/svhn.py |
import os
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(batch_size,
                           augment,
                           random_seed,
                           valid_size=0.1,
                           shuffle=True,
                           num_workers=4,
                           pin_memory=False):
    """
    Build train and validation iterators over the SVHN train split.

    Params
    ------
    - batch_size: samples per batch.
    - augment: accepted for API symmetry with the CIFAR loaders but
      currently ignored -- the augmentation block was commented out
      upstream, so both splits are only normalized.
    - random_seed: seed used when shuffling the train/val index split.
    - valid_size: fraction held out for validation (float in [0, 1]).
    - shuffle: shuffle indices before splitting.
    - num_workers: subprocesses used for data loading.
    - pin_memory: copy tensors into CUDA pinned memory (set True on GPU).

    Returns
    -------
    (train_loader, valid_loader)
    """
    assert (valid_size >= 0) and (valid_size <= 1), \
        "[!] valid_size should be in the range [0, 1]."

    # NOTE(review): these look like CIFAR statistics rather than SVHN ones,
    # but they match what the models in this repo were trained with.
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )
    common_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # Both datasets wrap the same underlying split with the same transform;
    # they are only separated so each DataLoader gets its own dataset object.
    data_dir = './data'
    train_dataset = datasets.SVHN(
        root=data_dir, split='train',
        download=True, transform=common_transform,
    )
    valid_dataset = datasets.SVHN(
        root=data_dir, split='train',
        download=True, transform=common_transform,
    )

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split_at = int(np.floor(valid_size * num_train))
    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_idx, valid_idx = indices[split_at:], indices[:split_at]

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size,
        sampler=SubsetRandomSampler(train_idx),
        num_workers=num_workers, pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=batch_size,
        sampler=SubsetRandomSampler(valid_idx),
        num_workers=num_workers, pin_memory=pin_memory,
    )

    return (train_loader, valid_loader)
def get_test_loader(batch_size,
                    shuffle=True,
                    num_workers=4,
                    pin_memory=False):
    """
    Build a multi-process iterator over the SVHN test split.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Params
    ------
    - batch_size: how many samples per batch to load.
    - shuffle: whether to shuffle the dataset after every epoch.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: copy tensors into CUDA pinned memory (set True on GPU).

    Returns
    -------
    - data_loader: test set iterator.
    """
    # Same normalization as the train/valid loaders above.
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    data_dir = './data'
    dataset = datasets.SVHN(
        root=data_dir, split='test',
        download=True, transform=transform,
    )

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle,
        num_workers=num_workers, pin_memory=pin_memory,
    )
    return data_loader
| 4,471 | 30.272727 | 76 | py |
focal_calibration | focal_calibration-main/Data/tiny_imagenet.py | """
Create train, val, test iterators for Tiny ImageNet.
Train set size: 100000
Val set size: 10000
Test set size: 10000
Number of classes: 200
Link: https://tiny-imagenet.herokuapp.com/
"""
import os
import torch
import numpy as numpy
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import os
import glob
from torch.utils.data import Dataset
from PIL import Image
EXTENSION = 'JPEG'
NUM_IMAGES_PER_CLASS = 500
CLASS_LIST_FILE = 'wnids.txt'
VAL_ANNOTATION_FILE = 'val_annotations.txt'
class TinyImageNet(Dataset):
    """Tiny ImageNet data set available from `http://cs231n.stanford.edu/tiny-imagenet-200.zip`.
    Parameters
    ----------
    root: string
        Root directory including `train`, `test` and `val` subdirectories.
    split: string
        Indicating which split to return as a data set.
        Valid option: [`train`, `test`, `val`]
    transform: torchvision.transforms
        A (series) of valid transformation(s).
    in_memory: bool
        Set to True if there is enough memory (about 5G) and want to minimize disk IO overhead.
    """
    def __init__(self, root, split='train', transform=None, target_transform=None, in_memory=False):
        """Index the images of the requested split and build the
        class-name -> label-number mapping from wnids.txt."""
        self.root = os.path.expanduser(root)
        self.split = split
        self.transform = transform
        self.target_transform = target_transform
        self.in_memory = in_memory
        self.split_dir = os.path.join(root, self.split)
        # sorted() gives a deterministic sample order across filesystems.
        self.image_paths = sorted(glob.iglob(os.path.join(self.split_dir, '**', '*.%s' % EXTENSION), recursive=True))
        self.labels = {}  # fname - label number mapping
        self.images = []  # used for in-memory processing
        # build class label - number mapping
        with open(os.path.join(self.root, CLASS_LIST_FILE), 'r') as fp:
            self.label_texts = sorted([text.strip() for text in fp.readlines()])
        self.label_text_to_number = {text: i for i, text in enumerate(self.label_texts)}
        if self.split == 'train':
            # Train filenames follow the '<wnid>_<index>.JPEG' convention,
            # so labels can be derived without an annotation file.
            for label_text, i in self.label_text_to_number.items():
                for cnt in range(NUM_IMAGES_PER_CLASS):
                    self.labels['%s_%d.%s' % (label_text, cnt, EXTENSION)] = i
        elif self.split == 'val':
            # Val labels come from the tab-separated val_annotations.txt.
            with open(os.path.join(self.split_dir, VAL_ANNOTATION_FILE), 'r') as fp:
                for line in fp.readlines():
                    terms = line.split('\t')
                    file_name, label_text = terms[0], terms[1]
                    self.labels[file_name] = self.label_text_to_number[label_text]
        # read all images into torch tensor in memory to minimize disk IO overhead
        if self.in_memory:
            self.images = [self.read_image(path) for path in self.image_paths]
    def __len__(self):
        """Number of images in this split."""
        return len(self.image_paths)
    def __getitem__(self, index):
        """Return the transformed image (and its label, except for the
        unlabeled 'test' split) at `index`."""
        file_path = self.image_paths[index]
        if self.in_memory:
            img = self.images[index]
        else:
            img = self.read_image(file_path)
        if self.split == 'test':
            return img
        else:
            # file_name = file_path.split('/')[-1]
            return img, self.labels[os.path.basename(file_path)]
    def __repr__(self):
        """Human-readable summary: size, split, root and transforms."""
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = self.split
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
    def read_image(self, path):
        """Load one image from disk, promoting grayscale to RGB, and apply
        the dataset transform if one was given."""
        img = Image.open(path)
        if (img.mode == 'L'):
            img = img.convert('RGB')
        return self.transform(img) if self.transform else img
def get_data_loader(root,
                    batch_size,
                    split='train',
                    shuffle=True,
                    num_workers=4,
                    pin_memory=False):
    """
    Utility function for loading and returning a multi-process iterator
    over the Tiny ImageNet dataset.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Params
    ------
    - root: The root directory for the TinyImagenet dataset.
    - batch_size: how many samples per batch to load.
    - split: 'train' applies crop/flip augmentation; any other value loads
      the 'val' split with normalization only.
    - shuffle: whether to shuffle the samples each epoch.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.

    Returns
    -------
    - data_loader: iterator over the requested split.
    """
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )

    val_test_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    train_transform = transforms.Compose([
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])

    # load the dataset
    data_dir = root
    if (split == 'train'):
        dataset = TinyImageNet(data_dir,
                               split='train',
                               transform=train_transform,
                               in_memory=True)
    else:
        dataset = TinyImageNet(data_dir,
                               split='val',
                               transform=val_test_transform,
                               in_memory=True)

    # Bug fix: the `shuffle` argument was previously ignored and the loader
    # always shuffled; honor the caller's flag (the default is still True,
    # so existing callers are unaffected).
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory, shuffle=shuffle
    )
    return data_loader
focal_calibration | focal_calibration-main/Metrics/ood_test_utils.py | # Utility functions to get OOD detection ROC curves and AUROC scores
# Ideally should be agnostic of model architectures
import torch
import torch.nn.functional as F
from sklearn import metrics
def entropy(net_output):
    """Predictive entropy of each row of logits: -sum_c p_c * log p_c."""
    log_probs = F.log_softmax(net_output, dim=1)
    probs = F.softmax(net_output, dim=1)
    return -(probs * log_probs).sum(dim=1)
def confidence(net_output):
    """Maximum softmax probability of each row of logits."""
    max_prob, _ = F.softmax(net_output, dim=1).max(dim=1)
    return max_prob
def get_roc_auc(net, test_loader, ood_test_loader, device):
    """Score OOD detection for `net`.

    Runs the network over the in-distribution `test_loader` and the
    out-of-distribution `ood_test_loader`, collecting two scores per sample:
    predictive entropy (OOD samples labeled 1) and max-softmax confidence
    (in-distribution samples labeled 1). Returns the ROC curve triples and
    AUROC for both scores:
    (fpr, tpr, thresholds)_entropy, (fpr, tpr, thresholds)_confidence,
    auc_entropy, auc_confidence.
    """
    bin_labels_entropies = None
    bin_labels_confidences = None
    entropies = None
    confidences = None
    net.eval()
    with torch.no_grad():
        # Getting entropies for in-distribution data
        for i, (data, label) in enumerate(test_loader):
            data = data.to(device)
            label = label.to(device)
            # In-distribution: binary label 0 for entropy, 1 for confidence.
            # `label` itself is unused beyond providing the batch shape.
            bin_label_entropy = torch.zeros(label.shape).to(device)
            bin_label_confidence = torch.ones(label.shape).to(device)

            net_output = net(data)
            entrop = entropy(net_output)
            conf = confidence(net_output)

            # First batch initializes the accumulators; later batches append.
            if (i == 0):
                bin_labels_entropies = bin_label_entropy
                bin_labels_confidences = bin_label_confidence
                entropies = entrop
                confidences = conf
            else:
                bin_labels_entropies = torch.cat((bin_labels_entropies, bin_label_entropy))
                bin_labels_confidences = torch.cat((bin_labels_confidences, bin_label_confidence))
                entropies = torch.cat((entropies, entrop))
                confidences = torch.cat((confidences, conf))

        # Getting entropies for OOD data
        for i, (data, label) in enumerate(ood_test_loader):
            data = data.to(device)
            label = label.to(device)
            # OOD: binary labels are inverted relative to in-distribution.
            bin_label_entropy = torch.ones(label.shape).to(device)
            bin_label_confidence = torch.zeros(label.shape).to(device)

            net_output = net(data)
            entrop = entropy(net_output)
            conf = confidence(net_output)

            bin_labels_entropies = torch.cat((bin_labels_entropies, bin_label_entropy))
            bin_labels_confidences = torch.cat((bin_labels_confidences, bin_label_confidence))
            entropies = torch.cat((entropies, entrop))
            confidences = torch.cat((confidences, conf))

    # sklearn expects numpy arrays on CPU.
    fpr_entropy, tpr_entropy, thresholds_entropy = metrics.roc_curve(bin_labels_entropies.cpu().numpy(), entropies.cpu().numpy())
    fpr_confidence, tpr_confidence, thresholds_confidence = metrics.roc_curve(bin_labels_confidences.cpu().numpy(), confidences.cpu().numpy())

    auc_entropy = metrics.roc_auc_score(bin_labels_entropies.cpu().numpy(), entropies.cpu().numpy())
    auc_confidence = metrics.roc_auc_score(bin_labels_confidences.cpu().numpy(), confidences.cpu().numpy())

    return (fpr_entropy, tpr_entropy, thresholds_entropy), (fpr_confidence, tpr_confidence, thresholds_confidence), auc_entropy, auc_confidence
| 3,065 | 38.818182 | 143 | py |
focal_calibration | focal_calibration-main/Metrics/metrics.py | '''
Metrics to measure calibration of a trained deep neural network.
References:
[1] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def _bin_initializer(bin_dict, num_bins=10):
    """Zero out every bin's statistics (count, summed conf/acc, averages)."""
    for b in range(num_bins):
        entry = bin_dict[b]
        entry[COUNT] = 0
        entry[CONF] = 0
        entry[ACC] = 0
        entry[BIN_ACC] = 0
        entry[BIN_CONF] = 0
def _populate_bins(confs, preds, labels, num_bins=10):
    """Assign each prediction to one of `num_bins` equal-width confidence
    bins and accumulate per-bin count, summed confidence, number correct,
    and the derived average accuracy / confidence per bin."""
    bin_dict = {b: {} for b in range(num_bins)}
    _bin_initializer(bin_dict, num_bins)

    for confidence, prediction, label in zip(confs, preds, labels):
        # Bin index: confidence in ((b)/num_bins, (b+1)/num_bins] maps to b.
        binn = int(math.ceil(((num_bins * confidence) - 1)))
        bin_dict[binn][COUNT] = bin_dict[binn][COUNT] + 1
        bin_dict[binn][CONF] = bin_dict[binn][CONF] + confidence
        bin_dict[binn][ACC] = bin_dict[binn][ACC] + \
            (1 if (label == prediction) else 0)

    for binn in range(0, num_bins):
        count = bin_dict[binn][COUNT]
        if count == 0:
            bin_dict[binn][BIN_ACC] = 0
            bin_dict[binn][BIN_CONF] = 0
        else:
            bin_dict[binn][BIN_ACC] = float(bin_dict[binn][ACC]) / count
            bin_dict[binn][BIN_CONF] = bin_dict[binn][CONF] / float(count)
    return bin_dict
def expected_calibration_error(confs, preds, labels, num_bins=10):
    """ECE: bin-count-weighted average of |bin accuracy - bin confidence|."""
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    num_samples = len(labels)
    ece = 0
    for b in range(num_bins):
        gap = abs(bin_dict[b][BIN_ACC] - bin_dict[b][BIN_CONF])
        ece += (float(bin_dict[b][COUNT]) / num_samples) * gap
    return ece
def maximum_calibration_error(confs, preds, labels, num_bins=10):
    """MCE: largest |bin accuracy - bin confidence| over all bins."""
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    return max(abs(bin_dict[b][BIN_ACC] - bin_dict[b][BIN_CONF])
               for b in range(num_bins))
def average_calibration_error(confs, preds, labels, num_bins=10):
    """ACE: mean |bin accuracy - bin confidence| over non-empty bins only."""
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    gaps = [abs(bin_dict[b][BIN_ACC] - bin_dict[b][BIN_CONF])
            for b in range(num_bins) if bin_dict[b][COUNT] > 0]
    return sum(gaps) / float(len(gaps))
def l2_error(confs, preds, labels, num_bins=15):
    """Square root of the bin-count-weighted squared accuracy/confidence gap."""
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    num_samples = len(labels)
    l2_sum = 0
    for b in range(num_bins):
        gap = bin_dict[b][BIN_ACC] - bin_dict[b][BIN_CONF]
        l2_sum += (float(bin_dict[b][COUNT]) / num_samples) * gap ** 2
    return math.sqrt(l2_sum)
def test_classification_net_logits(logits, labels):
    '''
    Compute the confusion matrix, accuracy, and per-sample
    labels / predictions / confidences from a tensor of logits.
    '''
    softmax_out = F.softmax(logits, dim=1)
    confidence_vals, predictions = torch.max(softmax_out, dim=1)

    labels_list = labels.cpu().numpy().tolist()
    predictions_list = predictions.cpu().numpy().tolist()
    confidence_vals_list = confidence_vals.cpu().numpy().tolist()

    accuracy = accuracy_score(labels_list, predictions_list)
    return confusion_matrix(labels_list, predictions_list), accuracy, labels_list,\
        predictions_list, confidence_vals_list
def test_classification_net(model, data_loader, device):
    '''
    Evaluate `model` over `data_loader`, returning the confusion matrix,
    accuracy, and per-sample labels / predictions / confidences.
    '''
    model.eval()
    labels_list = []
    predictions_list = []
    confidence_vals_list = []
    with torch.no_grad():
        for data, label in data_loader:
            data = data.to(device)
            label = label.to(device)

            softmax_out = F.softmax(model(data), dim=1)
            confidence_vals, predictions = torch.max(softmax_out, dim=1)

            labels_list.extend(label.cpu().numpy().tolist())
            predictions_list.extend(predictions.cpu().numpy().tolist())
            confidence_vals_list.extend(confidence_vals.cpu().numpy().tolist())
    accuracy = accuracy_score(labels_list, predictions_list)
    return confusion_matrix(labels_list, predictions_list), accuracy, labels_list,\
        predictions_list, confidence_vals_list
# Calibration error scores in the form of loss metrics
class ECELoss(nn.Module):
    '''
    Expected Calibration Error: the bin-weight-averaged absolute gap
    between mean confidence and accuracy over equal-width confidence bins.
    '''
    def __init__(self, n_bins=15):
        super(ECELoss, self).__init__()
        edges = torch.linspace(0, 1, n_bins + 1)
        self.bin_lowers = edges[:-1]
        self.bin_uppers = edges[1:]

    def forward(self, logits, labels):
        probs = F.softmax(logits, dim=1)
        confidences, predictions = torch.max(probs, 1)
        correct = predictions.eq(labels)

        ece = torch.zeros(1, device=logits.device)
        for lo, hi in zip(self.bin_lowers, self.bin_uppers):
            # Samples whose confidence falls in (lo, hi].
            in_bin = confidences.gt(lo.item()) * confidences.le(hi.item())
            weight = in_bin.float().mean()
            if weight.item() > 0:
                acc_in_bin = correct[in_bin].float().mean()
                conf_in_bin = confidences[in_bin].mean()
                ece += torch.abs(conf_in_bin - acc_in_bin) * weight
        return ece
class AdaptiveECELoss(nn.Module):
    '''
    Adaptive ECE: like ECE, but with equal-count (rather than equal-width)
    confidence bins derived from the empirical confidence distribution.
    '''
    def __init__(self, n_bins=15):
        super(AdaptiveECELoss, self).__init__()
        self.nbins = n_bins

    def histedges_equalN(self, x):
        # Bin edges chosen so each bin holds (approximately) len(x)/nbins
        # samples, via linear interpolation over the sorted confidences.
        npt = len(x)
        return np.interp(np.linspace(0, npt, self.nbins + 1),
                         np.arange(npt),
                         np.sort(x))

    def forward(self, logits, labels):
        probs = F.softmax(logits, dim=1)
        confidences, predictions = torch.max(probs, 1)
        correct = predictions.eq(labels)

        conf_cpu = confidences.cpu().detach()
        _, edges = np.histogram(conf_cpu, self.histedges_equalN(conf_cpu))
        self.bin_lowers = edges[:-1]
        self.bin_uppers = edges[1:]

        ece = torch.zeros(1, device=logits.device)
        for lo, hi in zip(self.bin_lowers, self.bin_uppers):
            in_bin = confidences.gt(lo.item()) * confidences.le(hi.item())
            weight = in_bin.float().mean()
            if weight.item() > 0:
                acc_in_bin = correct[in_bin].float().mean()
                conf_in_bin = confidences[in_bin].mean()
                ece += torch.abs(conf_in_bin - acc_in_bin) * weight
        return ece
class ClasswiseECELoss(nn.Module):
    '''
    Classwise ECE: ECE computed separately on each class's softmax
    probability (against class membership), then averaged over classes.
    '''
    def __init__(self, n_bins=15):
        super(ClasswiseECELoss, self).__init__()
        edges = torch.linspace(0, 1, n_bins + 1)
        self.bin_lowers = edges[:-1]
        self.bin_uppers = edges[1:]

    def forward(self, logits, labels):
        # Number of classes inferred from the largest label present.
        num_classes = int((torch.max(labels) + 1).item())
        probs = F.softmax(logits, dim=1)

        per_class_sce = []
        for c in range(num_classes):
            class_conf = probs[:, c]
            class_sce = torch.zeros(1, device=logits.device)
            is_class = labels.eq(c)  # 1 where the true label is class c

            for lo, hi in zip(self.bin_lowers, self.bin_uppers):
                in_bin = class_conf.gt(lo.item()) * class_conf.le(hi.item())
                weight = in_bin.float().mean()
                if weight.item() > 0:
                    acc_in_bin = is_class[in_bin].float().mean()
                    conf_in_bin = class_conf[in_bin].mean()
                    class_sce += torch.abs(conf_in_bin - acc_in_bin) * weight

            per_class_sce.append(class_sce)

        return torch.mean(torch.cat(per_class_sce, dim=0))
| 9,347 | 35.092664 | 119 | py |
focal_calibration | focal_calibration-main/Losses/brier_score.py | '''
Implementation of Brier Score.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BrierScore(nn.Module):
    """Brier score: mean (over samples) of the squared L2 distance between
    the softmax distribution and the one-hot encoding of the target."""

    def __init__(self):
        super(BrierScore, self).__init__()

    def forward(self, input, target):
        """Compute the Brier score.

        Params
        ------
        - input: logits, shape (N, C) or (N, C, H, W) (spatial dims are
          flattened into extra samples).
        - target: integer class labels matching the sample dimension.

        Returns
        -------
        Scalar tensor: sum of squared differences divided by the number of
        samples.
        """
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        # Fix: build the one-hot tensor on the input's device/dtype.
        # The previous `.to(target.get_device())` returned device index -1
        # for CPU tensors, so the loss crashed when run off-GPU.
        target_one_hot = torch.zeros_like(input)
        target_one_hot.scatter_(1, target, 1)

        pt = F.softmax(input, dim=1)  # explicit dim (implicit dim is deprecated)
        squared_diff = (target_one_hot - pt) ** 2

        loss = torch.sum(squared_diff) / float(input.shape[0])
        return loss
focal_calibration | focal_calibration-main/Losses/loss.py | '''
Implementation of the following loss functions:
1. Cross Entropy
2. Focal Loss
3. Cross Entropy + MMCE_weighted
4. Cross Entropy + MMCE
5. Brier Score
'''
from torch.nn import functional as F
from Losses.focal_loss import FocalLoss
from Losses.focal_loss_adaptive_gamma import FocalLossAdaptive
from Losses.mmce import MMCE, MMCE_weighted
from Losses.brier_score import BrierScore
def cross_entropy(logits, targets, **kwargs):
    """Standard cross entropy, summed (not averaged) over the batch;
    extra kwargs are accepted for interface uniformity and ignored."""
    loss = F.cross_entropy(logits, targets, reduction='sum')
    return loss
def focal_loss(logits, targets, **kwargs):
    """Focal loss with the fixed focusing parameter kwargs['gamma']."""
    criterion = FocalLoss(gamma=kwargs['gamma'])
    return criterion(logits, targets)
def focal_loss_adaptive(logits, targets, **kwargs):
    """Focal loss whose gamma is chosen adaptively (see FocalLossAdaptive);
    kwargs must provide 'gamma' and 'device'."""
    criterion = FocalLossAdaptive(gamma=kwargs['gamma'],
                                  device=kwargs['device'])
    return criterion(logits, targets)
def mmce(logits, targets, **kwargs):
    """Mean cross entropy plus the lamda-weighted MMCE_m calibration penalty;
    kwargs must provide 'device' and 'lamda'."""
    ce_term = F.cross_entropy(logits, targets)
    penalty = MMCE(kwargs['device'])(logits, targets)
    return ce_term + (kwargs['lamda'] * penalty)
def mmce_weighted(logits, targets, **kwargs):
    """Mean cross entropy plus the lamda-weighted MMCE_w calibration penalty;
    kwargs must provide 'device' and 'lamda'."""
    ce_term = F.cross_entropy(logits, targets)
    penalty = MMCE_weighted(kwargs['device'])(logits, targets)
    return ce_term + (kwargs['lamda'] * penalty)
def brier_score(logits, targets, **kwargs):
    """Brier score loss; extra kwargs are accepted and ignored."""
    criterion = BrierScore()
    return criterion(logits, targets)
focal_calibration | focal_calibration-main/Losses/mmce.py | '''
Implementation of the MMCE (MMCE_m) and MMCE_weighted (MMCE_w).
Reference:
[1] A. Kumar, S. Sarawagi, U. Jain, Trainable Calibration Measures for Neural Networks from Kernel Mean Embeddings.
ICML, 2018.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MMCE(nn.Module):
    """
    MMCE_m loss (Kumar et al., ICML 2018): an MMD-style penalty between
    correctness indicators and predicted confidences under a Laplacian
    kernel on confidence pairs.
    """
    def __init__(self, device):
        super(MMCE, self).__init__()
        self.device = device

    def torch_kernel(self, matrix):
        # Laplacian kernel k(c_i, c_j) = exp(-|c_i - c_j| / 0.4) applied to
        # the last-dim confidence pairs.
        return torch.exp(-1.0*torch.abs(matrix[:, :, 0] - matrix[:, :, 1])/(0.4))

    def forward(self, input, target):
        if input.dim() > 2:
            # Flatten spatial dims into extra samples: N,C,H,W => N*H*W,C.
            input = input.view(input.size(0), input.size(1), -1)
            input = input.transpose(1, 2)
            input = input.contiguous().view(-1, input.size(2))
        target = target.view(-1)  # For CIFAR-10/100 target is already [N]

        probs = F.softmax(input, dim=1)
        confidences, pred_labels = torch.max(probs, 1)

        # 1 where the prediction is correct, 0 otherwise.
        correct_mask = torch.where(torch.eq(pred_labels, target),
                                   torch.ones(pred_labels.shape).to(self.device),
                                   torch.zeros(pred_labels.shape).to(self.device))

        c_minus_r = correct_mask - confidences
        dot_product = torch.mm(c_minus_r.unsqueeze(1),
                               c_minus_r.unsqueeze(0))

        # All pairs (c_i, c_j) of confidences, stacked along the last dim.
        tiled = confidences.unsqueeze(1).repeat(1, confidences.shape[0]).unsqueeze(2)
        pairs = torch.cat([tiled, tiled.permute(1, 0, 2)], dim=2)
        kernel_vals = self.torch_kernel(pairs)

        numerator = dot_product * kernel_vals
        denominator = torch.pow(
            torch.tensor(correct_mask.shape[0]).type(torch.FloatTensor), 2)
        return torch.sum(numerator) / denominator
class MMCE_weighted(nn.Module):
    """
    Computes MMCE_w loss (Kumar et al., ICML 2018): the importance-weighted
    variant of MMCE that reweights correct/incorrect samples to correct for
    the class imbalance between them.
    """
    def __init__(self, device):
        super(MMCE_weighted, self).__init__()
        self.device = device
    def torch_kernel(self, matrix):
        # Laplacian kernel on confidence pairs, bandwidth 0.4.
        return torch.exp(-1.0*torch.abs(matrix[:, :, 0] - matrix[:, :, 1])/(0.4))
    def get_pairs(self, tensor1, tensor2):
        # Build all (i, j) confidence pairs within correct samples, within
        # incorrect samples, and across the two groups; each has shape (., ., 2).
        correct_prob_tiled = tensor1.unsqueeze(1).repeat(1, tensor1.shape[0]).unsqueeze(2)
        incorrect_prob_tiled = tensor2.unsqueeze(1).repeat(1, tensor2.shape[0]).unsqueeze(2)
        correct_prob_pairs = torch.cat([correct_prob_tiled, correct_prob_tiled.permute(1, 0, 2)],
                                    dim=2)
        incorrect_prob_pairs = torch.cat([incorrect_prob_tiled, incorrect_prob_tiled.permute(1, 0, 2)],
                                    dim=2)
        correct_prob_tiled_1 = tensor1.unsqueeze(1).repeat(1, tensor2.shape[0]).unsqueeze(2)
        incorrect_prob_tiled_1 = tensor2.unsqueeze(1).repeat(1, tensor1.shape[0]).unsqueeze(2)
        correct_incorrect_pairs = torch.cat([correct_prob_tiled_1, incorrect_prob_tiled_1.permute(1, 0, 2)],
                                    dim=2)
        return correct_prob_pairs, incorrect_prob_pairs, correct_incorrect_pairs
    def get_out_tensor(self, tensor1, tensor2):
        # Mean of the element-wise product (kernel values x sampling weights).
        return torch.mean(tensor1*tensor2)
    def forward(self, input, target):
        # Flatten dense (segmentation-style) logits to shape (N*H*W, C).
        if input.dim()>2:
            input = input.view(input.size(0),input.size(1),-1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1,2)    # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1,input.size(2))   # N,H*W,C => N*H*W,C
        target = target.view(-1)  #For CIFAR-10 and CIFAR-100, target.shape is [N] to begin with
        predicted_probs = F.softmax(input, dim=1)
        predicted_probs, predicted_labels = torch.max(predicted_probs, 1)
        # 1.0 where the prediction is correct, else 0.0.
        correct_mask = torch.where(torch.eq(predicted_labels, target),
                          torch.ones(predicted_labels.shape).to(self.device),
                          torch.zeros(predicted_labels.shape).to(self.device))
        # k / k_p: number of correct / incorrect samples.
        k = torch.sum(correct_mask).type(torch.int64)
        k_p = torch.sum(1.0 - correct_mask).type(torch.int64)
        # cond flags guard the degenerate case of an all-correct or
        # all-incorrect batch (the MMD term is zeroed out in the return).
        cond_k = torch.where(torch.eq(k,0),torch.tensor(0).to(self.device),torch.tensor(1).to(self.device))
        cond_k_p = torch.where(torch.eq(k_p,0),torch.tensor(0).to(self.device),torch.tensor(1).to(self.device))
        k = torch.max(k, torch.tensor(1).to(self.device))*cond_k*cond_k_p + (1 - cond_k*cond_k_p)*2
        k_p = torch.max(k_p, torch.tensor(1).to(self.device))*cond_k_p*cond_k + ((1 - cond_k_p*cond_k)*
                                        (correct_mask.shape[0] - 2))
        # Confidences of the correct / incorrect samples (top-k selects the
        # nonzero entries of the masked confidence vectors).
        correct_prob, _ = torch.topk(predicted_probs*correct_mask, k)
        incorrect_prob, _ = torch.topk(predicted_probs*(1 - correct_mask), k_p)
        correct_prob_pairs, incorrect_prob_pairs,\
                correct_incorrect_pairs = self.get_pairs(correct_prob, incorrect_prob)
        correct_kernel = self.torch_kernel(correct_prob_pairs)
        incorrect_kernel = self.torch_kernel(incorrect_prob_pairs)
        correct_incorrect_kernel = self.torch_kernel(correct_incorrect_pairs)
        # Importance weights: (1 - confidence) for correct samples,
        # confidence for incorrect ones.
        sampling_weights_correct = torch.mm((1.0 - correct_prob).unsqueeze(1), (1.0 - correct_prob).unsqueeze(0))
        correct_correct_vals = self.get_out_tensor(correct_kernel,
                                                          sampling_weights_correct)
        sampling_weights_incorrect = torch.mm(incorrect_prob.unsqueeze(1), incorrect_prob.unsqueeze(0))
        incorrect_incorrect_vals = self.get_out_tensor(incorrect_kernel,
                                                          sampling_weights_incorrect)
        sampling_correct_incorrect = torch.mm((1.0 - correct_prob).unsqueeze(1), incorrect_prob.unsqueeze(0))
        correct_incorrect_vals = self.get_out_tensor(correct_incorrect_kernel,
                                                          sampling_correct_incorrect)
        # NOTE(review): correct_denom / incorrect_denom are computed but never
        # used below — looks like dead code; confirm against the paper.
        correct_denom = torch.sum(1.0 - correct_prob)
        incorrect_denom = torch.sum(incorrect_prob)
        m = torch.sum(correct_mask)
        n = torch.sum(1.0 - correct_mask)
        # Squared-MMD-style combination of the three pairwise terms.
        mmd_error = 1.0/(m*m + 1e-5) * torch.sum(correct_correct_vals)
        mmd_error += 1.0/(n*n + 1e-5) * torch.sum(incorrect_incorrect_vals)
        mmd_error -= 2.0/(m*n + 1e-5) * torch.sum(correct_incorrect_vals)
        return torch.max((cond_k*cond_k_p).type(torch.FloatTensor).to(self.device).detach()*torch.sqrt(mmd_error + 1e-10), torch.tensor(0.0).to(self.device)) | 6,583 | 46.028571 | 157 | py |
focal_calibration | focal_calibration-main/Losses/focal_loss.py | '''
Implementation of Focal Loss.
Reference:
[1] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, Focal loss for dense object detection.
arXiv preprint arXiv:1708.02002, 2017.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017): FL(p_t) = -(1 - p_t)**gamma * log(p_t).

    Args:
        gamma: focusing parameter; gamma=0 reduces to standard cross entropy.
        size_average: if True return the batch mean, otherwise the batch sum.
    """
    def __init__(self, gamma=0, size_average=False):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, input, target):
        if input.dim() > 2:
            # Flatten spatial dims: N,C,H,W -> N*H*W,C so each location is a sample.
            input = input.view(input.size(0), input.size(1), -1)
            input = input.transpose(1, 2)
            input = input.contiguous().view(-1, input.size(2))
        target = target.view(-1, 1)
        # Fix: pass dim=1 explicitly — the implicit-dim form of log_softmax is
        # deprecated and warns (same result for 2-D input).
        logpt = F.log_softmax(input, dim=1)
        # Pick the log-probability of the gold class for every sample.
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        pt = logpt.exp()
        loss = -1 * (1 - pt) ** self.gamma * logpt
        return loss.mean() if self.size_average else loss.sum()
focal_calibration | focal_calibration-main/Losses/focal_loss_adaptive_gamma.py | '''
Implementation of Focal Loss with adaptive gamma.
Reference:
[1] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, Focal loss for dense object detection.
arXiv preprint arXiv:1708.02002, 2017.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.special import lambertw
import numpy as np
def get_gamma(p=0.2):
    """Return the gamma at which g(p, gamma) = 1 for threshold probability *p*.

    For confidences p_t > p, the chosen gamma yields g(p_t, gamma) < 1.
    Uses the k = -1 branch of the Lambert W function.
    """
    log_p = np.log(p)
    log_1mp = np.log(1 - p)
    a = (1 - p) / (p * log_p)
    y = ((1 - p) ** (1 - a)) / (p * log_p) * log_1mp
    gamma_complex = a + lambertw(-y + 1e-12, k=-1) / log_1mp
    # Lambert W returns a complex value; only the real part is meaningful here.
    return np.real(gamma_complex)
# Confidence thresholds and the focusing parameter applied below each one:
# pt < 0.2 -> gamma 5.0, 0.2 <= pt < 0.5 -> gamma 3.0.
ps = [0.2, 0.5]
gammas = [5.0, 3.0]
# dict(zip(...)) replaces the old manual parallel-index loop, which also
# leaked a stray counter `i` into the module namespace.
gamma_dic = dict(zip(ps, gammas))
class FocalLossAdaptive(nn.Module):
    """Focal loss with a per-sample gamma chosen from the confidence p_t.

    Samples with p_t >= 0.5 use the fixed ``gamma``; lower-confidence samples
    use the threshold table ``gamma_dic`` defined at module level.
    """
    def __init__(self, gamma=0, size_average=False, device=None):
        super(FocalLossAdaptive, self).__init__()
        self.size_average = size_average
        self.gamma = gamma
        self.device = device
    def get_gamma_list(self, pt):
        # Map each confidence pt[i] to its focusing parameter.
        gamma_list = []
        batch_size = pt.shape[0]
        for i in range(batch_size):
            pt_sample = pt[i].item()
            if (pt_sample >= 0.5):
                gamma_list.append(self.gamma)
                continue
            # Choosing the gamma for the sample: first threshold above pt wins.
            for key in sorted(gamma_dic.keys()):
                if pt_sample < key:
                    gamma_list.append(gamma_dic[key])
                    break
        return torch.tensor(gamma_list).to(self.device)
    def forward(self, input, target):
        # Flatten dense (segmentation-style) logits to shape (N*H*W, C).
        if input.dim()>2:
            input = input.view(input.size(0),input.size(1),-1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1,2)    # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1,input.size(2))   # N,H*W,C => N*H*W,C
        target = target.view(-1,1)
        logpt = F.log_softmax(input, dim=1)
        # Log-probability of the gold class per sample.
        logpt = logpt.gather(1,target)
        logpt = logpt.view(-1)
        pt = logpt.exp()
        gamma = self.get_gamma_list(pt)
        loss = -1 * (1-pt)**gamma * logpt
        if self.size_average: return loss.mean()
        else: return loss.sum()
| 2,268 | 31.884058 | 99 | py |
focal_calibration | focal_calibration-main/Net/resnet_tiny_imagenet.py | '''
Pytorch implementation of ResNet models.
Reference:
[1] He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, 2016.
'''
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
# --- HELPERS ---
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
# --- COMPONENTS ---
class BasicBlock(nn.Module):
    """Standard two-conv residual block (3x3 -> 3x3) with optional downsample."""
    # Output channels = planes * expansion.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection applied to the skip path when shape/stride changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Residual addition before the final activation.
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand x4)."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Projection applied to the skip path when shape/stride changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet variant for Tiny-ImageNet (200 classes by default).

    The stem uses a stride-1 3x3 conv and no max-pool — presumably because
    Tiny-ImageNet inputs are 64x64 rather than 224x224 (TODO confirm).
    ``temp`` divides the logits (temperature scaling for calibration).
    """
    def __init__(self, block, layers, num_classes=200, temp=1.0):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        #self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Adaptive 2x2 pooling -> 4 spatial positions, hence the *4 in fc.
        self.avgpool = nn.AdaptiveAvgPool2d(2)
        self.fc = nn.Linear(512 * block.expansion * 4, num_classes)
        self.temp = temp
        # He initialization for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # First block of the stage may downsample; the rest keep the shape.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        #x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # Temperature-scaled logits.
        x = self.fc(x) / self.temp
        return x
def resnet18(temp=1.0, **kwargs):
    """ResNet-18: BasicBlock with stages [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2], temp=temp, **kwargs)
def resnet34(temp=1.0, **kwargs):
    """ResNet-34: BasicBlock with stages [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3], temp=temp, **kwargs)
def resnet50(temp=1.0, **kwargs):
    """ResNet-50: Bottleneck with stages [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3], temp=temp, **kwargs)
def resnet101(temp=1.0, **kwargs):
    """ResNet-101: Bottleneck with stages [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3], temp=temp, **kwargs)
def resnet110(temp=1.0, **kwargs):
    """ResNet-110: Bottleneck with stages [3, 4, 26, 3]."""
    return ResNet(Bottleneck, [3, 4, 26, 3], temp=temp, **kwargs)
def resnet152(temp=1.0, **kwargs):
    """ResNet-152: Bottleneck with stages [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3, 8, 36, 3], temp=temp, **kwargs)
| 5,254 | 27.715847 | 109 | py |
focal_calibration | focal_calibration-main/Net/resnet.py | '''
Pytorch implementation of ResNet models.
Reference:
[1] He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, 2016.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3 convs with a residual shortcut (identity, or 1x1 projection
    when the stride or channel count changes)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the output shape differs from the input.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        branch = self.conv1(x)
        branch = F.relu(self.bn1(branch))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return F.relu(branch)
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with an identity or 1x1-projection shortcut."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # Identity shortcut unless the output shape differs from the input.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual addition before the final activation.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet (stride-1 3x3 stem, 4 stages, 4x4 avg-pool head).

    ``temp`` divides the logits (temperature scaling for calibration).
    """
    def __init__(self, block, num_blocks, num_classes=10, temp=1.0):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.fc = nn.Linear(512*block.expansion, num_classes)
        self.temp = temp
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 4x4 average pool — assumes 32x32 inputs (CIFAR); TODO confirm.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        # Temperature-scaled logits.
        out = self.fc(out) / self.temp
        return out
def resnet18(temp=1.0, **kwargs):
    """ResNet-18: BasicBlock with stages [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2], temp=temp, **kwargs)
def resnet34(temp=1.0, **kwargs):
    """ResNet-34: BasicBlock with stages [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3], temp=temp, **kwargs)
def resnet50(temp=1.0, **kwargs):
    """ResNet-50: Bottleneck with stages [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3], temp=temp, **kwargs)
def resnet101(temp=1.0, **kwargs):
    """ResNet-101: Bottleneck with stages [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3], temp=temp, **kwargs)
def resnet110(temp=1.0, **kwargs):
    """ResNet-110: Bottleneck with stages [3, 4, 26, 3]."""
    return ResNet(Bottleneck, [3, 4, 26, 3], temp=temp, **kwargs)
def resnet152(temp=1.0, **kwargs):
    """ResNet-152: Bottleneck with stages [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3, 8, 36, 3], temp=temp, **kwargs)
focal_calibration | focal_calibration-main/Net/densenet.py | '''
Pytorch impplementation of DenseNet.
Reference:
[1] Gao Huang, Zhuang Liu, and Kilian Q. Weinberger. Densely connected convolutional networks.
arXiv preprint arXiv:1608.06993, 2016a.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 (4k channels) then
    BN-ReLU-3x3 (k channels), with the input concatenated to the output."""
    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        # Dense connectivity: new features are concatenated with the input.
        out = torch.cat([out,x], 1)
        return out
class Transition(nn.Module):
    """DenseNet transition layer: BN -> ReLU -> 1x1 conv -> 2x2 average pool."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        normed = F.relu(self.bn(x))
        squeezed = self.conv(normed)
        return F.avg_pool2d(squeezed, 2)
class DenseNet(nn.Module):
    """DenseNet (Huang et al., 2016) with four dense blocks.

    Args:
        block: dense layer class (Bottleneck above).
        nblocks: number of dense layers per block.
        growth_rate: channels added by each dense layer (k).
        reduction: channel compression factor at each transition.
        temp: temperature divisor applied to the logits.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10, temp=1.0):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        self.temp = temp
        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Each dense block adds nblocks[i]*growth_rate channels; each
        # transition compresses channels by `reduction` and halves H, W.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
    def _make_dense_layers(self, block, in_planes, nblock):
        # Channel count grows by growth_rate after every layer (concatenation).
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # 4x4 average pool — assumes 32x32 inputs (CIFAR); TODO confirm.
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        # Temperature-scaled logits.
        out = self.linear(out) / self.temp
        return out
def densenet121(temp=1.0, **kwargs):
    """DenseNet-121: growth rate 32, blocks [6, 12, 24, 16]."""
    cfg = [6, 12, 24, 16]
    return DenseNet(Bottleneck, cfg, growth_rate=32, temp=temp, **kwargs)
def densenet169(temp=1.0, **kwargs):
    """DenseNet-169: growth rate 32, blocks [6, 12, 32, 32]."""
    cfg = [6, 12, 32, 32]
    return DenseNet(Bottleneck, cfg, growth_rate=32, temp=temp, **kwargs)
def densenet201(temp=1.0, **kwargs):
    """DenseNet-201: growth rate 32, blocks [6, 12, 48, 32]."""
    cfg = [6, 12, 48, 32]
    return DenseNet(Bottleneck, cfg, growth_rate=32, temp=temp, **kwargs)
def densenet161(temp=1.0, **kwargs):
    """DenseNet-161: growth rate 48, blocks [6, 12, 36, 24]."""
    cfg = [6, 12, 36, 24]
    return DenseNet(Bottleneck, cfg, growth_rate=48, temp=temp, **kwargs)
focal_calibration | focal_calibration-main/Net/wide_resnet.py | '''
Pytorch implementation of wide resnet.
Reference:
[1] S. Zagoruyko and N. Komodakis. Wide residual networks. arXiv preprint arXiv:1605.07146, 2016.
'''
import torch
import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Unbiased 3x3 convolution; padding keeps the spatial size at stride 1."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """Wide-ResNet basic residual block: two 3x3 convs plus skip connection."""
    # Output channels = planes * expansion.
    expansion=1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection applied to the skip path when shape/stride changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Residual addition before the final activation.
        out += residual
        out = self.relu(out)
        return out
class Wide_ResNet_Cifar(nn.Module):
    """Wide ResNet for CIFAR (Zagoruyko & Komodakis, 2016).

    Three stages of widened residual blocks (16w, 32w, 64w channels);
    ``temp`` divides the logits (temperature scaling for calibration).
    """
    def __init__(self, block, layers, wfactor, num_classes=10, temp=1.0):
        super(Wide_ResNet_Cifar, self).__init__()
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16*wfactor, layers[0])
        self.layer2 = self._make_layer(block, 32*wfactor, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64*wfactor, layers[2], stride=2)
        # 8x8 average pool — assumes 32x32 inputs (CIFAR); TODO confirm.
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64*block.expansion*wfactor, num_classes)
        self.temp = temp
        # He initialization for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # First block of the stage may downsample; the rest keep the shape.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        # int() guards against float block counts from the factory below.
        for _ in range(1, int(blocks)):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # Temperature-scaled logits.
        x = self.fc(x) / self.temp
        return x
def wide_resnet_cifar(temp=1.0, num_classes=10, depth=26, width=10, **kwargs):
    """Build a Wide-ResNet for CIFAR.

    Args:
        temp: temperature divisor applied to the logits.
        num_classes: number of output classes.
        depth: total depth; must be of the form 6n + 2.
        width: widening factor.
    """
    assert (depth - 2) % 6 == 0
    # Fix: use integer division — under Python 3, `/` yielded a float block
    # count that only worked because _make_layer casts it with int().
    n = (depth - 2) // 6
    return Wide_ResNet_Cifar(BasicBlock, [n, n, n], width, num_classes=num_classes, temp=temp, **kwargs)
LERG | LERG-main/explain.py | from lerg.perturbation_models import RandomPM, LIMERandomPM
from lerg.RG_explainers import LERG_LIME, LERG_R, LERG_SHAP, LERG_SHAP_log
from target_models import GPT
import torch
import tqdm
import sys
import json
import os
from datetime import datetime
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--explain_method",type=str,required=True,
help="Choose from 'LERG_S', 'LERG_L', 'SHAP', 'LIME'")
parser.add_argument("--model_dir",type=str,required=True,
help="Directory of the trained target model")
parser.add_argument("--data_path",type=str,required=True,
help="Path of the data for explaining the target model on")
args = parser.parse_args()
def read_data(data_path):
    """Load the test split and pair each last history turn with its gold response."""
    with open(data_path, "r") as fin:
        raw = json.load(fin)
    pairs = []
    for record in raw["test"]:
        pairs.append((record["history"][-1], record["gt_response"]))
    return pairs
def explain_dataset(explainer, model_f, tokenizer, data_path):
    """Run the chosen local explainer over every (input, response) pair.

    Args:
        explainer: either a LERG explainer class, or a tuple
            (perturbation_model, LERG class) whose first element supplies
            perturb_inputs.
        model_f: forward function of the target response-generation model.
        tokenizer: tokenizer of the target model (used for length filtering).
        data_path: JSON file containing the dialogue test split.

    Side effects: writes one 'exp/<method>_<id>_<timestamp>.exp' file per
    explained example.
    """
    if isinstance(explainer, tuple):
        PM, LERG = explainer
        perturb_f = PM.perturb_inputs
    else:
        LERG = explainer
        perturb_f = None
    data = read_data(data_path)
    avg_pplc = 0  # NOTE(review): never updated or read below — looks dead.
    example_id = 0
    now = datetime.now()
    nowstr = now.strftime("%m%d%Y_%H%M%S")
    if not os.path.exists("exp/"):
        os.mkdir("exp")
    for x, y in tqdm.tqdm(data):
        # experiment on sentences with length less than 30, such that can get explanation using 8G GPU
        if len(tokenizer.tokenize(x)) <= 30 and len(tokenizer.tokenize(y)) <= 30:
            local_exp = LERG(model_f, x, y, perturb_f, tokenizer)
            phi_set, phi_map, x_components, y_components = local_exp.get_local_exp()
            save_path = 'exp/{}_{}_{}.exp'.format(args.explain_method, example_id, nowstr)
            local_exp.save_exp(save_path)
        # example_id advances for every datum, even skipped ones, so saved
        # file ids line up with dataset indices.
        example_id += 1
if __name__ == "__main__":
    # Default perturbation model; LIME-style methods swap in LIMERandomPM.
    PM = RandomPM()
    if args.explain_method == "LIME":
        PM = LIMERandomPM()
        explainer = (PM, LERG_LIME)
    elif args.explain_method == "LERG_L":
        PM = LIMERandomPM()
        # Bug fix: the imported name is LERG_R — LERG_LIME_R does not exist
        # and raised a NameError when this branch was taken.
        explainer = (PM, LERG_R)
    elif args.explain_method == "SHAP":
        explainer = (PM, LERG_SHAP)
    elif args.explain_method == "LERG_S":
        explainer = (PM, LERG_SHAP_log)
    else:
        # Doubled braces render literal set braces in str.format; the old
        # "\{" was an invalid escape sequence and printed stray backslashes.
        raise ValueError(
            "select an explainer from {{'LIME', 'SHAP', 'LERG_L', 'LERG_S'}}, "
            "currently is {}".format(args.explain_method)
        )
    model = GPT(model_dir=args.model_dir)
    explain_dataset(explainer, model.forward, model.tokenizer, args.data_path)
| 2,528 | 36.746269 | 136 | py |
LERG | LERG-main/eval.py | from target_models import GPT
from lerg.metrics import ppl_c, ppl_c_add
from lerg.visualize import plot_interactions
import tqdm
import sys
import json
import torch
import numpy as np
import os
from datetime import datetime
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--explain_method",type=str,required=True,
help="Choose from 'LERG_S', 'LERG_L', 'SHAP', 'LIME', 'attn', 'grad', 'none'(random)")
parser.add_argument("--time_stamp",type=str,required=True,
help="None for 'attn','grad','none'(random); for others, the time stamp in format '%m%d%Y_%H%M%S' of the saved explanations after runing 'explain.py'")
parser.add_argument("--model_dir",type=str,required=True,
help="Directory of the trained target model")
parser.add_argument("--data_path",type=str,required=True,
help="Path of the data for explaining the target model on")
parser.add_argument("--plot",action='store_true',
help="If true, plot the interactions (maps) for all data points")
args = parser.parse_args()
def read_data(data_path):
    """Return (last-history-turn, gold-response) pairs from the test split."""
    with open(data_path, "r") as fin:
        raw_data = json.load(fin)
    return [
        (entry["history"][-1], entry["gt_response"])
        for entry in raw_data["test"]
    ]
# Ratios swept by evaluate_exp below: pplc_r_ratios feed ppl_c and
# ppl_a_ratios feed ppl_c_add.  NOTE(review): exact semantics of each ratio
# live in lerg.metrics — confirm there.
pplc_r_ratios = [0.1,0.2,0.3,0.4,0.5]
ppl_a_ratios = [0.5,0.6,0.7,0.8,0.9]
def evaluate_exp(tokenizer, model_f, data_path):
    """Evaluate explanations with the PPLC_R / PPL_A perplexity metrics.

    For 'attn'/'grad' the maps are produced on the fly by model_f; for 'none'
    no map is used (random baseline); otherwise maps are loaded from the .exp
    files written by explain.py (matched via args.time_stamp).

    Args:
        tokenizer: target-model tokenizer (used for the <=30-token filter).
        model_f: forward function of the target model.
        data_path: JSON file with the dialogue test split.

    Side effects: prints the evaluated-example count and both metrics, and
    optionally writes interaction plots under plots/<method>/.
    """
    data = read_data(data_path)
    avg_pplc = [0 for _ in pplc_r_ratios]
    avg_pplc_add = [0 for _ in ppl_a_ratios]

    def count_stats(phi_set, phi_map, x_components, y_components, model_f):
        # Accumulate the log-perplexity contributions for every ratio.
        for i, r in enumerate(pplc_r_ratios):
            entc, x_re, _, _ = ppl_c(phi_set, x_components, y_components, model_f, ratio=r)
            avg_pplc[i] += entc
        for i, r in enumerate(ppl_a_ratios):
            ent_add, *_ = ppl_c_add(phi_set, x_components, y_components, model_f, ratio=r)
            avg_pplc_add[i] += ent_add

    example_id = 0
    count = 0
    if args.plot:
        # Fix: os.makedirs with exist_ok also creates the missing "plots"
        # parent; os.mkdir raised FileNotFoundError when "plots/" was absent.
        os.makedirs("plots/{}".format(args.explain_method), exist_ok=True)
    if args.explain_method in ("attn", "grad", "none"):
        for x, y in tqdm.tqdm(data):
            # Same <=30-token filter used when explanations were generated.
            if len(tokenizer.tokenize(x)) <= 30 and len(tokenizer.tokenize(y)) <= 30:
                if args.explain_method != "none":
                    phi_set, phi_map, x_components, y_components = model_f([x], y, output_type=args.explain_method)
                    if args.plot:
                        plot_interactions(phi_map, x_components, y_components, save_path='plots/{}/{}_{}.png'.format(args.explain_method, example_id, args.time_stamp))
                else:
                    # Random baseline: no importance scores at all.
                    phi_set, phi_map = None, None
                    x_components = tokenizer.tokenize(x)
                    y_components = tokenizer.tokenize(y)
                count_stats(phi_set, phi_map, x_components, y_components, model_f)
                count += 1
            example_id += 1
    else:
        for x, y in tqdm.tqdm(data):
            exp_path = 'exp/{}_{}_{}.exp'.format(args.explain_method, example_id, args.time_stamp)
            if os.path.exists(exp_path):
                phi_set, phi_map, x_components, y_components = torch.load(exp_path)
                if args.plot:
                    plot_interactions(phi_map, x_components, y_components, save_path='plots/{}/{}_{}.png'.format(args.explain_method, example_id, args.time_stamp))
                count_stats(phi_set, phi_map, x_components, y_components, model_f)
                count += 1
            example_id += 1
    print(count)
    # Report exp(-mean log likelihood) per ratio.
    print("PPLC_R:{}".format([np.exp(-pplc_r / count) for pplc_r in avg_pplc]))
    print("PPL_A:{}".format([np.exp(-pplc_a / count) for pplc_a in avg_pplc_add]))
if __name__ == "__main__":
    # Load the target dialogue model, then score explanations against it.
    target = GPT(model_dir=args.model_dir)
    evaluate_exp(target.tokenizer, target.forward, args.data_path)
| 3,915 | 44.534884 | 164 | py |
LERG | LERG-main/target_models.py | import torch
import torch.nn.functional as F
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
from transformers import GPT2Tokenizer, GPT2LMHeadModel
def get_sum_multi_head_attentions(multi_head_attentions):
    """Collapse the head dimension (dim 1) of every layer, then sum the layers."""
    total = 0
    for layer_attn in multi_head_attentions:
        total = total + torch.sum(layer_attn, 1)
    return total
class GPT:
    """Wrapper around a fine-tuned OpenAI-GPT dialogue model.

    forward() supports three output modes: next-token probabilities ('prob'),
    summed attention maps ('attn'), and embedding-gradient saliency ('grad').
    """
    def __init__(self, model_dir="models/dailydialog_gpt", device="cuda" if torch.cuda.is_available() else "cpu", evaluate=False):
        # NOTE(review): `evaluate` is accepted but never used.
        self.device = device
        self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model_dir)
        self.model = OpenAIGPTLMHeadModel.from_pretrained(model_dir, output_attentions=True).to(device)
        self.SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
        self.bos, self.eos, self.speaker1, self.speaker2, self.padding = \
                self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS)
    def forward(self, inputs, label=None, is_x_tokenized=False, is_y_tokenized=False, output_type="prob"):
        """Score `label` conditioned on each input in `inputs`.

        Args:
            inputs: list of input strings (or token lists if is_x_tokenized).
            label: the response string (or token list if is_y_tokenized).
            output_type: 'prob' -> (per-step vocab probabilities over the
                response, response token ids); 'attn'/'grad' -> (per-input-token
                scores, {(x_i, y_j): score} map, input tokens, label tokens).
        """
        # Encode each input and prefix it with the <speaker1> role token.
        x_set = [self.tokenizer.convert_tokens_to_ids(x) for x in inputs] if is_x_tokenized \
                else [self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(x)) for x in inputs]
        x_set = [[self.speaker1] + x for x in x_set]
        # Right-pad all inputs to a common length max_l.
        max_l = max(len(x) for x in x_set)
        x_set = [x + [self.padding] * (max_l - len(x)) for x in x_set]
        y = self.tokenizer.convert_tokens_to_ids(label) if is_y_tokenized \
            else self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(label))
        y = [self.speaker2] + y
        # Full sequence: <bos> + input + response; token types mark speakers.
        input_ids = torch.tensor([[self.bos] + x+y for x in x_set]).to(self.device)
        token_type_ids = torch.tensor([[self.speaker1] * (len(x)+1) + [self.speaker2] * len(y) for x in x_set]).to(self.device)
        if output_type == "prob":
            with torch.no_grad():
                outputs = self.model(input_ids, token_type_ids=token_type_ids)
                probs = F.softmax(outputs.logits, dim=-1)
            # Distributions predicting each response token, and the targets.
            return probs[:,-len(y):-1,:], y[1:]
        elif output_type == "attn":
            with torch.no_grad():
                outputs = self.model(input_ids, token_type_ids=token_type_ids)
                probs = F.softmax(outputs.logits, dim=-1)
                attn = get_sum_multi_head_attentions(outputs[-1])
                # Keep only response-position rows attending to input columns.
                attn = attn[0][max_l+1:, 1:max_l]
            attn_map = {}
            for xi in range(max_l-1):
                for yi in range(len(y)-1):
                    attn_map[(xi,yi)] = attn[yi,xi]
            attn_set = torch.sum(attn,dim=0)
            return attn_set, attn_map, self.tokenizer.tokenize(inputs[0]), self.tokenizer.tokenize(label)
        elif output_type == "grad":
            # Freeze everything except the first parameter (the token
            # embedding matrix), which gradients are taken w.r.t.
            count = 0
            for param in self.model.parameters():
                if count > 0:
                    param.requires_grad = False
                else:
                    embeds = param
                count +=1
            input_ids = torch.tensor([[self.bos] + x+y + [self.eos] for x in x_set]).to(self.device)
            token_type_ids = torch.tensor([[self.speaker1] * (len(x)+1) + [self.speaker2] * (len(y)+1) for x in x_set]).to(self.device)
            outputs = self.model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
            # Per-response-token cross entropy (no reduction).
            losses = F.cross_entropy(outputs.logits[0,max_l+2:-1,:], input_ids[0,max_l+3:], reduction="none")
            scores = []
            for j in range(len(y)-1):
                grads = torch.autograd.grad(losses[j],embeds,retain_graph=True, create_graph=False)[0]
                # Saliency: norm change of each embedding after one grad step.
                mod = embeds - grads
                changes = torch.norm(mod, dim=1) - torch.norm(embeds, dim=1)
                scores.append(changes[input_ids[0,2:max_l+1]])
            grad_map = {}
            for xi in range(max_l-1):
                for yi in range(len(y)-1):
                    grad_map[(xi,yi)] = scores[yi][xi]
            grad_set = torch.sum(torch.stack(scores),dim=0)
            return grad_set, grad_map, self.tokenizer.tokenize(inputs[0]), self.tokenizer.tokenize(label)
| 4,007 | 55.450704 | 135 | py |
LERG | LERG-main/lerg/RG_explainers.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.linear_model import Ridge
import numpy as np
import random
class Explainer():
    """Base class for explainers of a generation model.

    Args:
        model_f: the tested model function, which generates y given x; it must
            return (probabilities of the output sequence, y's token ids) of
            equal length.
        x: the input sequence.
        y: the generated sequence.

    Derived classes produce phi_set, the weight of each x_i.
    """
    def __init__(self, model_f, x, y):
        self.phi_map = {}
        self.model_f = model_f
        self.x = x
        self.y = y

    def get_prob(self, probs, y):
        """For every perturbed sample, select the probability of each gold token."""
        selected = []
        for prob in probs:
            selected.append([step_dist[token_id] for step_dist, token_id in zip(prob, y)])
        return selected
class LERG(Explainer):
    """Base class for all Local Explanation methods for Response Generation."""

    def __init__(self, model_f, x, y, perturb_f, tokenizer, max_iters=50, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y)
        self.perturb_inputs = perturb_f
        self.max_iters = max_iters
        self.tokenizer = tokenizer
        self.device = device

    def combine_sequence(self, phi_sets):
        """Collapse a (output_dim, input_dim) weight matrix to per-input weights."""
        return torch.sum(phi_sets, dim=0)

    def map_to_interactions(self, phi_sets):
        """Flatten the weight matrix into a {(x_index, y_index): weight} map."""
        return {
            (xi, yi): phi_sets[yi, xi]
            for yi in range(phi_sets.shape[0])
            for xi in range(phi_sets.shape[1])
        }

    def save_exp(self, save_path):
        """Persist the computed explanation; get_local_exp() must run first."""
        if self.phi_set is None:
            raise ValueError("run get_local_exp() first")
        torch.save([self.phi_set, self.phi_map, self.components, self.y], save_path)
class LERG_LIME(LERG):
    """
    LERG by LIME: fits a proximity-weighted linear surrogate on perturbed
    inputs; the surrogate's weights are the explanation.
    """
    def __init__(self, model_f, x, y, perturb_f, tokenizer, max_iters=50, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters, device=device)
        self.batchsize = 64
    def get_local_exp(self):
        """Compute and return (phi_set, phi_map, components, tokenized y)."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        x_set, z_set, self.components = self.perturb_inputs(self.x)
        y_probs = []
        # Score all perturbed inputs batch by batch; ceil so the final partial
        # batch is included.  BUG FIX: the previous expression
        # `len(x_set)//self.batchsize + 1 if len(x_set)%self.batchsize > 0 else 0`
        # parsed as `(len//bs + 1) if rem else 0`, so NO batches were processed
        # (and `y` below was left unbound) whenever len(x_set) was an exact
        # multiple of the batch size.
        for i in range(math.ceil(len(x_set) / self.batchsize)):
            probs,y = self.model_f(x_set[i*self.batchsize:(i+1)*self.batchsize], label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            y_probs_batch = self.get_prob(probs, y)
            y_probs_batch = torch.tensor(y_probs_batch)
            y_probs.append(y_probs_batch)
        y_probs = torch.cat(y_probs,dim=0)
        # Proximity of each perturbation to the unperturbed input (z_set[0]).
        D = pairwise_distances(z_set,z_set[0].view(1,-1),metric='cosine')
        kernel_width = 25# as LIME's original implementation
        weights = torch.tensor(np.sqrt(np.exp(-(D ** 2) / kernel_width ** 2)), requires_grad=False).to(self.device)
        # One linear head per output token; its learned weights become phi.
        self.expl_model = nn.Linear(z_set.shape[1],len(y),bias=False).to(self.device)
        self.optimizer = torch.optim.SGD(self.expl_model.parameters(), lr=5e-1)
        for i in range(self.max_iters):
            for z_batch, y_probs_batch, w_batch in zip(torch.split(z_set, self.batchsize), torch.split(y_probs, self.batchsize), torch.split(weights,self.batchsize)):
                z_batch = z_batch.to(self.device)
                y_probs_batch = y_probs_batch.to(self.device)
                preds = self.expl_model(z_batch)# the original version for classifier
                # Proximity-weighted squared error, as in LIME.
                loss = torch.mean(w_batch * (preds - y_probs_batch) ** 2)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
        with torch.no_grad():
            phi_sets = self.expl_model.weight
            self.phi_set = self.combine_sequence(phi_sets)
            self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
class LERG_R(LERG):
    """
    LERG use ratio probability

    Fits a linear surrogate to predict the *ratio* of each perturbed
    sequence's token probabilities to the unperturbed ("gold") probabilities.
    """
    def __init__(self, model_f, x, y, perturb_f, tokenizer, max_iters=50, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters, device=device)
        self.batchsize = 64
    def get_local_exp(self):
        """Compute and return (phi_set, phi_map, components, tokenized y)."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        x_set, z_set, self.components = self.perturb_inputs(self.x)
        # Token probabilities of y given the ORIGINAL input.
        gold_probs,y = self.model_f([self.x], label=self.y, is_x_tokenized=True, is_y_tokenized=True)
        gold_probs = self.get_prob(gold_probs, y)
        gold_probs = torch.tensor(gold_probs)
        gold_probs = gold_probs[0]
        # Token probabilities of y given each PERTURBED input (single forward
        # pass over the whole set, unlike LERG_LIME's batched scoring).
        probs,y = self.model_f(x_set, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
        y_probs = self.get_prob(probs, y)
        y_probs = torch.tensor(y_probs)
        y_probs /= gold_probs  # regression target: probability ratio
        # One linear head per output token; its learned weights become phi.
        self.expl_model = nn.Linear(z_set.shape[1],len(y),bias=False).to(self.device)
        self.optimizer = torch.optim.SGD(self.expl_model.parameters(), lr=5e-1)
        for i in range(self.max_iters):
            for z_batch, y_probs_batch in zip(torch.split(z_set, self.batchsize), torch.split(y_probs, self.batchsize)):
                z_batch = z_batch.to(self.device)
                y_probs_batch = y_probs_batch.to(self.device)
                preds = self.expl_model(z_batch)
                loss = F.mse_loss(preds,y_probs_batch)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
        with torch.no_grad():
            phi_sets = self.expl_model.weight
            self.phi_set = self.combine_sequence(phi_sets)
            self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
class LERG_SHAP(LERG):
    """
    LERG use SampleShapley (original)
    """
    def __init__(self, model_f, x, y, perturb_f, tokenizer, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters=0, device=device)

    def get_local_exp(self):
        """Estimate each input token's weight as the weighted mean marginal
        probability gain from including that token across sampled coalitions."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        per_token_phi = []
        for tok_idx in range(len(self.x)):
            # results in total 1000 samples as LERG_LIME
            x_set, x_set_with_i, weights, self.components = \
                self.perturb_inputs(self.x, num=500//len(self.x), with_i=tok_idx)
            probs, y = self.model_f(x_set, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_without = torch.tensor(self.get_prob(probs, y))
            probs, _ = self.model_f(x_set_with_i, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_with = torch.tensor(self.get_prob(probs, y))
            sample_w = torch.tensor(weights).view(-1, 1)
            per_token_phi.append(torch.mean((probs_with - probs_without) * sample_w, dim=0))
        phi_sets = torch.stack(per_token_phi).transpose(0, 1)
        self.phi_set = self.combine_sequence(phi_sets)
        self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
class LERG_SHAP_log(LERG):
    """
    LERG use Shapley value with sample mean (Logarithm)
    """
    def __init__(self, model_f, x, y, perturb_f, tokenizer, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters=0, device=device)

    def get_local_exp(self):
        """Like LERG_SHAP, but averages log-probability gains with uniform
        sample weights instead of the sampler-provided weights."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        per_token_phi = []
        for tok_idx in range(len(self.x)):
            # results in total 1000 samples as LERG_LIME
            x_set, x_set_with_i, weights, self.components = \
                self.perturb_inputs(self.x, num=500//len(self.x), with_i=tok_idx)
            probs, y = self.model_f(x_set, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_without = torch.tensor(self.get_prob(probs, y))
            probs, _ = self.model_f(x_set_with_i, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_with = torch.tensor(self.get_prob(probs, y))
            # `weights` from the sampler are intentionally unused here.
            per_token_phi.append(torch.mean(torch.log(probs_with) - torch.log(probs_without), dim=0))
        phi_sets = torch.stack(per_token_phi).transpose(0, 1)
        self.phi_set = self.combine_sequence(phi_sets)
        self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
| 8,905 | 39.666667 | 166 | py |
LERG | LERG-main/lerg/perturbation_models.py | import torch
import warnings
import math
import random
import numpy as np
import pdb
import scipy as sp
import sklearn
from transformers import BartTokenizer, BartForConditionalGeneration
import torch
def binomial_coef_dist(n):
    """Normalized distribution over k = 1..n//2 replacements, with density
    proportional to C(n, k); also returns the unnormalized total."""
    raw = [math.comb(n, k) for k in range(1, n // 2 + 1)]
    total = sum(raw)
    normalized = [count / total for count in raw]
    return normalized, total
class BasicPM():
    """Identity perturbation model: returns the input unchanged."""
    def __init__(self):
        pass

    def perturb_inputs(self, x, num=1):
        """
        argument:
            x: the tokenized input sentence
        return:
            x_set: the perturbed xs
            z_set: the simplified features of x_set, {0,1}^|x|, tensor
        """
        if num != 1:
            warnings.warn("BasicPM will always set argument num == 1")
        z_row = [1.0] * len(x)
        return [x], torch.tensor([z_row])
class RandomPM(BasicPM):
    """
    randomly choose tokens to be replaced with sub_t
    """
    def __init__(self, sub_t="", denoising=False):
        # sub_t: replacement token.  With denoising=True, tokens are replaced
        # by '<mask>' and BART re-fills the masked slots.
        super().__init__()
        self.sub_t = sub_t
        self.denoising = denoising
        if self.denoising:
            self.sub_t = '<mask>'
            self.bart_tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
            self.bart_model = BartForConditionalGeneration.from_pretrained('facebook/bart-base').to('cuda')
    def _select_repl_num(self, dist_scale):
        """
        select the number of tokens to be replaces #repl_num, following binomial distribution
        """
        # Inverse-CDF sampling over the cumulative distribution dist_scale.
        pos = random.random()
        for i in range(len(dist_scale)):
            if pos < dist_scale[i]:
                break
        return i+1
    def _denoise_x_set(self, x_set):
        # Re-fill '<mask>' slots with BART generations (sampling decode).
        inputs = self.bart_tokenizer(x_set, max_length=256, return_tensors='pt', padding=True).to('cuda')
        summary_ids = self.bart_model.generate(
            inputs['input_ids'],
            top_k=10, top_p=0.9, temperature=0.9, max_length=256,
            early_stopping=True, num_return_sequences=1
        )
        lines = [self.bart_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]
        x_set = lines
        return x_set
    def perturb_inputs(self, x, num=1000, with_i=None):
        """
        allow half tokens at most can be replaced

        argument:
            x: tokenized input sentence
            num: number of samples to draw (capped at 4x the number of
                replacement-count choices)
            with_i: if given, additionally build a paired set in which token
                `with_i` itself is also masked, plus Shapley-style weights
        return:
            with_i is None:  (x_set, z_set {0,1}^|x| float tensor, x)
            otherwise:       (x_set, x_set_with_i, weights, x)
        """
        dist, num_comb = binomial_coef_dist(len(x))
        # Cumulative distribution for inverse-CDF sampling in _select_repl_num.
        dist_scale = [sum(dist[:i+1]) for i in range(len(dist))]
        num = num if num < num_comb*4 else num_comb*4
        """
        choose tokens to be replaced with sub_t
        """
        if with_i is None:
            x_set, z_set = [], []
        else:
            x_set, x_set_with_i = [], []
            weights = []
        for _ in range(num):
            repl_num = self._select_repl_num(dist_scale)
            x_set.append(list(x))
            if with_i is None:
                z_set.append(np.ones((len(x),)))
                repl_list = random.sample(list(range(len(x))), repl_num)
                for t in repl_list:
                    x_set[-1][t] = self.sub_t
                    z_set[-1][t] = 0.
            else:
                x_set_with_i.append(list(x))
                # Mask repl_num tokens other than with_i in BOTH copies; the
                # "_with_i" copy keeps token with_i, the plain copy masks it too.
                indices_to_repl = list(range(len(x)))
                indices_to_repl.remove(with_i)
                repl_list = random.sample(indices_to_repl, repl_num)
                for t in repl_list:
                    x_set[-1][t] = self.sub_t
                    x_set_with_i[-1][t] = self.sub_t
                x_set[-1][with_i] = self.sub_t
                # Inverse of the sampling probability of this coalition size.
                weights.append(1/(dist[repl_num-1]*len(x)))
        if self.denoising:
            x_set = self._denoise_x_set([' '.join(x) for x in x_set])
        if with_i is None:
            return x_set, torch.tensor(z_set, dtype=torch.float32), x
        else:
            return x_set, x_set_with_i, weights, x
class LIMERandomPM(RandomPM):
    # LIME-style sampler: replacement counts are drawn uniformly (not
    # binomially) and the first sample is always the unperturbed input.
    def perturb_inputs(self, x, num=1000):
        """
        allow half tokens at most can be replaced
        choose tokens to be replaced with sub_t
        """
        dist, num_comb = binomial_coef_dist(len(x))
        num = num if num < num_comb*4 else num_comb*4
        x_set, z_set = [], []
        # Uniform number of replacements in [1, len(x)//2] per sample.
        sample = np.random.randint(1,len(x)//2+1,num-1)
        # Sample 0 is the original input itself (all-ones z vector).
        x_set.append(list(x))
        z_set.append(np.ones((len(x),)))
        for i in range(num-1):
            repl_num = sample[i]
            x_set.append(list(x))
            z_set.append(np.ones((len(x),)))
            repl_list = random.sample(list(range(len(x))), repl_num)
            for t in repl_list:
                x_set[-1][t] = self.sub_t
                z_set[-1][t] = 0.
        if self.denoising:
            x_set = self._denoise_x_set([' '.join(x) for x in x_set])
        return x_set, torch.tensor(z_set, dtype=torch.float32), x
| 4,757 | 33.230216 | 130 | py |
LERG | LERG-main/lerg/metrics.py | import torch
import numpy as np
import random
import pdb
from scipy.stats import skew
from collections import Counter
def get_expl(x, expl, ratio=0.2, remain_masks=False):
    """Select the top-`ratio` fraction of tokens of x according to `expl`
    (random selection when expl is None).  With remain_masks=True, unselected
    tokens become "__" and consecutive "__" are collapsed into one."""
    if expl is None:
        if remain_masks:
            x_entities = [tok if random.random() < ratio else "__" for tok in x]
        else:
            x_entities = [tok for tok in x if random.random() < ratio]
    else:
        k = int(len(x) * ratio // 1)
        keep = torch.topk(expl, max(k, 1) if remain_masks else k).indices
        if remain_masks:
            x_entities = [tok if ind in keep else "__" for ind, tok in enumerate(x)]
        else:
            x_entities = [tok for ind, tok in enumerate(x) if ind in keep]
    if remain_masks:
        merged = [x_entities[0]]
        for tok in x_entities[1:]:
            # Drop a "__" that directly follows another "__".
            if tok != "__" or merged[-1] != "__":
                merged.append(tok)
        x_entities = merged
    return x_entities
def remove_expl(x, expl, ratio=0.2):
    """
    remove given explanation from x
    if None explanation is given, randomly remove
    """
    if expl is None:
        return [tok for tok in x if random.random() >= ratio]
    k = int(len(x) * ratio // 1)
    drop = torch.topk(expl, k).indices
    return [tok for ind, tok in enumerate(x) if ind not in drop]
def get_ppl(probs, y, y_inds=None):
    """Return (perplexity, mean log-probability) of y under the first
    sequence in `probs`.

    probs: batch of per-step distributions; only probs[0] is scored.
    y: target token ids, one per step.
    y_inds: optional subset of step indices to average over.
    """
    # BUG FIX / idiom: np.sum over a generator relies on a deprecated NumPy
    # fallback to the builtin sum; call builtin sum directly.
    if y_inds is None:
        ent = sum(np.log(p[yi]) for p, yi in zip(probs[0], y)) / len(y)
    else:
        ent = sum(np.log(p[yi]) for i, (p, yi) in enumerate(zip(probs[0], y)) if i in y_inds) / len(y_inds)
    return np.exp(-ent), ent
def ppl_c_add(expl, x, y, model_f, ratio=0.2):
    """
    additive perplexity changes

    Score y using only the top-`ratio` explanation tokens of x.
    """
    kept_tokens = get_expl(x, expl, ratio=ratio)
    probs_add, token_ids = model_f([kept_tokens], label=y, is_x_tokenized=True, is_y_tokenized=True)
    ppl_add, ent_add = get_ppl(probs_add.cpu(), token_ids)
    return ent_add, ppl_add, kept_tokens
def ppl_c(expl, x, y, model_f, ratio=0.2):
    """
    perplexity changes

    Compare log-probability of y before vs. after removing the top-`ratio`
    explanation tokens from x.
    """
    x_removed = remove_expl(x, expl, ratio=ratio)
    probs_full, token_ids = model_f([x], label=y, is_x_tokenized=True, is_y_tokenized=True)
    probs_removed, _ = model_f([x_removed], label=y, is_x_tokenized=True, is_y_tokenized=True)
    ppl, ent = get_ppl(probs_full.cpu(), token_ids)
    ppl_re, ent_re = get_ppl(probs_removed.cpu(), token_ids)
    entc = ent_re - ent
    return entc, x_removed, ppl, ppl_re
| 2,391 | 33.666667 | 171 | py |
GAN-STEM-Conv2MultiSlice | GAN-STEM-Conv2MultiSlice-master/fnn.py | from __future__ import print_function, division
import scipy
#from keras.datasets import mnist
from keras_contrib.layers.normalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import optimizers
from keras import initializers
from keras import backend as K
import datetime
import matplotlib.pyplot as plt
from matplotlib import gridspec
import sys
import os
import numpy as np
from skimage import io
from sklearn.model_selection import train_test_split
import scipy.misc
from scipy.misc import imsave
from skimage import data, img_as_float
from skimage.measure import compare_ssim as ssim
import math
#data process functions
def getConvPath(material, num):
    """Return the path of the convolution .txt input for a material/index.

    Raises ValueError for an unknown material key.  (Previously an unknown
    key printed a message and then crashed with UnboundLocalError on
    `return newPath`.)
    """
    root = os.getcwd() +"/newData/" + material
    if "Pt_input_images" == material:
        return root +"/Pt_convolution/Pt_convolution_" + str(num) + ".txt"
    elif "Pt-Mo5_input_images" == material:
        return root +"/Pt-Mo_convolution/Pt_Mo5_convolution_" + str(num) +".txt"
    elif "Pt-Mo50_input_images" == material:
        # The Pt-Mo50 files are numbered starting at 2.
        return root + "/Pt281-Mo280-convolution/Pt_Mo50_convolution_" + str(int(num)+2) + ".txt"
    raise ValueError("Material key not found! Please check your spelling.")
def getMultislicePath(material, num):
    """Return the path of the multislice .tif image for a material/index.

    Raises ValueError for an unknown material key.  (Previously an unknown
    key printed a message and then crashed with UnboundLocalError on
    `return newPath`.)
    """
    root = os.getcwd() + "/newData/" + material
    if "Pt_input_images" == material:
        return root +"/Pt_multislice_16_phonons/Pt_" + str(num) + "_cl160mm_ss.tif"
    elif "Pt-Mo5_input_images" == material:
        return root +"/Pt-Mo_multislice/pt_mo5_" + str(num) +"_cl160mm_ss.tif"
    elif "Pt-Mo50_input_images" == material:
        # The Pt-Mo50 files are numbered starting at 2.
        return root + "/Pt281-Mo280-multislice/pt_mo50_" + str(int(num)+2) + "_cl160mm_ss.tif"
    raise ValueError("material key not found! Please check your spelling.")
def getNumImages(material):
    """Number of image pairs available for each known material (0 if unknown)."""
    counts = {
        "Pt_input_images": 20,
        "Pt-Mo5_input_images": 20,
        "Pt-Mo50_input_images": 18,
    }
    return counts.get(material, 0)
def cutImage(image, height, width):
    """Crop an array image to its top-left height x width corner."""
    return image[:height, :width]
#returns list of images cut to be min height and width of the group
def cutImages(images):
    """Crop every image in the group down to the minimum height and minimum
    width found across the group, so all results share one shape."""
    min_height = min(len(img) for img in images)
    min_width = min(len(img[0]) for img in images)
    return [img[:min_height, :min_width] for img in images]
def padImage(image, desiredHeight, desiredWidth):
    """Center `image` on a zero-filled canvas of the desired size."""
    top = int((desiredHeight - image.shape[0]) / 2)
    left = int((desiredWidth - image.shape[1]) / 2)
    canvas = np.zeros((desiredHeight, desiredWidth))
    canvas[top:top + image.shape[0], left:left + image.shape[1]] = image
    return canvas
#returns list of images with desired heigh and width
def formatImages(images, height, width):
    """Clamp negative pixels to zero and resize every image to height x width,
    cropping oversized dimensions and zero-padding undersized ones.

    BUG FIX: the oversized-both-dimensions branch called cutImage(image)
    without the height/width arguments, raising TypeError.
    """
    newImages = []
    for image in roundToZeroes(images):
        if image.shape[0] > height and image.shape[1] > width:
            newImages.append(cutImage(image, height, width))
        elif image.shape[0] <= height and image.shape[1] < width:
            newImages.append(padImage(image, height, width))
        elif image.shape[0] >= height and image.shape[1] <= width:
            newImages.append(padImage(image[:height, :], height, width))
        elif image.shape[0] < height and image.shape[1] >= width:
            newImages.append(padImage(image[:, :width], height, width))
    return newImages
# rounds any negative values in the matrix to zero. Requested by Dane
def roundToZeroes(images):
    """Clamp negative pixel values to 0.0 in place and return the same list.
    (Vectorized np.clip replaces the original O(H*W) Python double loop.)"""
    for image in images:
        np.clip(image, 0.0, None, out=image)
    return images
def cutPadding(image, height, width):
    """Undo centered padding: crop `image` back to height x width, removing
    the surplus rows/columns split evenly (odd remainder to bottom/right)."""
    extra_h = len(image) - height
    extra_w = len(image[0]) - width
    top, bottom = extra_h // 2, extra_h - extra_h // 2
    left, right = extra_w // 2, extra_w - extra_w // 2
    return image[top:len(image) - bottom, left:len(image[0]) - right]
def kerasSSIM(y_true, y_pred):#may be wrong
    # NOTE(review): this is NOT standard SSIM.  It uses one global mean/std
    # per tensor, approximates the covariance term as sqrt(sig_x * sig_y),
    # and MULTIPLIES by C2 where standard SSIM ADDS it (2*sigma_xy + C2) --
    # confirm this is intentional.  C1/C2 are module-level globals.
    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y)**0.5
    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
def mean_squared_error(y_true, y_pred):
    # Per-sample MSE over the last axis, using the Keras backend `K`.
    return K.mean(K.square(y_pred - y_true), axis=-1)
def customLoss(yTrue,yPred):
    """Blend of (1 - SSIM) and MSE, mixed by the module-level `alpha`."""
    # print(backend.shape(yTrue))
    # print(backend.shape(yPred))
    ssimVal = kerasSSIM(yTrue,yPred)
    print(ssimVal)  # prints a symbolic tensor at graph-build time, not a value
    return alpha * (1-ssimVal) + (1-alpha) * mean_squared_error(yTrue, yPred)
dirArray = ["Pt-Mo5_input_images", "Pt_input_images", "Pt-Mo50_input_images"]
matl = dirArray[1] #specify desired material here
#Parses image data into ndarrays, then slices each array to be the minimum width and height of the group.
#Thus, formattedConvImages and formattedMultiImages will have arrays of all the same size.
convImages = []
multiImages = []
widths = []
heights = []
# Load every (convolution .txt, multislice .tif) pair for all three materials.
for d in range (0, 3):
    matl = dirArray[d]
    for i in range(0,getNumImages(matl)):
        convArr = np.loadtxt(getConvPath(matl, i))
        multiArr = io.imread(getMultislicePath(matl,i))
        #TODO: PLEASE DELETE THIS LINE AFTER DATA PROCESSING
        # Only keep pairs whose convolution image fits within 256x256.
        if (len(convArr[0]) <= 256 and len(convArr) <= 256):
            widths.append(len(convArr[0]))
            heights.append(len(convArr))
            convImages.append(convArr)
            multiImages.append(multiArr)
minWidth = min(widths)
minHeight = min(heights)
print(minWidth)
print(minHeight)
print(len(convImages))
print(len(multiImages))
print(np.min(convImages[0]))
print(np.max(convImages[0]))
print(np.min(multiImages[0]))
print(np.max(multiImages[0]))
#split data using sklearn
x =convImages
y =multiImages
# Default 75/25 train/test split; the split is random on every run.
X_train, X_test, Y_train, Y_test = train_test_split(x, y)
#format data into 256 by 256
newX_Train = formatImages(X_train,256,256)
newY_Train = formatImages(Y_train,256,256)
newX_Test = formatImages(X_test,256,256)
newY_Test = formatImages(Y_test,256,256)
# Flatten each 256x256 image into one 65536-wide row per sample.
# NOTE(review): repeated np.vstack is O(n^2); np.array([im.flatten() ...])
# would build each matrix in a single pass.
formattedX_Train = np.ndarray(shape = (0,256*256))
for i in range(0,len(newX_Train)):
    tempX_Train = newX_Train[i].flatten()
    formattedX_Train = np.vstack([formattedX_Train, tempX_Train])
formattedY_Train = np.ndarray(shape = (0,256*256))
for i in range(0,len(newY_Train)):
    tempY_Train = newY_Train[i].flatten()
    formattedY_Train = np.vstack([formattedY_Train, tempY_Train])
formattedX_Test = np.ndarray(shape = (0,256*256))
for i in range(0,len(newX_Test)):
    tempX_Test = newX_Test[i].flatten()
    formattedX_Test = np.vstack([formattedX_Test, tempX_Test])
formattedY_Test = np.ndarray(shape = (0,256*256))
for i in range(0,len(newY_Test)):
    tempY_Test = newY_Test[i].flatten()
    formattedY_Test = np.vstack([formattedY_Test, tempY_Test])
print(formattedX_Train.shape)
print(formattedY_Train.shape)
print(formattedX_Test.shape)
print(formattedY_Test.shape)
# Fully-connected bottleneck network: 65536 -> 4096 -> ... -> 128 -> ... -> 65536.
model = Sequential()
model.add(Dense(4096, activation='relu', input_dim=65536, kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(1024, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(256, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dense(512, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dense(1024, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dense(65536, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
alpha = 0.9  # weight of the (1 - SSIM) term in customLoss
"""structural similarity measurement system."""
K1 = 0.01
K2 = 0.03
## L, number of pixels, C1, C2, two constants
L = 255
# NOTE(review): standard SSIM defines C = (K*L)**2; sqrt(K*L) is used here --
# confirm this is intentional.
C1 = math.sqrt(K1 * L)
C2 = math.sqrt(K2 * L)
#sgd = optimizers.SGD(lr=1e-5, decay=1e-6, momentum=0.9, nesterov=True)
adam = optimizers.Adam(lr=1e-5, beta_1=0.45, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(
    optimizer=adam,
    loss=[customLoss],
    metrics=['accuracy']
)
model.summary()
#Training starts here
# Fit the model
history = model.fit(formattedX_Train, formattedY_Train, epochs=400, batch_size=3, verbose=2)
# evaluate the model
# NOTE(review): this evaluates on the TRAINING set, so the printed score is a
# training metric, not a measure of generalization.
scores = model.evaluate(formattedX_Train, formattedY_Train)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# list all data in history
print(history.history.keys())
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Lossadam.png')
#prediction
# Predict on the TEST inputs.  BUG FIX: this previously called
# model.predict(formattedX_Train) while the loop below compares row i of the
# predictions against Y_test / formattedX_Test / formattedY_Test, so the
# reported errors mixed train predictions with test ground truth.
y_pred = model.predict(formattedX_Test)
# NOTE(review): the hard-coded range(14) assumes at least 14 test samples.
for i in range (14):
    if (i%3 == 0):
        f, axarr = plt.subplots(4, 3)
    #Multislice Images
    image = Y_test[i]
    image = image.clip(min=0)
    axarr[0,i%3].imshow(image, cmap=plt.get_cmap('gray'))
    axarr[0,i%3].set_title("Multislice Image " + str(i))
    #Predicted Images
    image_y = y_pred[i, :].reshape([256, 256])
    image_y = image_y.clip(min=0)
    axarr[1,i%3].imshow(image_y, cmap=plt.get_cmap('gray'))
    axarr[1,i%3].set_title("Prediction Image " + str(i))
    #Convolutional Images
    image_x = formattedX_Test[i, :].reshape([256, 256])
    image_x = image_x.clip(min=0)
    axarr[2,i%3].imshow(image_x, cmap=plt.get_cmap('gray'))
    axarr[2,i%3].set_title("Convolution Image " + str(i))
    #Cut padding Image
    temp = cutPadding(image_y,len(image), len(image[0]))
    axarr[3,i%3].imshow(temp, cmap=plt.get_cmap('gray'))
    axarr[3,i%3].set_title("Prediction W/ Padding " + str(i))
    formatY = formattedY_Test[i, :].reshape([256, 256])
    print(str(i))
    print("The RMSE is "+ str(np.sqrt(np.mean(np.power((image_y-formatY),2)))*100) + " %")
    print("The RMSE w/o padding is "+ str(np.sqrt(np.mean(np.power((temp-image),2)))*100) + " %")
    print("The Max-Min RMSE is "+ str(np.sqrt(np.mean(np.power((image_y-formatY),2)))/(np.max(image) - np.min(image))*100) + " %")
    print("SSIM is :" + str(ssim(image, temp.astype(np.float32), data_range=temp.max() - temp.min())) + "\n")
    print("Standard Dev RMSE is " + str(np.sqrt(np.mean(np.power((image_y-formatY),2)))/(np.std(image_y)*100)) + " %")
    # Save each completed figure of three columns.  BUG FIX: the condition was
    # `i == 14`, which is unreachable in range(14), so the final figure
    # (i = 12, 13) was never saved; the last index is 13.
    if (i%3 == 2 or i == 13):
        #plt.show()
        plt.savefig('Prediction with adam' + str(i) + '.png')
        plt.clf()
# (Removed a dead commented-out block that referenced an undefined
# `predictions` variable.)
| 11,895 | 34.939577 | 130 | py |
GAN-STEM-Conv2MultiSlice | GAN-STEM-Conv2MultiSlice-master/FNN/fnn.py | from __future__ import print_function, division
import scipy
#from keras.datasets import mnist
from keras_contrib.layers.normalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import optimizers
from keras import initializers
from keras import backend as K
import datetime
import matplotlib.pyplot as plt
from matplotlib import gridspec
import sys
import os
import numpy as np
from skimage import io
from sklearn.model_selection import train_test_split
import scipy.misc
from scipy.misc import imsave
from skimage import data, img_as_float
from skimage.measure import compare_ssim as ssim
import math
#data process functions
def getConvPath(material, num):
    """Return the path of the convolution .txt input for a material/index.

    Raises ValueError for an unknown material key.  (Previously an unknown
    key printed a message and then crashed with UnboundLocalError on
    `return newPath`.)
    """
    root = os.getcwd() +"/newData/" + material
    if "Pt_input_images" == material:
        return root +"/Pt_convolution/Pt_convolution_" + str(num) + ".txt"
    elif "Pt-Mo5_input_images" == material:
        return root +"/Pt-Mo_convolution/Pt_Mo5_convolution_" + str(num) +".txt"
    elif "Pt-Mo50_input_images" == material:
        # The Pt-Mo50 files are numbered starting at 2.
        return root + "/Pt281-Mo280-convolution/Pt_Mo50_convolution_" + str(int(num)+2) + ".txt"
    raise ValueError("Material key not found! Please check your spelling.")
def getMultislicePath(material, num):
    """Return the path of the multislice .tif image for a material/index.

    Raises ValueError for an unknown material key.  (Previously an unknown
    key printed a message and then crashed with UnboundLocalError on
    `return newPath`.)
    """
    root = os.getcwd() + "/newData/" + material
    if "Pt_input_images" == material:
        return root +"/Pt_multislice_16_phonons/Pt_" + str(num) + "_cl160mm_ss.tif"
    elif "Pt-Mo5_input_images" == material:
        return root +"/Pt-Mo_multislice/pt_mo5_" + str(num) +"_cl160mm_ss.tif"
    elif "Pt-Mo50_input_images" == material:
        # The Pt-Mo50 files are numbered starting at 2.
        return root + "/Pt281-Mo280-multislice/pt_mo50_" + str(int(num)+2) + "_cl160mm_ss.tif"
    raise ValueError("material key not found! Please check your spelling.")
def getNumImages(material):
    """Number of image pairs available for each known material (0 if unknown)."""
    counts = {
        "Pt_input_images": 20,
        "Pt-Mo5_input_images": 20,
        "Pt-Mo50_input_images": 18,
    }
    return counts.get(material, 0)
def cutImage(image, height, width):
    """Crop an array image to its top-left height x width corner."""
    return image[:height, :width]
#returns list of images cut to be min height and width of the group
def cutImages(images):
    """Crop every image in the group down to the minimum height and minimum
    width found across the group, so all results share one shape."""
    min_height = min(len(img) for img in images)
    min_width = min(len(img[0]) for img in images)
    return [img[:min_height, :min_width] for img in images]
def padImage(image, desiredHeight, desiredWidth):
    """Center `image` on a zero-filled canvas of the desired size."""
    top = int((desiredHeight - image.shape[0]) / 2)
    left = int((desiredWidth - image.shape[1]) / 2)
    canvas = np.zeros((desiredHeight, desiredWidth))
    canvas[top:top + image.shape[0], left:left + image.shape[1]] = image
    return canvas
#returns list of images with desired heigh and width
def formatImages(images, height, width):
    """Clamp negative pixels to zero and resize every image to height x width,
    cropping oversized dimensions and zero-padding undersized ones.

    BUG FIX: the oversized-both-dimensions branch called cutImage(image)
    without the height/width arguments, raising TypeError.
    """
    newImages = []
    for image in roundToZeroes(images):
        if image.shape[0] > height and image.shape[1] > width:
            newImages.append(cutImage(image, height, width))
        elif image.shape[0] <= height and image.shape[1] < width:
            newImages.append(padImage(image, height, width))
        elif image.shape[0] >= height and image.shape[1] <= width:
            newImages.append(padImage(image[:height, :], height, width))
        elif image.shape[0] < height and image.shape[1] >= width:
            newImages.append(padImage(image[:, :width], height, width))
    return newImages
# rounds any negative values in the matrix to zero. Requested by Dane
def roundToZeroes(images):
    """Clamp negative pixel values to 0.0 in place and return the same list.
    (Vectorized np.clip replaces the original O(H*W) Python double loop.)"""
    for image in images:
        np.clip(image, 0.0, None, out=image)
    return images
def cutPadding(image, height, width):
    """Undo centered padding: crop `image` back to height x width, removing
    the surplus rows/columns split evenly (odd remainder to bottom/right)."""
    extra_h = len(image) - height
    extra_w = len(image[0]) - width
    top, bottom = extra_h // 2, extra_h - extra_h // 2
    left, right = extra_w // 2, extra_w - extra_w // 2
    return image[top:len(image) - bottom, left:len(image[0]) - right]
def kerasSSIM(y_true, y_pred):#may be wrong
    # NOTE(review): this is NOT standard SSIM.  It uses one global mean/std
    # per tensor, approximates the covariance term as sqrt(sig_x * sig_y),
    # and MULTIPLIES by C2 where standard SSIM ADDS it (2*sigma_xy + C2) --
    # confirm this is intentional.  C1/C2 are module-level globals.
    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y)**0.5
    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
def mean_squared_error(y_true, y_pred):
    # Per-sample MSE over the last axis, using the Keras backend `K`.
    return K.mean(K.square(y_pred - y_true), axis=-1)
def customLoss(yTrue,yPred):
    """Blend of (1 - SSIM) and MSE, mixed by the module-level `alpha`."""
    # print(backend.shape(yTrue))
    # print(backend.shape(yPred))
    ssimVal = kerasSSIM(yTrue,yPred)
    print(ssimVal)  # prints a symbolic tensor at graph-build time, not a value
    return alpha * (1-ssimVal) + (1-alpha) * mean_squared_error(yTrue, yPred)
dirArray = ["Pt-Mo5_input_images", "Pt_input_images", "Pt-Mo50_input_images"]
matl = dirArray[1] #specify desired material here
#Parses image data into ndarrays, then slices each array to be the minimum width and height of the group.
#Thus, formattedConvImages and formattedMultiImages will have arrays of all the same size.
convImages = []
multiImages = []
widths = []
heights = []
# Load every (convolution .txt, multislice .tif) pair for all three materials.
for d in range (0, 3):
    matl = dirArray[d]
    for i in range(0,getNumImages(matl)):
        convArr = np.loadtxt(getConvPath(matl, i))
        multiArr = io.imread(getMultislicePath(matl,i))
        #TODO: PLEASE DELETE THIS LINE AFTER DATA PROCESSING
        # Only keep pairs whose convolution image fits within 256x256.
        if (len(convArr[0]) <= 256 and len(convArr) <= 256):
            widths.append(len(convArr[0]))
            heights.append(len(convArr))
            convImages.append(convArr)
            multiImages.append(multiArr)
minWidth = min(widths)
minHeight = min(heights)
print(minWidth)
print(minHeight)
print(len(convImages))
print(len(multiImages))
print(np.min(convImages[0]))
print(np.max(convImages[0]))
print(np.min(multiImages[0]))
print(np.max(multiImages[0]))
#split data using sklearn
x =convImages
y =multiImages
# Default 75/25 train/test split; the split is random on every run.
X_train, X_test, Y_train, Y_test = train_test_split(x, y)
#format data into 256 by 256
newX_Train = formatImages(X_train,256,256)
newY_Train = formatImages(Y_train,256,256)
newX_Test = formatImages(X_test,256,256)
newY_Test = formatImages(Y_test,256,256)
# Flatten each 256x256 image into one 65536-wide row per sample.
# NOTE(review): repeated np.vstack is O(n^2); np.array([im.flatten() ...])
# would build each matrix in a single pass.
formattedX_Train = np.ndarray(shape = (0,256*256))
for i in range(0,len(newX_Train)):
    tempX_Train = newX_Train[i].flatten()
    formattedX_Train = np.vstack([formattedX_Train, tempX_Train])
formattedY_Train = np.ndarray(shape = (0,256*256))
for i in range(0,len(newY_Train)):
    tempY_Train = newY_Train[i].flatten()
    formattedY_Train = np.vstack([formattedY_Train, tempY_Train])
formattedX_Test = np.ndarray(shape = (0,256*256))
for i in range(0,len(newX_Test)):
    tempX_Test = newX_Test[i].flatten()
    formattedX_Test = np.vstack([formattedX_Test, tempX_Test])
formattedY_Test = np.ndarray(shape = (0,256*256))
for i in range(0,len(newY_Test)):
    tempY_Test = newY_Test[i].flatten()
    formattedY_Test = np.vstack([formattedY_Test, tempY_Test])
print(formattedX_Train.shape)
print(formattedY_Train.shape)
print(formattedX_Test.shape)
print(formattedY_Test.shape)
# Fully-connected bottleneck network: 65536 -> 4096 -> ... -> 128 -> ... -> 65536.
model = Sequential()
model.add(Dense(4096, activation='relu', input_dim=65536, kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(1024, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(256, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(128, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dense(512, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dense(1024, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
model.add(Dense(65536, activation='relu', kernel_initializer=initializers.he_normal(seed=None)))
alpha = 0.9  # weight of the (1 - SSIM) term in customLoss
"""structural similarity measurement system."""
K1 = 0.01
K2 = 0.03
## L, number of pixels, C1, C2, two constants
L = 255
# NOTE(review): standard SSIM defines C = (K*L)**2; sqrt(K*L) is used here --
# confirm this is intentional.
C1 = math.sqrt(K1 * L)
C2 = math.sqrt(K2 * L)
#sgd = optimizers.SGD(lr=1e-5, decay=1e-6, momentum=0.9, nesterov=True)
adam = optimizers.Adam(lr=1e-5, beta_1=0.45, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(
    optimizer=adam,
    loss=[customLoss],
    metrics=['accuracy']
)
model.summary()
# ---- Training ----
# Fit the fully-connected model on the flattened training pairs.
history = model.fit(formattedX_Train, formattedY_Train, epochs=400, batch_size=3, verbose=2)
# Evaluate on the same training data (there is no held-out evaluation here).
scores = model.evaluate(formattedX_Train, formattedY_Train)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# List the metrics recorded during training.
print(history.history.keys())
# Summarize history for loss.
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# FIX: only the training curve is plotted (val_loss is commented out
# above), so the legend must not claim a second 'test' series.
plt.legend(['train'], loc='upper left')
plt.savefig('Lossadam.png')
# ---- Prediction ----
# NOTE(review): predictions are computed on the TRAINING inputs but are
# later compared against test targets (Y_test / formattedY_Test) in the
# visualization loop below -- confirm whether formattedX_Test was meant.
y_pred = model.predict(formattedX_Train)
# Visualize predictions in groups of three: each figure holds 4 rows
# (ground truth, prediction, network input, unpadded prediction) x 3
# sample columns, and per-sample error metrics are printed.
for i in range(14):
    if (i%3 == 0):
        # Start a fresh 4x3 figure every three samples.
        f, axarr = plt.subplots(4, 3)
    # Row 0: ground-truth multislice image.
    image = Y_test[i]
    image = image.clip(min=0)
    axarr[0,i%3].imshow(image, cmap=plt.get_cmap('gray'))
    axarr[0,i%3].set_title("Multislice Image " + str(i))
    # Row 1: predicted image, reshaped back to 256x256.
    image_y = y_pred[i, :].reshape([256, 256])
    image_y = image_y.clip(min=0)
    axarr[1,i%3].imshow(image_y, cmap=plt.get_cmap('gray'))
    axarr[1,i%3].set_title("Prediction Image " + str(i))
    # Row 2: the convolved input the network saw.
    image_x = formattedX_Test[i, :].reshape([256, 256])
    image_x = image_x.clip(min=0)
    axarr[2,i%3].imshow(image_x, cmap=plt.get_cmap('gray'))
    axarr[2,i%3].set_title("Convolution Image " + str(i))
    # Row 3: prediction cropped back to the original (pre-padding) size.
    temp = cutPadding(image_y,len(image), len(image[0]))
    axarr[3,i%3].imshow(temp, cmap=plt.get_cmap('gray'))
    axarr[3,i%3].set_title("Prediction W/ Padding " + str(i))
    formatY = formattedY_Test[i, :].reshape([256, 256])
    print(str(i))
    print("The RMSE is "+ str(np.sqrt(np.mean(np.power((image_y-formatY),2)))*100) + " %")
    print("The RMSE w/o padding is "+ str(np.sqrt(np.mean(np.power((temp-image),2)))*100) + " %")
    print("The Max-Min RMSE is "+ str(np.sqrt(np.mean(np.power((image_y-formatY),2)))/(np.max(image) - np.min(image))*100) + " %")
    print("SSIM is :" + str(ssim(image, temp.astype(np.float32), data_range=temp.max() - temp.min())) + "\n")
    # FIX: precedence bug -- the original divided by (std * 100), which
    # shrinks the value by 100x while still labelling it "%". Match the
    # Max-Min line above: divide by the std, then scale to percent.
    print("Standard Dev RMSE is " + str(np.sqrt(np.mean(np.power((image_y-formatY),2)))/np.std(image_y)*100) + " %")
    # FIX: i never reaches 14 in range(14), so the final partial figure
    # (samples 12-13) was never saved; save it on the last iteration.
    if (i%3 == 2 or i == 13):
        #plt.show()
        plt.savefig('Prediction with adam' + str(i) + '.png')
        plt.clf()
# NOTE(review): the triple-quoted block below is dead code preserved as a
# bare string literal -- it is never executed, and 'predictions' is not
# defined anywhere in view. Kept verbatim for reference; consider deleting.
'''
# calculate predictions
# print(str(i))
# print("The RMSE is "+ str(np.sqrt(np.mean(np.power((image_y-formatY),2)))*100) + " %")
# print("The RMSE w/o padding is "+ str(np.sqrt(np.mean(np.power((temp-image),2)))*100) + " %")
# print("The Max-Min RMSE is "+ str(np.sqrt(np.mean(np.power((image_y-formatY),2)))/(np.max(image) - np.min(image))*100) + " %")
# print("SSIM is :" + str(ssim(image, temp.astype(np.float32), data_range=temp.max() - temp.min())) + "\n")
# round predictions
rounded = [round(x[0]) for x in predictions]
print(rounded)
'''
| 11,895 | 34.939577 | 130 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.