repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ZeCon | ZeCon-main/CLIP/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if torch.__version__.split(".") < ["1", "7", "1"]:
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
}
def _download(url: str, root: str):
    """Download the file at *url* into *root*, verifying its SHA256 checksum.

    The expected checksum is the second-to-last path component of the URL
    (OpenAI's CDN embeds it there; see _MODELS).  Returns the local path.

    Raises RuntimeError if the destination exists but is not a regular file,
    or if the downloaded data fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    def _sha256(path: str) -> str:
        # Hash in chunks so multi-GB checkpoints are not read into memory at
        # once, and close the file deterministically via the context manager
        # (the original leaked the handle from open(...).read()).
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b""):
                digest.update(chunk)
        return digest.hexdigest()

    if os.path.isfile(download_target):
        if _sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    if _sha256(download_target) != expected_sha256:
        # Message fixed: the original read "does not not match".
        raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match")
    return download_target
def _transform(n_px):
    # Preprocessing pipeline that turns a PIL image into the normalized tensor
    # the CLIP visual encoder expects: resize so the short side is n_px,
    # center-crop to n_px x n_px, force 3-channel RGB, then normalize with the
    # published CLIP dataset statistics below.
    return Compose([
        Resize(n_px, interpolation=BICUBIC),
        CenterCrop(n_px),
        # convert() keeps palette/grayscale/RGBA inputs compatible with the
        # 3-channel Normalize below.
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
def available_models() -> List[str]:
    """List the CLIP model names that `load` accepts."""
    return [*_MODELS]
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
    """Load a CLIP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict

    device : Union[str, torch.device]
        The device to put the loaded model

    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).

    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"

    Returns
    -------
    model : torch.nn.Module
        The CLIP model

    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve `name` to a local checkpoint: known names are downloaded (and
    # checksum-verified) into the cache dir; anything else is treated as a path.
    if name in _MODELS:
        model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")

    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        # Non-JIT path: rebuild the python CLIP module from the weights and
        # move it to the target device.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            # Checkpoints ship fp16 weights; upcast for CPU execution.
            model.float()
        return model, _transform(model.visual.input_resolution)

    # JIT path: the traced graphs have device constants baked in, so rewrite
    # them to the requested device.
    # patch the device names
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        # Replace every "cuda..." device constant in the module's graph(s)
        # with the constant for the requested device.
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            # some scripted submodules raise when .graph is accessed
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            # Rewrite dtype constants feeding aten::to so the graph casts to
            # float32 instead of the half dtype baked in at trace time.
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        # value 5 is the dtype enum the fp16 graphs carry here;
                        # replace it with the traced float32 constant.
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize

    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    truncate: bool
        Whether to truncate the text in case its encoding is longer than the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot = _tokenizer.encoder["<|startoftext|>"]
    eot = _tokenizer.encoder["<|endoftext|>"]

    # Zero-padded output; each row is <sot> tokens... <eot> then padding.
    result = torch.zeros(len(texts), context_length, dtype=torch.long)
    for row, text in enumerate(texts):
        tokens = [sot, *_tokenizer.encode(text), eot]
        if len(tokens) > context_length:
            if not truncate:
                raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
            # Truncate but keep the end-of-text marker in the final slot.
            tokens = tokens[:context_length]
            tokens[-1] = eot
        result[row, :len(tokens)] = torch.tensor(tokens)

    return result
| 8,433 | 36.484444 | 149 | py |
ZeCon | ZeCon-main/CLIP/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
    """ResNet bottleneck block with CLIP's anti-aliased (avgpool) downsampling."""

    # Output channels are planes * expansion, as in torchvision's ResNet.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Anti-aliasing: downsample with an average pool instead of a strided conv.
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x  # saved for the residual connection

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            # Match the identity branch's channels/resolution to `out`.
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    """Pools a spatial feature map with multi-head QKV attention.

    The mean over spatial positions is prepended as an extra token; the
    attention output at that token is returned as the pooled feature vector.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One positional embedding per spatial position, plus one for the mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Functional MHA with separate q/k/v projection weights; the Linear
        # layers above only supply the parameters.
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )

        # Keep only the output at the prepended mean token.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        # Attention pooling over the final (input_resolution / 32) feature map.
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        # Build one stage: the first block may downsample, the rest keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x

        # Run in the stem's parameter dtype (fp16 after convert_weights).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x
class LayerNorm(nn.LayerNorm):
    """LayerNorm that computes in float32 and casts back to the input dtype.

    Normalizing in full precision sidesteps fp16 accuracy issues while keeping
    the caller's dtype on the output.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-gated GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: x + attn(ln_1(x)), then x + mlp(ln_2(x))."""

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x-expansion MLP with the sigmoid-based QuickGELU activation.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        # Optional additive attention mask (e.g. the causal mask for text).
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        # NOTE(review): re-assigns self.attn_mask on every call to move it to
        # the input's dtype/device — a persistent side effect on the module.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """A stack of `layers` residual attention blocks of constant width."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        # nn.Sequential threads x through every block in order.
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP's ViT image encoder: patch embedding + transformer + projection."""

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the class token, broadcast over the batch via the zeros add.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        # Only the class-token output is used as the image feature.
        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x
class CLIP(nn.Module):
    """The full CLIP model: an image encoder and a text encoder producing
    features in a shared embedding space, compared via scaled cosine similarity."""

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        # A tuple/list of per-stage layer counts selects the ResNet visual
        # encoder; a single int selects the ViT.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        # Text transformer with a causal mask over the token sequence.
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable temperature stored as a log; initialized to log(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embeddings and projection weights (scaled normal init)."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            # Zero the last BN scale of each residual branch so every block
            # starts out as an identity mapping.
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # The working dtype follows the visual stem's weights (fp16 or fp32).
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        """Embed a batch of preprocessed images; returns [batch, embed_dim]."""
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        """Embed a batch of tokenized text; returns [batch, embed_dim]."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""

    def _to_half(module):
        # Plain conv / linear layers: cast weight and optional bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # MultiheadAttention stores its projections as raw tensors/parameters.
        if isinstance(module, nn.MultiheadAttention):
            attrs = ["in_proj_weight", "q_proj_weight", "k_proj_weight", "v_proj_weight",
                     "in_proj_bias", "bias_k", "bias_v"]
            for attr_name in attrs:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # Projection matrices held directly as parameters on CLIP modules.
        for attr_name in ("text_projection", "proj"):
            if hasattr(module, attr_name):
                param = getattr(module, attr_name)
                if param is not None:
                    param.data = param.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Construct a CLIP model sized to match *state_dict* and load its weights.

    All architecture hyperparameters are recovered from tensor shapes and key
    names in the state dict, so no separate config file is needed.  The model
    is converted to fp16 (where applicable) and returned in eval mode.
    """
    # ViT checkpoints contain the visual projection matrix; ResNets do not.
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid**2 + 1 rows (the +1 is the class token).
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # Count blocks in each of the four ResNet stages by unique block index.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # The ResNet downsamples by a factor of 32 overall.
        image_resolution = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )

    # These keys are checkpoint metadata, not parameters; drop before loading.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]

    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 17,242 | 38.822171 | 178 | py |
ZeCon | ZeCon-main/CLIP/tests/test_consistency.py | import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize('model_name', clip.available_models())
def test_consistency(model_name):
    """The JIT and non-JIT versions of each model should produce matching
    softmax probabilities on the same image/text inputs."""
    device = "cpu"
    jit_model, transform = clip.load(model_name, device=device, jit=True)
    py_model, _ = clip.load(model_name, device=device, jit=False)

    # Fixture image from the repo root; three candidate captions.
    image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device)
    text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)

    with torch.no_grad():
        logits_per_image, _ = jit_model(image, text)
        jit_probs = logits_per_image.softmax(dim=-1).cpu().numpy()

        logits_per_image, _ = py_model(image, text)
        py_probs = logits_per_image.softmax(dim=-1).cpu().numpy()

    # Loose tolerances: the two execution paths may differ slightly numerically.
    assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1)
| 812 | 30.269231 | 73 | py |
ZeCon | ZeCon-main/guided_diffusion/setup.py | from setuptools import setup
# Minimal packaging config: ships guided_diffusion as a single top-level
# module with its runtime dependencies.
setup(
    name="guided-diffusion",
    py_modules=["guided_diffusion"],
    install_requires=["blobfile>=1.0.5", "torch", "tqdm"],
)
| 164 | 19.625 | 58 | py |
ZeCon | ZeCon-main/guided_diffusion/scripts/image_sample.py | """
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
def main():
    """Sample images from a trained diffusion model across all ranks and save
    them (rank 0 only) as a single .npz array for FID evaluation."""
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    if args.use_fp16:
        model.convert_to_fp16()
    model.eval()

    logger.log("sampling...")
    all_images = []
    all_labels = []
    while len(all_images) * args.batch_size < args.num_samples:
        model_kwargs = {}
        if args.class_cond:
            # Uniform random class labels for class-conditional sampling.
            classes = th.randint(
                low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
            )
            model_kwargs["y"] = classes
        sample_fn = (
            diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
        )
        sample = sample_fn(
            model,
            (args.batch_size, 3, args.image_size, args.image_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
        )
        # Map [-1, 1] floats to uint8 and switch to NHWC for saving.
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()

        # Collect every rank's batch so each rank holds the full set.
        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)  # gather not supported with NCCL
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        if args.class_cond:
            gathered_labels = [
                th.zeros_like(classes) for _ in range(dist.get_world_size())
            ]
            dist.all_gather(gathered_labels, classes)
            all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f"created {len(all_images) * args.batch_size} samples")

    arr = np.concatenate(all_images, axis=0)
    arr = arr[: args.num_samples]
    if args.class_cond:
        label_arr = np.concatenate(all_labels, axis=0)
        label_arr = label_arr[: args.num_samples]
    if dist.get_rank() == 0:
        # Only rank 0 writes the output file.
        shape_str = "x".join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
        logger.log(f"saving to {out_path}")
        if args.class_cond:
            np.savez(out_path, arr, label_arr)
        else:
            np.savez(out_path, arr)

    dist.barrier()
    logger.log("sampling complete")
def create_argparser():
    """Build the CLI parser: sampling flags plus the model/diffusion defaults."""
    options = {
        "clip_denoised": True,
        "num_samples": 10000,
        "batch_size": 16,
        "use_ddim": False,
        "model_path": "",
    }
    options.update(model_and_diffusion_defaults())

    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, options)
    return parser
if __name__ == "__main__":
main()
| 3,398 | 30.183486 | 88 | py |
ZeCon | ZeCon-main/guided_diffusion/scripts/super_res_sample.py | """
Generate a large batch of samples from a super resolution model, given a batch
of samples from a regular model from image_sample.py.
"""
import argparse
import os
import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
sr_model_and_diffusion_defaults,
sr_create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
def main():
    """Upsample previously generated base samples with a super-resolution
    diffusion model; rank 0 saves the results as a single .npz array."""
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = sr_create_model_and_diffusion(
        **args_to_dict(args, sr_model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    if args.use_fp16:
        model.convert_to_fp16()
    model.eval()

    logger.log("loading data...")
    data = load_data_for_worker(args.base_samples, args.batch_size, args.class_cond)

    logger.log("creating samples...")
    all_images = []
    while len(all_images) * args.batch_size < args.num_samples:
        # Each yielded dict carries the low_res conditioning (and labels).
        model_kwargs = next(data)
        model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
        sample = diffusion.p_sample_loop(
            model,
            (args.batch_size, 3, args.large_size, args.large_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
        )
        # Map [-1, 1] floats to uint8 and switch to NHWC for saving.
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()

        all_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(all_samples, sample)  # gather not supported with NCCL
        for sample in all_samples:
            all_images.append(sample.cpu().numpy())
        logger.log(f"created {len(all_images) * args.batch_size} samples")

    arr = np.concatenate(all_images, axis=0)
    arr = arr[: args.num_samples]
    if dist.get_rank() == 0:
        # Only rank 0 writes the output file.
        shape_str = "x".join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
        logger.log(f"saving to {out_path}")
        np.savez(out_path, arr)

    dist.barrier()
    logger.log("sampling complete")
def load_data_for_worker(base_samples, batch_size, class_cond):
    """Endlessly yield model_kwargs batches of base samples for this rank.

    Samples come from the .npz written by image_sample.py; ranks shard the
    array by striding with the world size so each rank sees a disjoint subset.
    """
    with bf.BlobFile(base_samples, "rb") as f:
        obj = np.load(f)
        image_arr = obj["arr_0"]
        if class_cond:
            label_arr = obj["arr_1"]
    rank = dist.get_rank()
    num_ranks = dist.get_world_size()
    buffer = []
    label_buffer = []
    while True:
        for i in range(rank, len(image_arr), num_ranks):
            buffer.append(image_arr[i])
            if class_cond:
                label_buffer.append(label_arr[i])
            if len(buffer) == batch_size:
                # uint8 NHWC -> float NCHW scaled to [-1, 1].
                batch = th.from_numpy(np.stack(buffer)).float()
                batch = batch / 127.5 - 1.0
                batch = batch.permute(0, 3, 1, 2)
                res = dict(low_res=batch)
                if class_cond:
                    res["y"] = th.from_numpy(np.stack(label_buffer))
                yield res
                buffer, label_buffer = [], []
def create_argparser():
    """Build the CLI parser: sampling flags plus super-resolution model defaults."""
    options = {
        "clip_denoised": True,
        "num_samples": 10000,
        "batch_size": 16,
        "use_ddim": False,
        "base_samples": "",
        "model_path": "",
    }
    options.update(sr_model_and_diffusion_defaults())

    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, options)
    return parser
if __name__ == "__main__":
main()
| 3,725 | 30.05 | 84 | py |
ZeCon | ZeCon-main/guided_diffusion/scripts/classifier_sample.py | """
Like image_sample.py, but use a noisy image classifier to guide the sampling
process towards more realistic images.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
classifier_defaults,
create_model_and_diffusion,
create_classifier,
add_dict_to_argparser,
args_to_dict,
)
def main():
    """Classifier-guided sampling: steer a diffusion model toward random class
    labels using gradients from a noise-aware classifier; rank 0 saves .npz."""
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    if args.use_fp16:
        model.convert_to_fp16()
    model.eval()

    logger.log("loading classifier...")
    classifier = create_classifier(**args_to_dict(args, classifier_defaults().keys()))
    classifier.load_state_dict(
        dist_util.load_state_dict(args.classifier_path, map_location="cpu")
    )
    classifier.to(dist_util.dev())
    if args.classifier_use_fp16:
        classifier.convert_to_fp16()
    classifier.eval()

    def cond_fn(x, t, y=None):
        # Gradient of log p(y | x_t) w.r.t. x_t, scaled by classifier_scale;
        # used by the sampler to bias each denoising step toward class y.
        assert y is not None
        with th.enable_grad():
            x_in = x.detach().requires_grad_(True)
            logits = classifier(x_in, t)
            log_probs = F.log_softmax(logits, dim=-1)
            selected = log_probs[range(len(logits)), y.view(-1)]
            return th.autograd.grad(selected.sum(), x_in)[0] * args.classifier_scale

    def model_fn(x, t, y=None):
        # Forward labels to the model only if it was trained class-conditionally.
        assert y is not None
        return model(x, t, y if args.class_cond else None)

    logger.log("sampling...")
    all_images = []
    all_labels = []
    while len(all_images) * args.batch_size < args.num_samples:
        model_kwargs = {}
        classes = th.randint(
            low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
        )
        model_kwargs["y"] = classes
        sample_fn = (
            diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
        )
        sample = sample_fn(
            model_fn,
            (args.batch_size, 3, args.image_size, args.image_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
            cond_fn=cond_fn,
            device=dist_util.dev(),
        )
        # Map [-1, 1] floats to uint8 and switch to NHWC for saving.
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()

        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)  # gather not supported with NCCL
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        gathered_labels = [th.zeros_like(classes) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_labels, classes)
        all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f"created {len(all_images) * args.batch_size} samples")

    arr = np.concatenate(all_images, axis=0)
    arr = arr[: args.num_samples]
    label_arr = np.concatenate(all_labels, axis=0)
    label_arr = label_arr[: args.num_samples]
    if dist.get_rank() == 0:
        # Only rank 0 writes the output file.
        shape_str = "x".join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
        logger.log(f"saving to {out_path}")
        np.savez(out_path, arr, label_arr)

    dist.barrier()
    logger.log("sampling complete")
def create_argparser():
    """Build the CLI parser for classifier-guided sampling."""
    flag_defaults = {
        "clip_denoised": True,
        "num_samples": 10000,
        "batch_size": 16,
        "use_ddim": False,
        "model_path": "",
        "classifier_path": "",
        "classifier_scale": 1.0,
    }
    # Fold in the shared model/diffusion and classifier hyperparameters.
    flag_defaults.update(model_and_diffusion_defaults())
    flag_defaults.update(classifier_defaults())
    arg_parser = argparse.ArgumentParser()
    add_dict_to_argparser(arg_parser, flag_defaults)
    return arg_parser
if __name__ == "__main__":
main()
| 4,266 | 31.325758 | 88 | py |
ZeCon | ZeCon-main/guided_diffusion/scripts/classifier_train.py | """
Train a noised image classifier on ImageNet.
"""
import argparse
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from guided_diffusion import dist_util, logger
from guided_diffusion.fp16_util import MixedPrecisionTrainer
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
add_dict_to_argparser,
args_to_dict,
classifier_and_diffusion_defaults,
create_classifier_and_diffusion,
)
from guided_diffusion.train_util import parse_resume_step_from_filename, log_loss_dict
def main():
    """Train a noised-image classifier: build model, load data, run the training loop.

    Expects a distributed (MPI/NCCL) environment; every rank runs this function.
    """
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure()
    logger.log("creating model and diffusion...")
    model, diffusion = create_classifier_and_diffusion(
        **args_to_dict(args, classifier_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    # The diffusion process is only needed to noise inputs when args.noised is set.
    if args.noised:
        schedule_sampler = create_named_schedule_sampler(
            args.schedule_sampler, diffusion
        )
    resume_step = 0
    if args.resume_checkpoint:
        resume_step = parse_resume_step_from_filename(args.resume_checkpoint)
        if dist.get_rank() == 0:
            logger.log(
                f"loading model from checkpoint: {args.resume_checkpoint}... at {resume_step} step"
            )
            model.load_state_dict(
                dist_util.load_state_dict(
                    args.resume_checkpoint, map_location=dist_util.dev()
                )
            )
    # Needed for creating correct EMAs and fp16 parameters.
    dist_util.sync_params(model.parameters())
    mp_trainer = MixedPrecisionTrainer(
        model=model, use_fp16=args.classifier_use_fp16, initial_lg_loss_scale=16.0
    )
    model = DDP(
        model,
        device_ids=[dist_util.dev()],
        output_device=dist_util.dev(),
        broadcast_buffers=False,
        bucket_cap_mb=128,
        find_unused_parameters=False,
    )
    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=True,
        random_crop=True,
    )
    if args.val_data_dir:
        val_data = load_data(
            data_dir=args.val_data_dir,
            batch_size=args.batch_size,
            image_size=args.image_size,
            class_cond=True,
        )
    else:
        val_data = None
    logger.log(f"creating optimizer...")
    opt = AdamW(mp_trainer.master_params, lr=args.lr, weight_decay=args.weight_decay)
    if args.resume_checkpoint:
        # Optimizer state lives next to the model checkpoint as optNNNNNN.pt.
        opt_checkpoint = bf.join(
            bf.dirname(args.resume_checkpoint), f"opt{resume_step:06}.pt"
        )
        logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
        opt.load_state_dict(
            dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
        )
    logger.log("training classifier model...")
    def forward_backward_log(data_loader, prefix="train"):
        """One forward/backward pass over a batch, logging loss and top-k accuracy."""
        batch, extra = next(data_loader)
        labels = extra["y"].to(dist_util.dev())
        batch = batch.to(dist_util.dev())
        # Noisy images
        if args.noised:
            t, _ = schedule_sampler.sample(batch.shape[0], dist_util.dev())
            batch = diffusion.q_sample(batch, t)
        else:
            t = th.zeros(batch.shape[0], dtype=th.long, device=dist_util.dev())
        for i, (sub_batch, sub_labels, sub_t) in enumerate(
            split_microbatches(args.microbatch, batch, labels, t)
        ):
            logits = model(sub_batch, timesteps=sub_t)
            loss = F.cross_entropy(logits, sub_labels, reduction="none")
            losses = {}
            losses[f"{prefix}_loss"] = loss.detach()
            losses[f"{prefix}_acc@1"] = compute_top_k(
                logits, sub_labels, k=1, reduction="none"
            )
            losses[f"{prefix}_acc@5"] = compute_top_k(
                logits, sub_labels, k=5, reduction="none"
            )
            log_loss_dict(diffusion, sub_t, losses)
            del losses
            loss = loss.mean()
            # Validation passes run under th.no_grad(), so requires_grad is False there.
            if loss.requires_grad:
                if i == 0:
                    mp_trainer.zero_grad()
                # Weight each microbatch by its share of the full batch.
                mp_trainer.backward(loss * len(sub_batch) / len(batch))
    for step in range(args.iterations - resume_step):
        logger.logkv("step", step + resume_step)
        logger.logkv(
            "samples",
            (step + resume_step + 1) * args.batch_size * dist.get_world_size(),
        )
        if args.anneal_lr:
            set_annealed_lr(opt, args.lr, (step + resume_step) / args.iterations)
        forward_backward_log(data)
        mp_trainer.optimize(opt)
        if val_data is not None and not step % args.eval_interval:
            with th.no_grad():
                # no_sync: skip DDP gradient sync during the eval-only pass.
                with model.no_sync():
                    model.eval()
                    forward_backward_log(val_data, prefix="val")
                model.train()
        if not step % args.log_interval:
            logger.dumpkvs()
        if (
            step
            and dist.get_rank() == 0
            and not (step + resume_step) % args.save_interval
        ):
            logger.log("saving model...")
            save_model(mp_trainer, opt, step + resume_step)
    # Final save after the loop completes.
    if dist.get_rank() == 0:
        logger.log("saving model...")
        save_model(mp_trainer, opt, step + resume_step)
    dist.barrier()
def set_annealed_lr(opt, base_lr, frac_done):
    """Linearly anneal the learning rate of every param group toward zero.

    :param opt: the optimizer whose param_groups are updated in place.
    :param base_lr: the learning rate at frac_done == 0.
    :param frac_done: fraction of training completed, in [0, 1].
    """
    annealed = base_lr * (1 - frac_done)
    for group in opt.param_groups:
        group["lr"] = annealed
def save_model(mp_trainer, opt, step):
    """Write model and optimizer checkpoints to the logger directory.

    Only rank 0 writes; other ranks return immediately.
    """
    if dist.get_rank() != 0:
        return
    out_dir = logger.get_dir()
    model_state = mp_trainer.master_params_to_state_dict(mp_trainer.master_params)
    th.save(model_state, os.path.join(out_dir, f"model{step:06d}.pt"))
    th.save(opt.state_dict(), os.path.join(out_dir, f"opt{step:06d}.pt"))
def compute_top_k(logits, labels, k, reduction="mean"):
    """Compute top-k accuracy.

    :param logits: [N, C] class scores.
    :param labels: [N] integer target labels.
    :param k: number of top predictions that count as a hit.
    :param reduction: "mean" for a Python float, "none" for a per-sample tensor.
    :return: a float (mean accuracy) or a [N] float tensor of 0/1 hits.
    :raises ValueError: on an unknown reduction (previously returned None silently).
    """
    _, top_ks = th.topk(logits, k, dim=-1)
    # A sample is a hit if its label appears anywhere in its top-k predictions.
    hits = (top_ks == labels[:, None]).float().sum(dim=-1)
    if reduction == "mean":
        return hits.mean().item()
    elif reduction == "none":
        return hits
    raise ValueError(f"unknown reduction: {reduction}")
def split_microbatches(microbatch, *args):
    """Yield microbatch-sized slices of the given parallel sequences.

    If microbatch is -1 or at least the batch size, yields everything at once.
    None entries in *args* are passed through unchanged.
    """
    total = len(args[0])
    if microbatch == -1 or microbatch >= total:
        yield tuple(args)
        return
    for start in range(0, total, microbatch):
        yield tuple(
            None if seq is None else seq[start : start + microbatch] for seq in args
        )
def create_argparser():
    """Build the CLI parser for classifier training."""
    flag_defaults = {
        "data_dir": "",
        "val_data_dir": "",
        "noised": True,
        "iterations": 150000,
        "lr": 3e-4,
        "weight_decay": 0.0,
        "anneal_lr": False,
        "batch_size": 4,
        "microbatch": -1,
        "schedule_sampler": "uniform",
        "resume_checkpoint": "",
        "log_interval": 10,
        "eval_interval": 5,
        "save_interval": 10000,
    }
    # Fold in the shared classifier/diffusion hyperparameters.
    flag_defaults.update(classifier_and_diffusion_defaults())
    arg_parser = argparse.ArgumentParser()
    add_dict_to_argparser(arg_parser, flag_defaults)
    return arg_parser
if __name__ == "__main__":
main()
| 7,313 | 31.220264 | 99 | py |
ZeCon | ZeCon-main/guided_diffusion/scripts/image_nll.py | """
Approximate the bits/dimension for an image model.
"""
import argparse
import os
import numpy as np
import torch.distributed as dist
from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
def main():
    """Load a trained diffusion model and evaluate bits/dim on a dataset."""
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure()
    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()
    logger.log("creating data loader...")
    # deterministic=True so every rank walks the dataset in the same order.
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=args.class_cond,
        deterministic=True,
    )
    logger.log("evaluating...")
    run_bpd_evaluation(model, diffusion, data, args.num_samples, args.clip_denoised)
def run_bpd_evaluation(model, diffusion, data, num_samples, clip_denoised):
    """Evaluate bits/dim across ranks and save per-term metrics.

    Every rank processes batches until num_samples are consumed globally;
    metrics are averaged over ranks with all_reduce. Rank 0 writes one
    ``{name}_terms.npz`` file per metric to the logger directory.
    """
    all_bpd = []
    all_metrics = {"vb": [], "mse": [], "xstart_mse": []}
    num_complete = 0
    while num_complete < num_samples:
        batch, model_kwargs = next(data)
        batch = batch.to(dist_util.dev())
        model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
        minibatch_metrics = diffusion.calc_bpd_loop(
            model, batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        for key, term_list in all_metrics.items():
            # Pre-divide by world size so the all_reduce sum yields the mean.
            terms = minibatch_metrics[key].mean(dim=0) / dist.get_world_size()
            dist.all_reduce(terms)
            term_list.append(terms.detach().cpu().numpy())
        total_bpd = minibatch_metrics["total_bpd"]
        total_bpd = total_bpd.mean() / dist.get_world_size()
        dist.all_reduce(total_bpd)
        all_bpd.append(total_bpd.item())
        num_complete += dist.get_world_size() * batch.shape[0]
        logger.log(f"done {num_complete} samples: bpd={np.mean(all_bpd)}")
    if dist.get_rank() == 0:
        for name, terms in all_metrics.items():
            out_path = os.path.join(logger.get_dir(), f"{name}_terms.npz")
            logger.log(f"saving {name} terms to {out_path}")
            np.savez(out_path, np.mean(np.stack(terms), axis=0))
    dist.barrier()
    logger.log("evaluation complete")
def create_argparser():
    """Build the CLI parser for bits/dim evaluation."""
    flag_defaults = {
        "data_dir": "",
        "clip_denoised": True,
        "num_samples": 1000,
        "batch_size": 1,
        "model_path": "",
    }
    flag_defaults.update(model_and_diffusion_defaults())
    arg_parser = argparse.ArgumentParser()
    add_dict_to_argparser(arg_parser, flag_defaults)
    return arg_parser
if __name__ == "__main__":
main()
| 2,934 | 29.257732 | 86 | py |
ZeCon | ZeCon-main/guided_diffusion/scripts/super_res_train.py | """
Train a super-resolution model.
"""
import argparse
import torch.nn.functional as F
from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
sr_model_and_diffusion_defaults,
sr_create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
from guided_diffusion.train_util import TrainLoop
def main():
    """Train a super-resolution diffusion model."""
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure()
    logger.log("creating model...")
    model, diffusion = sr_create_model_and_diffusion(
        **args_to_dict(args, sr_model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
    logger.log("creating data loader...")
    # Yields (high-res batch, kwargs) with a low-res conditioning image attached.
    data = load_superres_data(
        args.data_dir,
        args.batch_size,
        large_size=args.large_size,
        small_size=args.small_size,
        class_cond=args.class_cond,
    )
    logger.log("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
    ).run_loop()
def load_superres_data(data_dir, batch_size, large_size, small_size, class_cond=False):
    """Yield (high_res_batch, model_kwargs) pairs for super-resolution training.

    model_kwargs["low_res"] holds an area-downsampled copy of each batch,
    which the SR model uses as conditioning.
    """
    base_loader = load_data(
        data_dir=data_dir,
        batch_size=batch_size,
        image_size=large_size,
        class_cond=class_cond,
    )
    for large_batch, model_kwargs in base_loader:
        model_kwargs["low_res"] = F.interpolate(large_batch, small_size, mode="area")
        yield large_batch, model_kwargs
def create_argparser():
    """Build the CLI parser for super-resolution training."""
    flag_defaults = {
        "data_dir": "",
        "schedule_sampler": "uniform",
        "lr": 1e-4,
        "weight_decay": 0.0,
        "lr_anneal_steps": 0,
        "batch_size": 1,
        "microbatch": -1,
        "ema_rate": "0.9999",
        "log_interval": 10,
        "save_interval": 10000,
        "resume_checkpoint": "",
        "use_fp16": False,
        "fp16_scale_growth": 1e-3,
    }
    flag_defaults.update(sr_model_and_diffusion_defaults())
    arg_parser = argparse.ArgumentParser()
    add_dict_to_argparser(arg_parser, flag_defaults)
    return arg_parser
if __name__ == "__main__":
main()
| 2,695 | 26.232323 | 87 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/resample.py | from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler ("uniform" or "loss-second-moment").
    :param diffusion: the diffusion object to sample for.
    """
    registry = {
        "uniform": UniformSampler,
        "loss-second-moment": LossSecondMomentResampler,
    }
    if name not in registry:
        raise NotImplementedError(f"unknown schedule sampler: {name}")
    return registry[name](diffusion)
class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to
    reduce the variance of the training objective.

    By default, samplers perform unbiased importance sampling: the
    objective's mean is unchanged. Subclasses may override sample() to
    reweight resampled terms and actually change the objective.
    """

    @abstractmethod
    def weights(self):
        """
        Return a numpy array with one weight per diffusion step.
        Weights need not be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps to draw.
        :param device: the torch device for the returned tensors.
        :return: (timesteps, weights) where weights rescale losses so the
                 importance-sampled estimate stays unbiased.
        """
        raw = self.weights()
        probs = raw / np.sum(raw)
        chosen = np.random.choice(len(probs), size=(batch_size,), p=probs)
        timesteps = th.from_numpy(chosen).long().to(device)
        # 1 / (T * p) is the standard importance-sampling correction.
        inv = 1 / (len(probs) * probs[chosen])
        loss_weights = th.from_numpy(inv).float().to(device)
        return timesteps, loss_weights
class UniformSampler(ScheduleSampler):
    """Sampler that weights every diffusion timestep equally."""

    def __init__(self, diffusion):
        self.diffusion = diffusion
        # One unit weight per timestep; sample() normalizes.
        self._weights = np.ones([diffusion.num_timesteps])

    def weights(self):
        return self._weights
class LossAwareSampler(ScheduleSampler):
    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.
        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.
        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        # First gather every rank's batch size, since batches may be ragged.
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )
        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)
        timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)
        # Truncate each gathered batch back to its true size before flattening.
        timesteps = [
            x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
        ]
        losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
        self.update_with_all_losses(timesteps, losses)

    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.
        Sub-classes should override this method to update the reweighting
        using losses from the model.
        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.
        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """
class LossSecondMomentResampler(LossAwareSampler):
    """
    Importance-sample timesteps proportionally to the RMS of recent losses,
    mixed with a small uniform probability so no timestep starves.
    """

    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        """
        :param diffusion: diffusion object providing num_timesteps.
        :param history_per_term: losses kept per timestep before weighting kicks in.
        :param uniform_prob: probability mass reserved for uniform sampling.
        """
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use an explicit fixed-width integer dtype instead.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)

    def weights(self):
        # Uniform until every timestep has a full loss history.
        if not self._warmed_up():
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        # True once every timestep's history buffer is full.
        return (self._loss_counts == self.history_per_term).all()
| 5,689 | 35.709677 | 87 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/losses.py | """
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute KL(N(mean1, exp(logvar1)) || N(mean2, exp(logvar2))).

    Shapes are broadcast, so scalars can be compared against batches.
    At least one argument must be a torch Tensor.
    """
    reference = next(
        (obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)),
        None,
    )
    assert reference is not None, "at least one argument must be a Tensor"

    # Force the log-variances to be Tensors: broadcasting converts scalar
    # means automatically, but th.exp() needs real tensors.
    logvar1, logvar2 = [
        lv if isinstance(lv, th.Tensor) else th.tensor(lv).to(reference)
        for lv in (logvar1, logvar2)
    ]

    var_ratio = th.exp(logvar1 - logvar2)
    scaled_sq_diff = ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    return 0.5 * (-1.0 + logvar2 - logvar1 + var_ratio + scaled_sq_diff)
def approx_standard_normal_cdf(x):
    """
    Fast tanh-based approximation of the standard normal CDF
    (the same form used in PixelCNN++).
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: target images, assumed to be uint8 values rescaled to [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    half_bin = 1.0 / 255.0
    centered = x - means
    inv_std = th.exp(-log_scales)
    cdf_upper = approx_standard_normal_cdf(inv_std * (centered + half_bin))
    cdf_lower = approx_standard_normal_cdf(inv_std * (centered - half_bin))
    log_cdf_upper = th.log(cdf_upper.clamp(min=1e-12))
    log_sf_lower = th.log((1.0 - cdf_lower).clamp(min=1e-12))
    log_bin_prob = th.log((cdf_upper - cdf_lower).clamp(min=1e-12))
    # The extreme pixel values absorb the full tail mass below -1 / above +1.
    out = th.where(
        x < -0.999,
        log_cdf_upper,
        th.where(x > 0.999, log_sf_lower, log_bin_prob),
    )
    assert out.shape == x.shape
    return out
| 2,534 | 31.5 | 109 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/image_datasets.py | import math
import random
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset
def load_data(
    *,
    data_dir,
    batch_size,
    image_size,
    class_cond=False,
    deterministic=False,
    random_crop=False,
    random_flip=True,
):
    """
    For a dataset, create a generator over (images, kwargs) pairs.
    Each images is an NCHW float tensor, and the kwargs dict contains zero or
    more keys, each of which map to a batched Tensor of their own.
    The kwargs dict can be used for class labels, in which case the key is "y"
    and the values are integer tensors of class labels.
    :param data_dir: a dataset directory.
    :param batch_size: the batch size of each returned pair.
    :param image_size: the size to which images are resized.
    :param class_cond: if True, include a "y" key in returned dicts for class
                       label. If classes are not available and this is true, an
                       exception will be raised.
    :param deterministic: if True, yield results in a deterministic order.
    :param random_crop: if True, randomly crop the images for augmentation.
    :param random_flip: if True, randomly flip the images for augmentation.
    """
    if not data_dir:
        raise ValueError("unspecified data directory")
    all_files = _list_image_files_recursively(data_dir)
    classes = None
    if class_cond:
        # Assume classes are the first part of the filename,
        # before an underscore.
        class_names = [bf.basename(path).split("_")[0] for path in all_files]
        sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
        classes = [sorted_classes[x] for x in class_names]
    # Shard the file list across MPI ranks so each worker sees a disjoint subset.
    dataset = ImageDataset(
        image_size,
        all_files,
        classes=classes,
        shard=MPI.COMM_WORLD.Get_rank(),
        num_shards=MPI.COMM_WORLD.Get_size(),
        random_crop=random_crop,
        random_flip=random_flip,
    )
    if deterministic:
        loader = DataLoader(
            dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
        )
    else:
        loader = DataLoader(
            dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
        )
    # Loop forever: training code pulls batches with next() indefinitely.
    while True:
        yield from loader
def _list_image_files_recursively(data_dir):
    """Recursively collect paths of jpg/jpeg/png/gif files under data_dir, in sorted order."""
    found = []
    for entry in sorted(bf.listdir(data_dir)):
        full_path = bf.join(data_dir, entry)
        suffix = entry.split(".")[-1]
        if "." in entry and suffix.lower() in ["jpg", "jpeg", "png", "gif"]:
            found.append(full_path)
        elif bf.isdir(full_path):
            found.extend(_list_image_files_recursively(full_path))
    return found
class ImageDataset(Dataset):
    """Image dataset that crops/flips on load and shards files across workers."""

    def __init__(
        self,
        resolution,
        image_paths,
        classes=None,
        shard=0,
        num_shards=1,
        random_crop=False,
        random_flip=True,
    ):
        super().__init__()
        self.resolution = resolution
        # Each shard takes every num_shards-th file, offset by its own index.
        self.local_images = image_paths[shard:][::num_shards]
        self.local_classes = None if classes is None else classes[shard:][::num_shards]
        self.random_crop = random_crop
        self.random_flip = random_flip

    def __len__(self):
        return len(self.local_images)

    def __getitem__(self, idx):
        path = self.local_images[idx]
        with bf.BlobFile(path, "rb") as f:
            pil_image = Image.open(f)
            pil_image.load()
        pil_image = pil_image.convert("RGB")

        crop = random_crop_arr if self.random_crop else center_crop_arr
        arr = crop(pil_image, self.resolution)

        if self.random_flip and random.random() < 0.5:
            arr = arr[:, ::-1]

        # Rescale uint8 pixel values into [-1, 1].
        arr = arr.astype(np.float32) / 127.5 - 1

        out_dict = {}
        if self.local_classes is not None:
            out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
        # HWC -> CHW for the model.
        return np.transpose(arr, [2, 0, 1]), out_dict
def center_crop_arr(pil_image, image_size):
    """Downscale so the short side equals image_size, then center-crop a square."""
    # Repeated BOX halving stands in for PIL's `reducing_gap` on older
    # versions and gives a higher-quality downsample than one BICUBIC pass.
    while min(*pil_image.size) >= 2 * image_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = image_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    offset_y = (arr.shape[0] - image_size) // 2
    offset_x = (arr.shape[1] - image_size) // 2
    return arr[offset_y : offset_y + image_size, offset_x : offset_x + image_size]
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
    """Rescale by a random factor, then take an image_size crop at a random offset.

    The crop covers between min_crop_frac and max_crop_frac of the short side.
    """
    min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
    max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
    smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)

    # Repeated BOX halving stands in for PIL's `reducing_gap` on older
    # versions and gives a higher-quality downsample than one BICUBIC pass.
    while min(*pil_image.size) >= 2 * smaller_dim_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = smaller_dim_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    offset_y = random.randrange(arr.shape[0] - image_size + 1)
    offset_x = random.randrange(arr.shape[1] - image_size + 1)
    return arr[offset_y : offset_y + image_size, offset_x : offset_x + image_size]
| 5,930 | 34.303571 | 88 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/nn.py | """
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    """Sigmoid Linear Unit activation, kept for PyTorch < 1.7 compatibility."""

    def forward(self, x):
        return th.sigmoid(x) * x
class GroupNorm32(nn.GroupNorm):
    """GroupNorm computed in float32, with the result cast back to the input dtype."""

    def forward(self, x):
        normalized = super().forward(x.float())
        return normalized.type(x.dtype)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.

    :raises ValueError: if dims is not 1, 2, or 3.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_classes[dims](*args, **kwargs)
def linear(*args, **kwargs):
    """Thin wrapper around nn.Linear, kept for API symmetry with conv_nd."""
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.

    :raises ValueError: if dims is not 1, 2, or 3.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_classes[dims](*args, **kwargs)
def update_ema(target_params, source_params, rate=0.99):
    """
    Blend source parameters into target parameters in place with an
    exponential moving average: target <- rate * target + (1 - rate) * source.

    :param target_params: the target parameter sequence (mutated).
    :param source_params: the source parameter sequence.
    :param rate: the EMA rate (closer to 1 means slower tracking).
    """
    for tgt, src in zip(target_params, source_params):
        tgt.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
    """Set every parameter of *module* to zero in place and return the module."""
    for param in module.parameters():
        param.detach().zero_()
    return module
def scale_module(module, scale):
    """Multiply every parameter of *module* by *scale* in place and return the module."""
    for param in module.parameters():
        param.detach().mul_(scale)
    return module
def mean_flat(tensor):
    """Average over every dimension except the leading batch dimension."""
    non_batch_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
def normalization(channels):
    """
    Make a standard normalization layer: GroupNorm with 32 groups,
    computed internally in float32.

    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to 1/max_period.
    freqs = th.exp(
        -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
    ).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(angles), th.sin(angles)], dim=-1)
    if dim % 2:
        # Pad odd dims with a zero column so the output is exactly [N, dim].
        embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing
    reduced memory at the cost of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    flat_args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *flat_args)
class CheckpointFunction(th.autograd.Function):
    """Autograd Function implementing gradient checkpointing: the forward pass
    runs under no_grad and inputs are saved; backward re-runs the function
    with grad enabled to recompute activations."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        # args is inputs followed by extra params; `length` marks the split.
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Re-attach the inputs to the graph and recompute the forward pass.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # The two Nones match the non-tensor forward args (run_function, length).
        return (None, None) + input_grads
| 5,020 | 28.362573 | 88 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/fp16_util.py | """
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
    """
    Convert primitive modules to float16.
    Only conv layers are touched; everything else is left unchanged.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    if l.bias is not None:
        l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
    """
    Convert primitive modules to float32, undoing convert_module_to_f16().
    Only conv layers are touched; everything else is left unchanged.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.float()
    if l.bias is not None:
        l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
    """
    Copy model parameters into a (differently-shaped) list of full-precision
    master parameters: one flattened fp32 tensor per group.
    """
    master_params = []
    for param_group, shape in param_groups_and_shapes:
        detached = [param.detach().float() for (_, param) in param_group]
        flat = _flatten_dense_tensors(detached).view(shape)
        master = nn.Parameter(flat)
        master.requires_grad = True
        master_params.append(master)
    return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
    """
    Copy gradients from the model parameters into the flat master parameters
    produced by make_master_params(); missing grads become zeros.
    """
    for master_param, (param_group, shape) in zip(
        master_params, param_groups_and_shapes
    ):
        grads = [param_grad_or_zeros(param) for (_, param) in param_group]
        master_param.grad = _flatten_dense_tensors(grads).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """
    Copy the master parameter data back into the model parameters.
    """
    # zip over concrete lists; a generator here would silently copy nothing.
    for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
        unflat = unflatten_master_params(param_group, master_param.view(-1))
        for (_, param), unflat_master_param in zip(param_group, unflat):
            param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
    """Split a flat master tensor back into pieces shaped like each model param."""
    templates = [param for (_, param) in param_group]
    return _unflatten_dense_tensors(master_param, templates)
def get_param_groups_and_shapes(named_model_params):
    """
    Partition named parameters into two (group, target_shape) pairs:
    scalars/vectors (ndim <= 1) flattened to -1, and matrices (ndim > 1)
    flattened to (1, -1).
    """
    named_model_params = list(named_model_params)
    scalars_and_vectors = (
        [(n, p) for (n, p) in named_model_params if p.ndim <= 1],
        (-1),
    )
    matrices = (
        [(n, p) for (n, p) in named_model_params if p.ndim > 1],
        (1, -1),
    )
    return [scalars_and_vectors, matrices]
def master_params_to_state_dict(
    model, param_groups_and_shapes, master_params, use_fp16
):
    """
    Build a state dict for *model* whose parameter values come from
    *master_params*. In fp16 mode the flat master tensors are unflattened
    back to per-parameter shapes; otherwise master params map 1:1 to
    model.named_parameters().
    """
    state_dict = model.state_dict()
    if use_fp16:
        for master_param, (param_group, _) in zip(
            master_params, param_groups_and_shapes
        ):
            unflat = unflatten_master_params(param_group, master_param.view(-1))
            for (name, _), unflat_master_param in zip(param_group, unflat):
                assert name in state_dict
                state_dict[name] = unflat_master_param
    else:
        for i, (name, _value) in enumerate(model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
    return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
    """Inverse of master_params_to_state_dict: rebuild master params from a state dict."""
    if not use_fp16:
        return [state_dict[name] for name, _ in model.named_parameters()]
    named_model_params = [
        (name, state_dict[name]) for name, _ in model.named_parameters()
    ]
    param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
    return make_master_params(param_groups_and_shapes)
def zero_master_grads(master_params):
    """Drop (set to None) the gradient of every master parameter."""
    for master_param in master_params:
        master_param.grad = None
def zero_grad(model_params):
    """Zero the gradients of *model_params* in place, like Optimizer.zero_grad.

    Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
    """
    for param in model_params:
        if param.grad is not None:
            param.grad.detach_()
            param.grad.zero_()
def param_grad_or_zeros(param):
    """Return the parameter's detached grad data, or zeros if it has no grad."""
    if param.grad is None:
        return th.zeros_like(param)
    return param.grad.data.detach()
class MixedPrecisionTrainer:
    """
    Wraps a model for (optionally) fp16 training with dynamic loss scaling:
    the loss is multiplied by 2 ** lg_loss_scale before backward, gradients
    are unscaled before the optimizer step, and the scale shrinks on overflow
    and slowly grows while training is stable.
    """

    def __init__(
        self,
        *,
        model,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
    ):
        self.model = model
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth

        self.model_params = list(self.model.parameters())
        # Without fp16, the model params themselves are the master params.
        self.master_params = self.model_params
        self.param_groups_and_shapes = None
        self.lg_loss_scale = initial_lg_loss_scale

        if self.use_fp16:
            # Keep fp32 "master" copies (one flat tensor per param group)
            # while the model itself runs in fp16.
            self.param_groups_and_shapes = get_param_groups_and_shapes(
                self.model.named_parameters()
            )
            self.master_params = make_master_params(self.param_groups_and_shapes)
            self.model.convert_to_fp16()

    def zero_grad(self):
        zero_grad(self.model_params)

    def backward(self, loss: th.Tensor):
        if self.use_fp16:
            # Scale the loss up so small fp16 gradients do not underflow.
            loss_scale = 2 ** self.lg_loss_scale
            (loss * loss_scale).backward()
        else:
            loss.backward()

    def optimize(self, opt: th.optim.Optimizer):
        # Returns True if a step was taken, False if it was skipped (overflow).
        if self.use_fp16:
            return self._optimize_fp16(opt)
        else:
            return self._optimize_normal(opt)

    def _optimize_fp16(self, opt: th.optim.Optimizer):
        logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
        model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
        grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
        if check_overflow(grad_norm):
            # Overflow/NaN: shrink the loss scale and skip this step entirely.
            self.lg_loss_scale -= 1
            logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
            zero_master_grads(self.master_params)
            return False

        logger.logkv_mean("grad_norm", grad_norm)
        logger.logkv_mean("param_norm", param_norm)

        # NOTE(review): only the first master param group's gradients are
        # unscaled here; when get_param_groups_and_shapes produces two groups,
        # the second would step with still-scaled grads -- confirm whether
        # every entry of self.master_params should be unscaled.
        self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
        opt.step()
        zero_master_grads(self.master_params)
        master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
        # Slowly grow the scale while training stays stable.
        self.lg_loss_scale += self.fp16_scale_growth
        return True

    def _optimize_normal(self, opt: th.optim.Optimizer):
        grad_norm, param_norm = self._compute_norms()
        logger.logkv_mean("grad_norm", grad_norm)
        logger.logkv_mean("param_norm", param_norm)
        opt.step()
        return True

    def _compute_norms(self, grad_scale=1.0):
        # Accumulate squared L2 norms, then take sqrt; grad_scale undoes the
        # loss scaling so the reported grad norm is in unscaled units.
        grad_norm = 0.0
        param_norm = 0.0
        for p in self.master_params:
            with th.no_grad():
                param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
                if p.grad is not None:
                    grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
        return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)

    def master_params_to_state_dict(self, master_params):
        return master_params_to_state_dict(
            self.model, self.param_groups_and_shapes, master_params, self.use_fp16
        )

    def state_dict_to_master_params(self, state_dict):
        return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
    """True if `value` is +inf, -inf, or NaN (NaN compares unequal to itself)."""
    return value != value or value == float("inf") or value == -float("inf")
| 7,941 | 32.510549 | 114 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/unet.py | from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py

    Pools an [N x C x H x W] feature map down to an [N x output_dim] vector by
    attending over all spatial positions plus a prepended mean token.
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # One learned position per spatial location, plus one extra slot for
        # the prepended mean token; scaled down by ~1/sqrt(embed_dim).
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
        )
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW)
        # Prepend the spatial mean as an extra token; its attention output
        # becomes the pooled representation.
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        # Return only the mean-token slot.
        return x[:, :, 0]
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Used as a marker base class so TimestepEmbedSequential knows which
    children accept the embedding.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that forwards the timestep embedding `emb` to
    every child that is a TimestepBlock, and calls the rest with `x` only.
    """

    def forward(self, x, emb):
        for layer in self:
            x = layer(x, emb) if isinstance(layer, TimestepBlock) else layer(x)
        return x
class Upsample(nn.Module):
    """
    Doubles the spatial resolution with nearest-neighbor interpolation,
    optionally followed by a 3x3 convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, only the
        inner-two dimensions are upsampled (the leading one is kept).
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # Keep the first spatial dim, double the last two.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
class Downsample(nn.Module):
    """
    Halves the spatial resolution with either a strided 3x3 convolution or
    average pooling.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, only the
        inner-two dimensions are downsampled (the leading one is kept).
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # Stride 2 everywhere, except the leading (temporal) axis in 3D.
        step = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=step, padding=1
            )
        else:
            # Pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=step, stride=step)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.

    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        # norm -> SiLU -> conv; the conv also performs the channel change.
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            # Resample both the hidden activations and the skip input so the
            # residual addition stays shape-compatible.
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # Projects the timestep embedding; twice the width when scale-shift
        # (FiLM-style) conditioning is used, since it emits scale and shift.
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        # Final conv is zero-initialized so the block starts as (near) identity.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.

        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        if self.updown:
            # Apply norm/SiLU first, resample, then convolve, so the
            # resampling happens at the input channel count.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over all spatial dims.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-like conditioning: normalize, then scale and shift by the
            # embedding before the remaining layers.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            # Fixed per-head width: derive the head count from it.
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # Zero-init output projection so the block starts as an identity.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # NOTE(review): the checkpoint flag is hard-coded to True here rather
        # than self.use_checkpoint -- confirm this is intentional.
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        # Flatten all spatial dims into one sequence axis, attend, restore.
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        # Residual connection around the attention.
        return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.

    Meant to be used like:

        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, chans, *spatial_dims = y[0].shape
    spatial_size = int(np.prod(spatial_dims))
    # Two matmuls of equal cost: one builds the (T x T) attention weights,
    # the other applies them to the value vectors.
    model.total_ops += th.DoubleTensor([2 * batch * spatial_size ** 2 * chans])
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention +
    input/output heads shaping (heads are split before qkv is split).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        # Scale q and k symmetrically before the matmul: more stable in fp16
        # than dividing the product afterwards.
        scale = 1 / math.sqrt(math.sqrt(ch))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v)
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order
    (qkv is split before the heads are split).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        # Symmetric pre-scaling of q and k: more stable in fp16 than
        # dividing the product afterwards.
        scale = 1 / math.sqrt(math.sqrt(ch))
        q = (q * scale).view(bs * self.n_heads, ch, length)
        k = (k * scale).view(bs * self.n_heads, ch, length)
        weight = th.einsum("bct,bcs->bts", q, k)
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
        a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
        of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
        increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        # MLP that maps the sinusoidal timestep embedding to the width
        # consumed by every ResBlock.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            # Class embedding is added to the time embedding.
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        ch = input_ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        # Track per-stage channel counts so the decoder can size its skip
        # connections; `ds` is the cumulative downsampling factor.
        input_block_chans = [ch]
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (except after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # Decoder: mirrors the encoder, consuming one skip connection
        # (popped from input_block_chans) per ResBlock.
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=int(model_channels * mult),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(model_channels * mult)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                if level and i == num_res_blocks:
                    # Upsample at the end of each level except the outermost.
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # Final projection; zero-initialized conv (input_ch equals the
        # decoder's final ch here).
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
        )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps, y=None):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
        h = self.middle_block(h, emb)
        for module in self.output_blocks:
            # Concatenate the matching encoder skip connection.
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb)
        h = h.type(x.dtype)
        return self.out(h)

    # CUT feature extraction
    def forward_enc(self, x, timesteps, nce_layers, y=None):
        # Encoder-only pass that collects intermediate activations from the
        # input blocks whose indices appear in `nce_layers` (used for the
        # patchwise contrastive / ZeCon losses).
        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        feats = []
        h = x.type(self.dtype)
        for i, module in enumerate(self.input_blocks):
            h = module(h, emb)
            hs.append(h)
            if i in nce_layers:
                feats.append(h)
        # h = self.middle_block(h, emb)
        # for module in self.output_blocks:
        #     h = th.cat([h, hs.pop()], dim=1)
        #     h = module(h, emb)
        h = h.type(x.dtype)
        return feats
class SuperResModel(UNetModel):
    """
    A UNetModel that performs super-resolution.

    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, image_size, in_channels, *args, **kwargs):
        # Double the input channels: the upsampled low-res conditioning image
        # is concatenated to the noisy input along the channel axis.
        super().__init__(image_size, in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        _, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        conditioned = th.cat([x, upsampled], dim=1)
        return super().forward(conditioned, timesteps, **kwargs)
class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.

    Encoder-only variant that pools its features into a single output vector
    (e.g. for a classifier). For usage, see UNet.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        # MLP for the sinusoidal timestep embedding.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        input_block_chans = [ch]
        # `ds` is the cumulative downsampling factor.
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (except after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        # Output head: how the final feature map is pooled to a vector.
        self.pool = pool
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            # Consumes per-stage spatially-averaged features concatenated
            # together (see forward), hence the _feature_size input width.
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                # Collect the spatial mean of every stage's activations.
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, axis=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
| 32,001 | 33.822633 | 124 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/gaussian_diffusion.py | """
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
from tkinter import X
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps. Beta schedules may be added, but
    should not be removed or changed once they are committed to maintain
    backwards compatibility.
    """
    if schedule_name == "linear":
        # Linear schedule from Ho et al, rescaled so it works for any number
        # of diffusion steps (endpoints match the 1000-step schedule).
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64
        )
    if schedule_name == "cosine":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clamped at max_beta.
    betas = [
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ]
    return np.array(betas)
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """

    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon (the added noise)
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """

    LEARNED = enum.auto()  # model outputs the log-variance directly
    FIXED_SMALL = enum.auto()  # use the posterior variance
    FIXED_LARGE = enum.auto()  # use the (larger) beta_t
    LEARNED_RANGE = enum.auto()  # model outputs an interpolation fraction
class LossType(enum.Enum):
    """How the training loss is computed."""

    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = enum.auto()  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        # Both KL variants optimize the variational bound.
        return self in (LossType.KL, LossType.RESCALED_KL)
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self, *, betas, model_mean_type, model_var_type, loss_type, rescale_timesteps=False,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """
        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
        the initial x, x_0.
        :param model: the model, which takes a signal and a batch of timesteps
                      as input.
        :param x: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param clip_denoised: if True, clip the denoised signal into [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample. Applies before
            clip_denoised.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict with the following keys:
                 - 'mean': the model mean output.
                 - 'variance': the model variance output.
                 - 'log_variance': the log of 'variance'.
                 - 'pred_xstart': the prediction for x_0.
        """
        if model_kwargs is None:
            model_kwargs = {}
        B, C = x.shape[:2]
        assert t.shape == (B,)
        model_output = model(x, self._scale_timesteps(t), **model_kwargs)
        # Learned-variance models emit 2*C channels: mean-related output plus
        # per-pixel variance values.
        if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
            assert model_output.shape == (B, C * 2, *x.shape[2:])
            model_output, model_var_values = th.split(model_output, C, dim=1)
            if self.model_var_type == ModelVarType.LEARNED:
                # The network predicts log-variance directly.
                model_log_variance = model_var_values
                model_variance = th.exp(model_log_variance)
            else:
                # LEARNED_RANGE: interpolate in log space between the smallest
                # (posterior) and largest (beta) admissible variances.
                min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
                max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                # The model_var_values is [-1, 1] for [min_var, max_var].
                frac = (model_var_values + 1) / 2
                model_log_variance = frac * max_log + (1 - frac) * min_log
                model_variance = th.exp(model_log_variance)
        else:
            # Fixed-variance models: pick the schedule, then broadcast per-timestep.
            model_variance, model_log_variance = {
                # for fixedlarge, we set the initial (log-)variance like so
                # to get a better decoder log likelihood.
                ModelVarType.FIXED_LARGE: (
                    np.append(self.posterior_variance[1], self.betas[1:]),
                    np.log(np.append(self.posterior_variance[1], self.betas[1:])),
                ),
                ModelVarType.FIXED_SMALL: (
                    self.posterior_variance,
                    self.posterior_log_variance_clipped,
                ),
            }[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
        def process_xstart(x):
            # Optional user hook runs before clamping.
            if denoised_fn is not None:
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp(-1, 1)
            return x
        # Derive (model_mean, pred_xstart) according to what the network predicts.
        if self.model_mean_type == ModelMeanType.PREVIOUS_X:
            pred_xstart = process_xstart(
                self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
            )
            model_mean = model_output
        elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
            if self.model_mean_type == ModelMeanType.START_X:
                pred_xstart = process_xstart(model_output)
            else:
                pred_xstart = process_xstart(
                    self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
                )
            # The mean of p(x_{t-1}|x_t) is the posterior mean given pred_xstart.
            model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
        else:
            raise NotImplementedError(self.model_mean_type)
        assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
        return {
            "mean": model_mean,
            "variance": model_variance,
            "log_variance": model_log_variance,
            "pred_xstart": pred_xstart,
        }
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
# postprocess_fn=None,
# randomize_class=False,
# resizers = None,
# range_t=0,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
batch_size = shape[0]
init_image_batch = th.tile(init_image, dims=(batch_size, 1, 1, 1))
img = self.q_sample(
x_start=init_image_batch,
t=th.tensor(indices[0], dtype=th.long, device=device),
noise=img,
)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
# if randomize_class and "y" in model_kwargs:
# model_kwargs["y"] = th.randint(
# low=0,
# high=model.num_classes,
# size=model_kwargs["y"].shape,
# device=model_kwargs["y"].device,
# )
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,# if t[0] > 0 else None,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
):
final = sample
return final["sample"]
    def ddim_sample_loop_progressive(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
        eta=0.0,
        skip_timesteps=0,
        init_image=None,
        randomize_class=False,
    ):
        """
        Use DDIM to sample from the model and yield intermediate samples from
        each timestep of DDIM.
        Same usage as p_sample_loop_progressive().

        NOTE(review): unlike p_sample_loop_progressive(), `init_image` is never
        used here — only `skip_timesteps` shortens the schedule. Confirm
        whether init-image noising was intentionally omitted.
        `randomize_class` is accepted but currently unused (the randomization
        code below is commented out).
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if noise is not None:
            img = noise
        else:
            img = th.randn(*shape, device=device)
        # Remaining timesteps, iterated from largest to smallest.
        indices = list(range(self.num_timesteps - skip_timesteps))[::-1] # self.num_timesteps
        # import ipdb; ipdb.set_trace()
        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            # if randomize_class and "y" in model_kwargs:
            #     model_kwargs["y"] = th.randint(
            #         low=0,
            #         high=model.num_classes,
            #         size=model_kwargs["y"].shape,
            #         device=model_kwargs["y"].device,
            #     )
            with th.no_grad():
                out = self.ddim_sample(
                    model,
                    img,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    cond_fn= cond_fn,
                    model_kwargs=model_kwargs,
                    eta=eta,
                )
                yield out
                img = out["sample"]
def ddim_reverse_sample(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
    def ddim_reverse_sample_loop(self,
                                model,
                                x,
                                clip_denoised=True,
                                denoised_fn=None,
                                model_kwargs=None,
                                eta=0.0,
                                skip_timesteps=0,
                                device=None):
        """
        Run the deterministic DDIM reverse ODE over all (remaining) timesteps,
        encoding image batch `x` into the corresponding latent, and return the
        final tensor.

        NOTE(review): the tqdm progress bar here is unconditional — there is no
        `progress` flag, unlike the forward sampling loops.
        """
        if device is None:
            device = next(model.parameters()).device
        img = x
        # Ascending timesteps: reverse ODE moves from data towards noise.
        indices = list(range(self.num_timesteps - skip_timesteps)) # self.num_timesteps
        from tqdm.auto import tqdm
        indices = tqdm(indices)
        for i in indices:
            t = th.tensor([i] * x.shape[0], device=device)
            with th.no_grad():
                out = self.ddim_reverse_sample(
                    model,
                    img,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    model_kwargs=model_kwargs,
                    eta=eta,
                )
                # import ipdb;ipdb.set_trace()
                img = out["sample"]
        return img
def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(true_mean, true_log_variance_clipped, out["mean"], out["log_variance"])
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
        """
        Compute training losses for a single timestep.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param t: a batch of timestep indices.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param noise: if specified, the specific Gaussian noise to try to remove.
        :return: a dict with the key "loss" containing a tensor of shape [N].
                 Some mean or variance settings may also have other keys.
        """
        if model_kwargs is None:
            model_kwargs = {}
        if noise is None:
            noise = th.randn_like(x_start)
        # Diffuse the clean batch forward to timestep t.
        x_t = self.q_sample(x_start, t, noise=noise)
        terms = {}
        if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
            # Pure variational-bound training.
            terms["loss"] = self._vb_terms_bpd(
                model=model,
                x_start=x_start,
                x_t=x_t,
                t=t,
                clip_denoised=False,
                model_kwargs=model_kwargs,
            )["output"]
            if self.loss_type == LossType.RESCALED_KL:
                terms["loss"] *= self.num_timesteps
        elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
            model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
            if self.model_var_type in [
                ModelVarType.LEARNED,
                ModelVarType.LEARNED_RANGE,
            ]:
                B, C = x_t.shape[:2]
                assert model_output.shape == (B, C * 2, *x_t.shape[2:])
                model_output, model_var_values = th.split(model_output, C, dim=1)
                # Learn the variance using the variational bound, but don't let
                # it affect our mean prediction.
                frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
                terms["vb"] = self._vb_terms_bpd(
                    model=lambda *args, r=frozen_out: r,
                    x_start=x_start,
                    x_t=x_t,
                    t=t,
                    clip_denoised=False,
                )["output"]
                if self.loss_type == LossType.RESCALED_MSE:
                    # Divide by 1000 for equivalence with initial implementation.
                    # Without a factor of 1/1000, the VB term hurts the MSE term.
                    terms["vb"] *= self.num_timesteps / 1000.0
            # Regression target depends on what the network is trained to predict.
            target = {
                ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
                    x_start=x_start, x_t=x_t, t=t
                )[0],
                ModelMeanType.START_X: x_start,
                ModelMeanType.EPSILON: noise,
            }[self.model_mean_type]
            assert model_output.shape == target.shape == x_start.shape
            terms["mse"] = mean_flat((target - model_output) ** 2)
            if "vb" in terms:
                terms["loss"] = terms["mse"] + terms["vb"]
            else:
                terms["loss"] = terms["mse"]
        else:
            raise NotImplementedError(self.loss_type)
        return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
        """
        Compute the entire variational lower-bound, measured in bits-per-dim,
        as well as other related quantities.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param clip_denoised: if True, clip denoised samples.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - total_bpd: the total variational lower-bound, per batch element.
                 - prior_bpd: the prior term in the lower-bound.
                 - vb: an [N x T] tensor of terms in the lower-bound.
                 - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
                 - mse: an [N x T] tensor of epsilon MSEs for each timestep.
        """
        device = x_start.device
        batch_size = x_start.shape[0]
        vb = []
        xstart_mse = []
        mse = []
        # Walk timesteps from T-1 down to 0, scoring the model at each one.
        for t in list(range(self.num_timesteps))[::-1]:
            t_batch = th.tensor([t] * batch_size, device=device)
            noise = th.randn_like(x_start)
            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
            # Calculate VLB term at the current timestep
            with th.no_grad():
                out = self._vb_terms_bpd(
                    model,
                    x_start=x_start,
                    x_t=x_t,
                    t=t_batch,
                    clip_denoised=clip_denoised,
                    model_kwargs=model_kwargs,
                )
            vb.append(out["output"])
            xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
            # Score the implied eps prediction against the true noise.
            eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
            mse.append(mean_flat((eps - noise) ** 2))
        # Stack per-timestep results into [N x T] tensors (index 0 is t = T-1).
        vb = th.stack(vb, dim=1)
        xstart_mse = th.stack(xstart_mse, dim=1)
        mse = th.stack(mse, dim=1)
        prior_bpd = self._prior_bpd(x_start)
        total_bpd = vb.sum(dim=1) + prior_bpd
        return {
            "total_bpd": total_bpd,
            "prior_bpd": prior_bpd,
            "vb": vb,
            "xstart_mse": xstart_mse,
            "mse": mse,
        }
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
| 36,586 | 38.130481 | 129 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/train_util.py | import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0
class TrainLoop:
    """
    Distributed training loop for diffusion models.

    Handles checkpoint resume, EMA parameter tracking, mixed-precision
    (via MixedPrecisionTrainer), microbatched gradient accumulation,
    DDP wrapping, LR annealing, logging, and periodic saving.
    """
    def __init__(
        self,
        *,
        model,
        diffusion,
        data,
        batch_size,
        microbatch,
        lr,
        ema_rate,
        log_interval,
        save_interval,
        resume_checkpoint,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        schedule_sampler=None,
        weight_decay=0.0,
        lr_anneal_steps=0,
    ):
        """
        :param model: the nn.Module to train.
        :param diffusion: the diffusion process providing training_losses().
        :param data: an iterator yielding (batch, cond) pairs.
        :param microbatch: gradient-accumulation chunk size; <= 0 disables it.
        :param ema_rate: a float or comma-separated string of EMA decay rates.
        :param lr_anneal_steps: if nonzero, linearly anneal LR to 0 over this
            many steps and stop the loop afterwards.
        """
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        # microbatch <= 0 means "use the full batch at once".
        self.microbatch = microbatch if microbatch > 0 else batch_size
        self.lr = lr
        # Normalize to a list of float decay rates.
        self.ema_rate = (
            [ema_rate]
            if isinstance(ema_rate, float)
            else [float(x) for x in ema_rate.split(",")]
        )
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.step = 0
        self.resume_step = 0
        # Effective samples per optimizer step across all ranks.
        self.global_batch = self.batch_size * dist.get_world_size()
        self.sync_cuda = th.cuda.is_available()
        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(
            model=self.model,
            use_fp16=self.use_fp16,
            fp16_scale_growth=fp16_scale_growth,
        )
        self.opt = AdamW(
            self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
        )
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed, either due to a restart or a checkpoint
            # being specified at the command line.
            self.ema_params = [
                self._load_ema_parameters(rate) for rate in self.ema_rate
            ]
        else:
            # Fresh run: initialize each EMA copy from the current weights.
            self.ema_params = [
                copy.deepcopy(self.mp_trainer.master_params)
                for _ in range(len(self.ema_rate))
            ]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(
                self.model,
                device_ids=[dist_util.dev()],
                output_device=dist_util.dev(),
                broadcast_buffers=False,
                bucket_cap_mb=128,
                find_unused_parameters=False,
            )
        else:
            if dist.get_world_size() > 1:
                logger.warn(
                    "Distributed training requires CUDA. "
                    "Gradients will not be synchronized properly!"
                )
            self.use_ddp = False
            self.ddp_model = self.model
    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (rank 0) and broadcast them."""
        resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if dist.get_rank() == 0:
                logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
                self.model.load_state_dict(
                    dist_util.load_state_dict(
                        resume_checkpoint, map_location=dist_util.dev()
                    )
                )
        dist_util.sync_params(self.model.parameters())
    def _load_ema_parameters(self, rate):
        """Load the EMA weights for `rate` if a checkpoint exists, else copy current params."""
        ema_params = copy.deepcopy(self.mp_trainer.master_params)
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if dist.get_rank() == 0:
                logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
                state_dict = dist_util.load_state_dict(
                    ema_checkpoint, map_location=dist_util.dev()
                )
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params
    def _load_optimizer_state(self):
        """Restore AdamW state from the opt checkpoint matching resume_step, if present."""
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        opt_checkpoint = bf.join(
            bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
        )
        if bf.exists(opt_checkpoint):
            logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
            state_dict = dist_util.load_state_dict(
                opt_checkpoint, map_location=dist_util.dev()
            )
            self.opt.load_state_dict(state_dict)
    def run_loop(self):
        """Main training loop: step until lr_anneal_steps (if set), logging and saving periodically."""
        while (
            not self.lr_anneal_steps
            or self.step + self.resume_step < self.lr_anneal_steps
        ):
            batch, cond = next(self.data)
            self.run_step(batch, cond)
            if self.step % self.log_interval == 0:
                logger.dumpkvs()
            if self.step % self.save_interval == 0:
                self.save()
                # Run for a finite amount of time in integration tests.
                if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                    return
            self.step += 1
        # Save the last checkpoint if it wasn't already saved.
        if (self.step - 1) % self.save_interval != 0:
            self.save()
    def run_step(self, batch, cond):
        """One optimization step: forward/backward, optimizer step, EMA, LR anneal, logging."""
        self.forward_backward(batch, cond)
        took_step = self.mp_trainer.optimize(self.opt)
        # EMA only advances when the optimizer actually applied an update
        # (mp_trainer can skip steps, e.g. on fp16 overflow).
        if took_step:
            self._update_ema()
        self._anneal_lr()
        self.log_step()
    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of (batch, cond)."""
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i : i + self.microbatch].to(dist_util.dev())
            micro_cond = {
                k: v[i : i + self.microbatch].to(dist_util.dev())
                for k, v in cond.items()
            }
            last_batch = (i + self.microbatch) >= batch.shape[0]
            t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs=micro_cond,
            )
            # Only sync DDP gradients on the final microbatch.
            if last_batch or not self.use_ddp:
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )
            loss = (losses["loss"] * weights).mean()
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            self.mp_trainer.backward(loss)
    def _update_ema(self):
        """Blend current master params into each EMA copy at its decay rate."""
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.mp_trainer.master_params, rate=rate)
    def _anneal_lr(self):
        """Linearly decay the learning rate towards 0 over lr_anneal_steps (no-op if 0)."""
        if not self.lr_anneal_steps:
            return
        frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
        lr = self.lr * (1 - frac_done)
        for param_group in self.opt.param_groups:
            param_group["lr"] = lr
    def log_step(self):
        """Record step count and total samples seen to the logger."""
        logger.logkv("step", self.step + self.resume_step)
        logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
    def save(self):
        """Save model, all EMA copies, and optimizer state (rank 0 writes files)."""
        def save_checkpoint(rate, params):
            # rate == 0 marks the raw model; nonzero rates are EMA snapshots.
            state_dict = self.mp_trainer.master_params_to_state_dict(params)
            if dist.get_rank() == 0:
                logger.log(f"saving model {rate}...")
                if not rate:
                    filename = f"model{(self.step+self.resume_step):06d}.pt"
                else:
                    filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.mp_trainer.master_params)
        for rate, params in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        if dist.get_rank() == 0:
            with bf.BlobFile(
                bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
                "wb",
            ) as f:
                th.save(self.opt.state_dict(), f)
        dist.barrier()
def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps. Returns 0 when no step count can be parsed.
    """
    parts = filename.split("model")
    if len(parts) < 2:
        # "model" does not appear in the name at all.
        return 0
    step_text = parts[-1].split(".")[0]
    try:
        return int(step_text)
    except ValueError:
        return 0
def get_blob_logdir():
    """Directory checkpoints are written to; override to target a blobstore
    or some other external drive."""
    return logger.get_dir()
def find_resume_checkpoint():
    """Hook for auto-discovering the latest checkpoint on your infrastructure
    (e.g. blob storage); the default implementation finds nothing."""
    return None
def find_ema_checkpoint(main_checkpoint, step, rate):
    """Return the path of the EMA checkpoint next to main_checkpoint for the
    given step and rate, or None if it does not exist."""
    if main_checkpoint is None:
        return None
    candidate = bf.join(bf.dirname(main_checkpoint), f"ema_{rate}_{(step):06d}.pt")
    return candidate if bf.exists(candidate) else None
def log_loss_dict(diffusion, ts, losses):
    """Log the mean of each loss tensor, plus per-quartile means keyed by
    which quarter of the diffusion schedule each sample's timestep fell in."""
    for name, tensor in losses.items():
        logger.logkv_mean(name, tensor.mean().item())
        # Bucket every per-sample loss into one of four timestep quartiles.
        for step, value in zip(ts.cpu().numpy(), tensor.detach().cpu().numpy()):
            quartile = int(4 * step / diffusion.num_timesteps)
            logger.logkv_mean(f"{name}_q{quartile}", value)
| 10,604 | 34.115894 | 88 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/respace.py | import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.
    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.
    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.
    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            # Find an integer stride that yields exactly the requested count.
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Bug fix: the message previously reported num_timesteps instead
            # of the step count the caller actually asked for.
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # Distribute the remainder across the first `extra` sections.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so the first and last step of the section are kept.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])
        # Instantiate the full-length process once, only to read off its
        # cumulative-alpha schedule.
        full_process = GaussianDiffusion(**kwargs)  # pylint: disable=missing-kwoa
        prev_alpha_bar = 1.0
        trimmed_betas = []
        for step, alpha_bar in enumerate(full_process.alphas_cumprod):
            if step not in self.use_timesteps:
                continue
            # Pick the beta that reproduces the same cumulative product
            # between two *kept* steps: 1 - alpha_bar[t] / alpha_bar[prev kept].
            trimmed_betas.append(1 - alpha_bar / prev_alpha_bar)
            prev_alpha_bar = alpha_bar
            self.timestep_map.append(step)
        kwargs["betas"] = np.array(trimmed_betas)
        super().__init__(**kwargs)

    def p_mean_variance(self, model, *args, **kwargs):  # pylint: disable=signature-differs
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)

    def training_losses(self, model, *args, **kwargs):  # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)

    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)

    def _wrap_model(self, model):
        # Idempotent: an already-wrapped callable passes through untouched.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
        )

    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
| 5,193 | 39.263566 | 85 | py |
ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/dist_util.py | """
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8  # GPUs available on each physical node
SETUP_RETRY_COUNT = 3  # NOTE(review): not referenced in this module — possibly used by external callers; confirm before removing
def setup_dist():
    """
    Setup a distributed process group.

    Rank/size and the rendezvous address/port are agreed on via MPI
    broadcasts, then handed to ``torch.distributed`` through the standard
    env:// variables.  Safe to call repeatedly: a no-op once initialized.
    """
    if dist.is_initialized():
        return
    # Restrict this rank to a single GPU on its node (rank % GPUS_PER_NODE).
    os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
    comm = MPI.COMM_WORLD
    backend = "gloo" if not th.cuda.is_available() else "nccl"
    if backend == "gloo":
        hostname = "localhost"
    else:
        hostname = socket.gethostbyname(socket.getfqdn())
    # Rank 0's hostname/port win; every rank must hit these bcast collectives
    # in the same order or the program deadlocks.
    os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
    os.environ["RANK"] = str(comm.rank)
    os.environ["WORLD_SIZE"] = str(comm.size)
    port = comm.bcast(_find_free_port(), root=0)
    os.environ["MASTER_PORT"] = str(port)
    dist.init_process_group(backend=backend, init_method="env://")
def dev():
    """
    Get the device to use for torch.distributed.

    :return: ``th.device("cuda")`` when CUDA is available, else the CPU device.
    """
    if th.cuda.is_available():
        # Fix: the original used f"cuda" — an f-string with no placeholder.
        return th.device("cuda")
    return th.device("cpu")
def load_state_dict(path, **kwargs):
    """
    Load a PyTorch file without redundant fetches across MPI ranks.

    Rank 0 reads the file once and broadcasts the bytes in <=1 GiB chunks
    (MPI messages have a size limit); the other ranks reassemble the stream
    from matching bcast calls.  ``kwargs`` are forwarded to ``th.load``
    (e.g. ``map_location``).
    """
    chunk_size = 2 ** 30  # MPI has a relatively small size limit
    if MPI.COMM_WORLD.Get_rank() == 0:
        with bf.BlobFile(path, "rb") as f:
            data = f.read()
        num_chunks = len(data) // chunk_size
        if len(data) % chunk_size:
            num_chunks += 1
        # Protocol: chunk count first, then each chunk in file order.
        MPI.COMM_WORLD.bcast(num_chunks)
        for i in range(0, len(data), chunk_size):
            MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
    else:
        # Non-root ranks must mirror the exact bcast sequence above.
        num_chunks = MPI.COMM_WORLD.bcast(None)
        data = bytes()
        for _ in range(num_chunks):
            data += MPI.COMM_WORLD.bcast(None)
    return th.load(io.BytesIO(data), **kwargs)
def sync_params(params):
    """
    Synchronize a sequence of Tensors across ranks from rank 0.

    Every rank's copies are overwritten in place with rank 0's values.
    """
    with th.no_grad():
        for tensor in params:
            dist.broadcast(tensor, 0)
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
| 2,424 | 24.797872 | 87 | py |
Few-NERD | Few-NERD-main/run_supervised.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert). """
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
# from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from util.supervised_util import convert_examples_to_features, get_labels, read_examples_from_file
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer
from util.metric import Metrics
logger = logging.getLogger(__name__)
# ALL_MODELS = sum(
# (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, )),
# ())
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
}
def set_seed(args):
    """Seed every RNG (python, numpy, torch, plus CUDA when GPUs are used)."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Train the model.

    Runs the fine-tuning loop (optionally fp16 / multi-GPU / distributed),
    evaluates on dev every ``args.logging_steps`` when
    ``args.evaluate_during_training`` is set, keeps the best-f1 weights in
    ``<output_dir>/checkpoint-best``, and snapshots every ``args.save_steps``.

    :return: ``(global_step, mean training loss per optimization step)``
    """
    # if args.local_rank in [-1, 0]:
    #     tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay);
    # biases and LayerNorm weights are exempt from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss = 0.0
    best_metric = 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "token_type_ids": batch[2] if args.model_type in ["bert", "xlnet"] else None,
                      # XLM and RoBERTa don"t use segment_ids
                      "labels": batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                # model.zero_grad()
                optimizer.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
                        if results["f1"] > best_metric:
                            # New best dev f1: persist weights/tokenizer to checkpoint-best.
                            best_metric = results["f1"]
                            output_dir = os.path.join(args.output_dir, "checkpoint-best")
                            if not os.path.exists(output_dir):
                                os.makedirs(output_dir)
                            model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
                            model_to_save.save_pretrained(output_dir)
                            torch.save(args, os.path.join(output_dir, "training_args.bin"))
                            tokenizer.save_pretrained(output_dir)
                            logger.info("Saving model checkpoint to %s", output_dir)
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    # if args.local_rank in [-1, 0]:
    #     tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Evaluate the model on the ``mode`` split (e.g. "dev" or "test").

    Accumulates logits and gold label ids over the whole split, converts them
    to per-sentence label strings (skipping ``pad_token_label_id`` positions),
    and scores with entity-level precision/recall/f1.

    :return: ``(results dict with precision/recall/f1, list of predicted
              label sequences, one per sentence)``
    """
    eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "token_type_ids": batch[2] if args.model_type in ["bert", "xlnet"] else None,
                      # XLM and RoBERTa don"t use segment_ids
                      "labels": batch[3]}
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[0], outputs[1]
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        # Accumulate logits/labels on CPU across the whole split.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        # memory management
        del outputs, tmp_eval_loss, logits
        torch.cuda.empty_cache()
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)
    label_map = {i: label for i, label in enumerate(labels)}
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            # Positions labeled pad_token_label_id (padding / subword
            # continuations) are excluded from scoring.
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    metric = Metrics()
    p, r, f = metric.metrics_by_entity(preds_list, out_label_list)
    results = {
        "precision": p,
        "recall": r,
        "f1": f
    }
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info(" %s = %s", key, str(results[key]))
    return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    """Build (or load from cache) the TensorDataset for the ``mode`` split.

    Features are cached per (mode, model name, max_seq_length) inside
    ``args.data_dir``.  Under distributed training only rank 0 builds the
    cache; the other ranks wait at a barrier and then read it.

    :return: TensorDataset of (input_ids, input_mask, segment_ids, label_ids)
    """
    # Bug fix: the original guards read ``not evaluate``, where ``evaluate``
    # is the *function* defined above (always truthy), so the condition was
    # always False and the barriers never fired — non-zero ranks could race
    # rank 0 while it was writing the cache. Gate on the split name instead.
    if args.local_rank not in [-1, 0] and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}".format(mode,
                                        list(filter(None, args.model_name_or_path.split("/"))).pop(),
                                        str(args.max_seq_length)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        examples = read_examples_from_file(args.data_dir, mode)
        features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
                                                cls_token_at_end=bool(args.model_type in ["xlnet"]),
                                                # xlnet has a cls token at the end
                                                cls_token=tokenizer.cls_token,
                                                cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
                                                sep_token=tokenizer.sep_token,
                                                sep_token_extra=bool(args.model_type in ["roberta"]),
                                                # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
                                                pad_on_left=bool(args.model_type in ["xlnet"]),
                                                # pad on the left for xlnet
                                                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
                                                pad_token_label_id=pad_token_label_id
                                                )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
        torch.distributed.barrier()  # Rank 0 releases the other ranks once the cache exists.
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
def main():
    """CLI entry point for supervised NER fine-tuning.

    Parses arguments, sets up devices/logging (optionally distributed), then
    runs any combination of --do_train / --do_eval / --do_predict. Evaluation
    and prediction load weights from <output_dir>/checkpoint-best.
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--labels", default="", type=str,
                        help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", action="store_true",
                        help="Whether to run predictions on the test set.")
    parser.add_argument("--evaluate_during_training", action="store_true",
                        help="Whether to run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action="store_true",
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action="store_true",
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action="store_true",
                        help="Avoid using CUDA when available")
    parser.add_argument("--overwrite_output_dir", action="store_true",
                        help="Overwrite the content of the output directory")
    parser.add_argument("--overwrite_cache", action="store_true",
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--seed", type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument("--fp16", action="store_true",
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1",
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir))
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
                        datefmt="%m/%d/%Y %H:%M:%S",
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    set_seed(args)
    # Prepare CONLL-2003 task
    labels = get_labels(args.labels)
    num_labels = len(labels)
    # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
    pad_token_label_id = CrossEntropyLoss().ignore_index
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config)
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    best_checkpoint = os.path.join(args.output_dir, "checkpoint-best")
    # Fallback save: only fires when training never wrote checkpoint-best
    # (e.g. evaluate_during_training was off).
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0) and not os.path.exists(best_checkpoint):
        os.makedirs(best_checkpoint)
        logger.info("Saving model checkpoint to %s", best_checkpoint)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(best_checkpoint)
        tokenizer.save_pretrained(best_checkpoint)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(best_checkpoint, "training_args.bin"))
    # Evaluation
    # NOTE(review): the eval/predict paths below load from checkpoint-best and
    # assume it already exists (e.g. from a prior --do_train run) — confirm.
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(best_checkpoint, do_lower_case=args.do_lower_case)
        checkpoints = [best_checkpoint]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(best_checkpoint, do_lower_case=args.do_lower_case)
        model = model_class.from_pretrained(best_checkpoint)
        model.to(args.device)
        result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
        # Save results
        output_test_results_file = os.path.join(best_checkpoint, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("{} = {}\n".format(key, str(result[key])))
        # Save predictions
        output_test_predictions_file = os.path.join(best_checkpoint, "test_predictions.txt")
        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
                example_id = 0
                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)
                        if not predictions[example_id]:
                            example_id += 1
                    elif predictions[example_id]:
                        output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    return results
if __name__ == "__main__":
    main()
| 28,048 | 52.940385 | 184 | py |
Few-NERD | Few-NERD-main/train_demo.py | from transformers import BertTokenizer
from util.data_loader import get_loader
from util.framework import FewShotNERFramework
from util.word_encoder import BERTWordEncoder
from model.proto import Proto
from model.nnshot import NNShot
import sys
import torch
from torch import optim, nn
import numpy as np
import json
import argparse
import os
import torch
import random
def set_seed(seed):
    """Make the run reproducible: seed every RNG and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def main():
    """CLI entry point for few-shot NER with Proto / NNShot / StructShot.

    Builds N-way-K-shot episode loaders for the chosen mode, constructs the
    requested model, trains unless --only_test, then reports test P/R/F1 and
    an error breakdown (fp / fn / within / outer).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='inter',
            help='training mode, must be in [inter, intra]')
    parser.add_argument('--trainN', default=2, type=int,
            help='N in train')
    parser.add_argument('--N', default=2, type=int,
            help='N way')
    parser.add_argument('--K', default=2, type=int,
            help='K shot')
    parser.add_argument('--Q', default=3, type=int,
            help='Num of query per class')
    parser.add_argument('--batch_size', default=4, type=int,
            help='batch size')
    parser.add_argument('--train_iter', default=600, type=int,
            help='num of iters in training')
    parser.add_argument('--val_iter', default=100, type=int,
            help='num of iters in validation')
    parser.add_argument('--test_iter', default=500, type=int,
            help='num of iters in testing')
    parser.add_argument('--val_step', default=20, type=int,
           help='val after training how many iters')
    parser.add_argument('--model', default='proto',
            help='model name, must be proto, nnshot, or structshot')
    parser.add_argument('--max_length', default=100, type=int,
           help='max length')
    parser.add_argument('--lr', default=1e-4, type=float,
           help='learning rate')
    parser.add_argument('--grad_iter', default=1, type=int,
           help='accumulate gradient every x iterations')
    parser.add_argument('--load_ckpt', default=None,
           help='load ckpt')
    parser.add_argument('--save_ckpt', default=None,
           help='save ckpt')
    parser.add_argument('--fp16', action='store_true',
           help='use nvidia apex fp16')
    parser.add_argument('--only_test', action='store_true',
           help='only test')
    parser.add_argument('--ckpt_name', type=str, default='',
           help='checkpoint name.')
    parser.add_argument('--seed', type=int, default=0,
           help='random seed')
    parser.add_argument('--ignore_index', type=int, default=-1,
           help='label index to ignore when calculating loss and metrics')
    parser.add_argument('--use_sampled_data', action='store_true',
           help='use released sampled data, the data should be stored at "data/episode-data/" ')
    # only for bert / roberta
    parser.add_argument('--pretrain_ckpt', default=None,
           help='bert / roberta pre-trained checkpoint')
    # only for prototypical networks
    parser.add_argument('--dot', action='store_true',
           help='use dot instead of L2 distance for proto')
    # only for structshot
    parser.add_argument('--tau', default=0.05, type=float,
           help='StructShot parameter to re-normalizes the transition probabilities')
    # experiment
    parser.add_argument('--use_sgd_for_bert', action='store_true',
           help='use SGD instead of AdamW for BERT.')
    opt = parser.parse_args()
    trainN = opt.trainN
    N = opt.N
    K = opt.K
    Q = opt.Q
    batch_size = opt.batch_size
    model_name = opt.model
    max_length = opt.max_length
    print("{}-way-{}-shot Few-Shot NER".format(N, K))
    print("model: {}".format(model_name))
    print("max_length: {}".format(max_length))
    print('mode: {}'.format(opt.mode))
    set_seed(opt.seed)
    print('loading model and tokenizer...')
    pretrain_ckpt = opt.pretrain_ckpt or 'bert-base-uncased'
    word_encoder = BERTWordEncoder(
        pretrain_ckpt)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    print('loading data...')
    if not opt.use_sampled_data:
        # Full-corpus mode: episodes are sampled on the fly; fetch the split
        # files with the download script if they are missing.
        opt.train = f'data/{opt.mode}/train.txt'
        opt.test = f'data/{opt.mode}/test.txt'
        opt.dev = f'data/{opt.mode}/dev.txt'
        if not (os.path.exists(opt.train) and os.path.exists(opt.dev) and os.path.exists(opt.test)):
            os.system(f'bash data/download.sh {opt.mode}')
    else:
        # Pre-sampled episode files released with the benchmark.
        opt.train = f'data/episode-data/{opt.mode}/train_{opt.N}_{opt.K}.jsonl'
        opt.test = f'data/episode-data/{opt.mode}/test_{opt.N}_{opt.K}.jsonl'
        opt.dev = f'data/episode-data/{opt.mode}/dev_{opt.N}_{opt.K}.jsonl'
        if not (os.path.exists(opt.train) and os.path.exists(opt.dev) and os.path.exists(opt.test)):
            os.system(f'bash data/download.sh episode-data')
            os.system('unzip -d data/ data/episode-data.zip')
    if opt.mode == "supervised":
        print("Warning: you are running few-shot learning methods on `supervised` dataset, if it is not expected, please change to `--mode inter` or `--mode intra`.")
    train_data_loader = get_loader(opt.train, tokenizer,
            N=trainN, K=K, Q=Q, batch_size=batch_size, max_length=max_length, ignore_index=opt.ignore_index, use_sampled_data=opt.use_sampled_data)
    val_data_loader = get_loader(opt.dev, tokenizer,
            N=N, K=K, Q=Q, batch_size=batch_size, max_length=max_length, ignore_index=opt.ignore_index, use_sampled_data=opt.use_sampled_data)
    test_data_loader = get_loader(opt.test, tokenizer,
            N=N, K=K, Q=Q, batch_size=batch_size, max_length=max_length, ignore_index=opt.ignore_index, use_sampled_data=opt.use_sampled_data)
    prefix = '-'.join([model_name, opt.mode, str(N), str(K), 'seed'+str(opt.seed)])
    if opt.dot:
        prefix += '-dot'
    if len(opt.ckpt_name) > 0:
        prefix += '-' + opt.ckpt_name
    if model_name == 'proto':
        print('use proto')
        model = Proto(word_encoder, dot=opt.dot, ignore_index=opt.ignore_index)
        framework = FewShotNERFramework(train_data_loader, val_data_loader, test_data_loader, use_sampled_data=opt.use_sampled_data)
    elif model_name == 'nnshot':
        print('use nnshot')
        model = NNShot(word_encoder, dot=opt.dot, ignore_index=opt.ignore_index)
        framework = FewShotNERFramework(train_data_loader, val_data_loader, test_data_loader, use_sampled_data=opt.use_sampled_data)
    elif model_name == 'structshot':
        print('use structshot')
        # StructShot reuses the NNShot model but decodes with Viterbi.
        model = NNShot(word_encoder, dot=opt.dot, ignore_index=opt.ignore_index)
        framework = FewShotNERFramework(train_data_loader, val_data_loader, test_data_loader, N=opt.N, tau=opt.tau, train_fname=opt.train, viterbi=True, use_sampled_data=opt.use_sampled_data)
    else:
        raise NotImplementedError
    if not os.path.exists('checkpoint'):
        os.mkdir('checkpoint')
    ckpt = 'checkpoint/{}.pth.tar'.format(prefix)
    if opt.save_ckpt:
        ckpt = opt.save_ckpt
    print('model-save-path:', ckpt)
    if torch.cuda.is_available():
        model.cuda()
    if not opt.only_test:
        # -1 is a sentinel meaning "use 2e-5"; note the argparse default is
        # 1e-4, so this branch only fires when --lr -1 is passed explicitly.
        if opt.lr == -1:
            opt.lr = 2e-5
        framework.train(model, prefix,
                load_ckpt=opt.load_ckpt, save_ckpt=ckpt,
                val_step=opt.val_step, fp16=opt.fp16,
                train_iter=opt.train_iter, warmup_step=int(opt.train_iter * 0.1), val_iter=opt.val_iter, learning_rate=opt.lr, use_sgd_for_bert=opt.use_sgd_for_bert)
    else:
        ckpt = opt.load_ckpt
        if ckpt is None:
            print("Warning: --load_ckpt is not specified. Will load Hugginface pre-trained checkpoint.")
            ckpt = 'none'
    # test
    precision, recall, f1, fp, fn, within, outer = framework.eval(model, opt.test_iter, ckpt=ckpt)
    print("RESULT: precision: %.4f, recall: %.4f, f1:%.4f" % (precision, recall, f1))
    print('ERROR ANALYSIS: fp: %.4f, fn: %.4f, within:%.4f, outer: %.4f'%(fp, fn, within, outer))
if __name__ == "__main__":
    main()
| 8,044 | 42.02139 | 191 | py |
Few-NERD | Few-NERD-main/util/word_encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import os
from torch import optim
from transformers import BertTokenizer, BertModel, BertForMaskedLM, BertForSequenceClassification, RobertaModel, RobertaTokenizer, RobertaForSequenceClassification
class BERTWordEncoder(nn.Module):
    """Token encoder backed by a pretrained BERT model.

    Each token is represented by the element-wise sum of BERT's last
    four hidden layers.
    """

    def __init__(self, pretrain_path):
        nn.Module.__init__(self)
        # Load pretrained weights from the given path / model name.
        self.bert = BertModel.from_pretrained(pretrain_path)

    def forward(self, words, masks):
        """Encode token ids into contextual embeddings.

        words: [num_sent, num_tokens] token-id tensor
        masks: [num_sent, num_tokens] attention mask
        returns: [num_sent, num_tokens, hidden_size] embeddings
        """
        outputs = self.bert(words, attention_mask=masks, output_hidden_states=True, return_dict=True)
        # Stack the last four hidden layers along a new leading dim and sum
        # them per token (equivalent to cat-of-unsqueeze followed by sum).
        stacked = torch.stack(list(outputs['hidden_states'][-4:]), dim=0)
        return stacked.sum(dim=0)
| 1,047 | 42.666667 | 163 | py |
Few-NERD | Few-NERD-main/util/data_loader.py | import torch
import torch.utils.data as data
import os
from .fewshotsampler import FewshotSampler, FewshotSampleBase
import numpy as np
import json
def get_class_name(rawtag):
    """Strip a BIO prefix ('B-' or 'I-') from a raw tag, leaving the class name."""
    if rawtag.startswith(('B-', 'I-')):
        return rawtag[2:]
    return rawtag
class Sample(FewshotSampleBase):
    """One sentence sample parsed from tab-separated "token\ttag" lines.

    Holds lower-cased words, the raw tags, and tags with their B-/I-
    prefixes stripped (so a run of one entity shares a single tag).
    """
    def __init__(self, filelines):
        filelines = [line.split('\t') for line in filelines]
        self.words, self.tags = zip(*filelines)
        self.words = [word.lower() for word in self.words]
        # strip B-, I- so contiguous tokens of one mention share a tag
        self.normalized_tags = list(map(get_class_name, self.tags))
        self.class_count = {}  # entity class -> number of mentions (filled lazily)
    def __count_entities__(self):
        # Each maximal run of identical non-'O' tags is one entity mention.
        # Counting at run *starts* removes the duplicated run-closing code of
        # the previous version and is safe on an empty tag list.
        prev = None
        for tag in self.normalized_tags:
            if tag != 'O' and tag != prev:
                self.class_count[tag] = self.class_count.get(tag, 0) + 1
            prev = tag
    def get_class_count(self):
        """Return {entity class: mention count}, computing it on first use."""
        if not self.class_count:
            self.__count_entities__()
        return self.class_count
    def get_tag_class(self):
        """Return the distinct entity classes in this sentence ('O' excluded)."""
        tag_class = list(set(self.normalized_tags))
        if 'O' in tag_class:
            tag_class.remove('O')
        return tag_class
    def valid(self, target_classes):
        """True iff the sample contains at least one target class and no
        entity class outside target_classes."""
        present = set(self.get_class_count().keys())
        targets = set(target_classes)
        return bool(present & targets) and not (present - targets)
    def __str__(self):
        newlines = zip(self.words, self.tags)
        return '\n'.join(['\t'.join(line) for line in newlines])
class FewShotNERDatasetWithRandomSampling(data.Dataset):
    """
    Fewshot NER Dataset
    Episodes (support/query sets) are sampled on the fly from a raw
    tab-separated tagged corpus via FewshotSampler.
    """
    def __init__(self, filepath, tokenizer, N, K, Q, max_length, ignore_label_id=-1):
        # N-way / K-shot / Q-query episode configuration.
        if not os.path.exists(filepath):
            print("[ERROR] Data file does not exist!")
            assert(0)
        self.class2sampleid = {}  # class name -> indices of samples containing it
        self.N = N
        self.K = K
        self.Q = Q
        self.tokenizer = tokenizer
        self.samples, self.classes = self.__load_data_from_file__(filepath)
        self.max_length = max_length
        self.sampler = FewshotSampler(N, K, Q, self.samples, classes=self.classes)
        self.ignore_label_id = ignore_label_id  # label for non-first subword tokens
    def __insert_sample__(self, index, sample_classes):
        # Record that sample `index` contains each class in `sample_classes`.
        for item in sample_classes:
            if item in self.class2sampleid:
                self.class2sampleid[item].append(index)
            else:
                self.class2sampleid[item] = [index]
    def __load_data_from_file__(self, filepath):
        # Parse the corpus: sentences are blank-line-separated blocks of
        # "token\ttag" lines. Returns (samples, distinct entity class names).
        samples = []
        classes = []
        with open(filepath, 'r', encoding='utf-8')as f:
            lines = f.readlines()
        samplelines = []
        index = 0
        for line in lines:
            line = line.strip()
            if line:
                samplelines.append(line)
            else:
                sample = Sample(samplelines)
                samples.append(sample)
                sample_classes = sample.get_tag_class()
                self.__insert_sample__(index, sample_classes)
                classes += sample_classes
                samplelines = []
                index += 1
        if samplelines:
            # final sentence when the file has no trailing blank line
            sample = Sample(samplelines)
            samples.append(sample)
            sample_classes = sample.get_tag_class()
            self.__insert_sample__(index, sample_classes)
            classes += sample_classes
            samplelines = []
            index += 1
        classes = list(set(classes))
        return samples, classes
    def __get_token_label_list__(self, sample):
        # WordPiece-tokenize each word; only the first subword keeps the real
        # label, the rest get ignore_label_id so they are skipped in the loss.
        tokens = []
        labels = []
        for word, tag in zip(sample.words, sample.normalized_tags):
            word_tokens = self.tokenizer.tokenize(word)
            if word_tokens:
                tokens.extend(word_tokens)
                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                word_labels = [self.tag2label[tag]] + [self.ignore_label_id] * (len(word_tokens) - 1)
                labels.extend(word_labels)
        return tokens, labels
    def __getraw__(self, tokens, labels):
        # get tokenized word list, attention mask, text mask (mask [CLS], [SEP] as well), tags
        # split into chunks of length (max_length-2)
        # 2 is for special tokens [CLS] and [SEP]
        tokens_list = []
        labels_list = []
        while len(tokens) > self.max_length - 2:
            tokens_list.append(tokens[:self.max_length-2])
            tokens = tokens[self.max_length-2:]
            labels_list.append(labels[:self.max_length-2])
            labels = labels[self.max_length-2:]
        if tokens:
            tokens_list.append(tokens)
            labels_list.append(labels)
        # add special tokens and get masks
        indexed_tokens_list = []
        mask_list = []
        text_mask_list = []
        for i, tokens in enumerate(tokens_list):
            # token -> ids
            tokens = ['[CLS]'] + tokens + ['[SEP]']
            indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens)
            # padding
            while len(indexed_tokens) < self.max_length:
                indexed_tokens.append(0)
            indexed_tokens_list.append(indexed_tokens)
            # mask
            mask = np.zeros((self.max_length), dtype=np.int32)
            mask[:len(tokens)] = 1
            mask_list.append(mask)
            # text mask, also mask [CLS] and [SEP]
            text_mask = np.zeros((self.max_length), dtype=np.int32)
            text_mask[1:len(tokens)-1] = 1
            text_mask_list.append(text_mask)
            # sanity check: one label per real token (print fires only on failure)
            assert len(labels_list[i]) == len(tokens) - 2, print(labels_list[i], tokens)
        return indexed_tokens_list, mask_list, text_mask_list, labels_list
    def __additem__(self, index, d, word, mask, text_mask, label):
        # Append one sample's chunk lists into the episode dict `d`.
        d['index'].append(index)
        d['word'] += word
        d['mask'] += mask
        d['label'] += label
        d['text_mask'] += text_mask
    def __populate__(self, idx_list, savelabeldic=False):
        '''
        populate samples into data dict
        set savelabeldic=True if you want to save label2tag dict
        'index': sample_index
        'word': tokenized word ids
        'mask': attention mask in BERT
        'label': NER labels
        'sentence_num': number of sentences in this set (a batch contains multiple sets)
        'text_mask': 0 for special tokens and paddings, 1 for real text
        '''
        dataset = {'index':[], 'word': [], 'mask': [], 'label':[], 'sentence_num':[], 'text_mask':[] }
        for idx in idx_list:
            tokens, labels = self.__get_token_label_list__(self.samples[idx])
            word, mask, text_mask, label = self.__getraw__(tokens, labels)
            word = torch.tensor(word).long()
            mask = torch.tensor(np.array(mask)).long()
            text_mask = torch.tensor(np.array(text_mask)).long()
            self.__additem__(idx, dataset, word, mask, text_mask, label)
        dataset['sentence_num'] = [len(dataset['word'])]
        if savelabeldic:
            dataset['label2tag'] = [self.label2tag]
        return dataset
    def __getitem__(self, index):
        # `index` is ignored: every call draws a fresh random episode.
        target_classes, support_idx, query_idx = self.sampler.__next__()
        # add 'O' and make sure 'O' is labeled 0
        distinct_tags = ['O'] + target_classes
        self.tag2label = {tag:idx for idx, tag in enumerate(distinct_tags)}
        self.label2tag = {idx:tag for idx, tag in enumerate(distinct_tags)}
        support_set = self.__populate__(support_idx)
        query_set = self.__populate__(query_idx, savelabeldic=True)
        return support_set, query_set
    def __len__(self):
        # Effectively unbounded: episodes are sampled randomly each call.
        return 100000
class FewShotNERDataset(FewShotNERDatasetWithRandomSampling):
    """Few-shot NER dataset over pre-sampled episodes.

    Each line of the data file is one json-encoded episode with keys
    'types', 'support' and 'query'. Tokenization/chunking is inherited
    from FewShotNERDatasetWithRandomSampling.__getraw__.
    """
    def __init__(self, filepath, tokenizer, max_length, ignore_label_id=-1):
        # Note: intentionally does NOT call the parent __init__ (no sampler needed).
        if not os.path.exists(filepath):
            print("[ERROR] Data file does not exist!")
            assert(0)
        self.class2sampleid = {}
        self.tokenizer = tokenizer
        self.samples = self.__load_data_from_file__(filepath)
        self.max_length = max_length
        self.ignore_label_id = ignore_label_id
    def __load_data_from_file__(self, filepath):
        # One json-encoded episode per line.
        with open(filepath)as f:
            lines = f.readlines()
        for i in range(len(lines)):
            lines[i] = json.loads(lines[i].strip())
        return lines
    def __additem__(self, d, word, mask, text_mask, label):
        # Append one sentence's chunk lists into the episode dict `d`.
        d['word'] += word
        d['mask'] += mask
        d['label'] += label
        d['text_mask'] += text_mask
    def __get_token_label_list__(self, words, tags):
        # Same subword labeling scheme as the parent, but over raw lists
        # (pre-sampled episodes store words/tags directly, not Sample objects).
        tokens = []
        labels = []
        for word, tag in zip(words, tags):
            word_tokens = self.tokenizer.tokenize(word)
            if word_tokens:
                tokens.extend(word_tokens)
                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                word_labels = [self.tag2label[tag]] + [self.ignore_label_id] * (len(word_tokens) - 1)
                labels.extend(word_labels)
        return tokens, labels
    def __populate__(self, data, savelabeldic=False):
        '''
        populate samples into data dict
        set savelabeldic=True if you want to save label2tag dict
        'word': tokenized word ids
        'mask': attention mask in BERT
        'label': NER labels
        'sentence_num': number of sentences in this set (a batch contains multiple sets)
        'text_mask': 0 for special tokens and paddings, 1 for real text
        '''
        dataset = {'word': [], 'mask': [], 'label':[], 'sentence_num':[], 'text_mask':[] }
        for i in range(len(data['word'])):
            tokens, labels = self.__get_token_label_list__(data['word'][i], data['label'][i])
            word, mask, text_mask, label = self.__getraw__(tokens, labels)
            word = torch.tensor(word).long()
            mask = torch.tensor(mask).long()
            text_mask = torch.tensor(text_mask).long()
            self.__additem__(dataset, word, mask, text_mask, label)
        dataset['sentence_num'] = [len(dataset['word'])]
        if savelabeldic:
            dataset['label2tag'] = [self.label2tag]
        return dataset
    def __getitem__(self, index):
        # Episodes are fixed; `index` selects a pre-sampled episode.
        sample = self.samples[index]
        target_classes = sample['types']
        support = sample['support']
        query = sample['query']
        # add 'O' and make sure 'O' is labeled 0
        distinct_tags = ['O'] + target_classes
        self.tag2label = {tag:idx for idx, tag in enumerate(distinct_tags)}
        self.label2tag = {idx:tag for idx, tag in enumerate(distinct_tags)}
        support_set = self.__populate__(support)
        query_set = self.__populate__(query, savelabeldic=True)
        return support_set, query_set
    def __len__(self):
        return len(self.samples)
def collate_fn(data):
    """Merge a list of (support_set, query_set) pairs into two batch dicts.

    Tensor-valued fields ('word', 'mask', 'text_mask') are stacked along
    dim 0; 'label' becomes a list of long tensors (one per sentence);
    'sentence_num' (and the query's 'label2tag') remain plain lists.
    """
    support_keys = ('word', 'mask', 'label', 'sentence_num', 'text_mask')
    query_keys = ('word', 'mask', 'label', 'sentence_num', 'label2tag', 'text_mask')
    batch_support = {key: [] for key in support_keys}
    batch_query = {key: [] for key in query_keys}
    support_sets, query_sets = zip(*data)
    for support_set, query_set in zip(support_sets, query_sets):
        for key in support_keys:
            batch_support[key] += support_set[key]
        for key in query_keys:
            batch_query[key] += query_set[key]
    # Stack tensor fields; keep bookkeeping fields as lists.
    support_skip = {'label', 'sentence_num'}
    query_skip = {'label', 'sentence_num', 'label2tag'}
    for key in support_keys:
        if key not in support_skip:
            batch_support[key] = torch.stack(batch_support[key], 0)
    for key in query_keys:
        if key not in query_skip:
            batch_query[key] = torch.stack(batch_query[key], 0)
    batch_support['label'] = [torch.tensor(tags).long() for tags in batch_support['label']]
    batch_query['label'] = [torch.tensor(tags).long() for tags in batch_query['label']]
    return batch_support, batch_query
def get_loader(filepath, tokenizer, N, K, Q, batch_size, max_length,
        num_workers=8, collate_fn=collate_fn, ignore_index=-1, use_sampled_data=True):
    """Build a DataLoader of few-shot NER episodes.

    use_sampled_data=True reads pre-sampled json episodes; otherwise
    episodes are drawn on the fly from a raw tagged corpus.
    """
    if use_sampled_data:
        dataset = FewShotNERDataset(
            filepath, tokenizer, max_length, ignore_label_id=ignore_index)
    else:
        dataset = FewShotNERDatasetWithRandomSampling(
            filepath, tokenizer, N, K, Q, max_length, ignore_label_id=ignore_index)
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=True,
                           pin_memory=True,
                           num_workers=num_workers,
                           collate_fn=collate_fn)
Few-NERD | Few-NERD-main/util/viterbi.py | import torch
import torch.nn as nn
# Fixed tag indices used by the decoder: position 0 is a synthetic START
# state, position 1 is the 'O' (outside) tag; I-X tags occupy the rest.
START_ID = 0
O_ID = 1
class ViterbiDecoder:
    """
    Generalized Viterbi decoding
    """
    def __init__(self, n_tag, abstract_transitions, tau):
        """
        We assume the batch size is 1, so no need to worry about PAD for now
        n_tag: START, O, and I_Xs
        """
        super().__init__()
        self.transitions = self.project_target_transitions(n_tag, abstract_transitions, tau)
    @staticmethod
    def project_target_transitions(n_tag, abstract_transitions, tau):
        # Expand the 7 abstract transition probabilities into a full
        # (n_tag x n_tag) log-transition matrix, sharpened by temperature tau.
        s_o, s_i, o_o, o_i, i_o, i_i, x_y = abstract_transitions
        # self transitions for I-X tags
        a = torch.eye(n_tag) * i_i
        # transitions from I-X to I-Y
        b = torch.ones(n_tag, n_tag) * x_y / (n_tag - 3)
        c = torch.eye(n_tag) * x_y / (n_tag - 3)
        transitions = a + b - c
        # transition from START to O
        transitions[START_ID, O_ID] = s_o
        # transitions from START to I-X
        transitions[START_ID, O_ID+1:] = s_i / (n_tag - 2)
        # transition from O to O
        transitions[O_ID, O_ID] = o_o
        # transitions from O to I-X
        transitions[O_ID, O_ID+1:] = o_i / (n_tag - 2)
        # transitions from I-X to O
        transitions[O_ID+1:, O_ID] = i_o
        # no transitions to START
        transitions[:, START_ID] = 0.
        # temperature-sharpen (pow tau) and renormalize each row
        powered = torch.pow(transitions, tau)
        summed = powered.sum(dim=1)
        transitions = powered / summed.view(n_tag, 1)
        # floor zero entries so the log below never produces -inf
        transitions = torch.where(transitions > 0, transitions, torch.tensor(.000001))
        #print(transitions)
        #print(torch.sum(transitions, dim=1))
        return torch.log(transitions)
    def forward(self, scores: torch.Tensor) -> torch.Tensor: # type: ignore
        """
        Take the emission scores calculated by NERModel, and return a tensor of CRF features,
        which is the sum of transition scores and emission scores.
        :param scores: emission scores calculated by NERModel.
            shape: (batch_size, sentence_length, ntags)
        :return: a tensor containing the CRF features whose shape is
            (batch_size, sentence_len, ntags, ntags). F[b, t, i, j] represents
            emission[t, j] + transition[i, j] for the b'th sentence in this batch.
        """
        batch_size, sentence_len, _ = scores.size()
        # expand the transition matrix batch-wise as well as sentence-wise
        transitions = self.transitions.expand(batch_size, sentence_len, -1, -1)
        # add another dimension for the "from" state, then expand to match
        # the dimensions of the expanded transition matrix above
        emissions = scores.unsqueeze(2).expand_as(transitions)
        # add them up
        return transitions + emissions
    @staticmethod
    def viterbi(features: torch.Tensor) -> torch.Tensor:
        """
        Decode the most probable sequence of tags.
        Note that the delta values are calculated in the log space.
        :param features: the feature matrix from the forward method of CRF.
            shaped (batch_size, sentence_len, ntags, ntags)
        :return: a tensor containing the most probable sequences for the batch.
            shaped (batch_size, sentence_len)
        """
        batch_size, sentence_len, ntags, _ = features.size()
        # initialize the deltas
        delta_t = features[:, 0, START_ID, :]
        deltas = [delta_t]
        # use dynamic programming to iteratively calculate the delta values
        for t in range(1, sentence_len):
            f_t = features[:, t]
            delta_t, _ = torch.max(f_t + delta_t.unsqueeze(2).expand_as(f_t), 1)
            deltas.append(delta_t)
        # now iterate backward to figure out the most probable tags
        sequences = [torch.argmax(deltas[-1], 1, keepdim=True)]
        for t in reversed(range(sentence_len - 1)):
            f_prev = features[:, t + 1].gather(
                2, sequences[-1].unsqueeze(2).expand(batch_size, ntags, 1)).squeeze(2)
            sequences.append(torch.argmax(f_prev + deltas[t], 1, keepdim=True))
        sequences.reverse()
        return torch.cat(sequences, dim=1)
Few-NERD | Few-NERD-main/util/framework.py | import os
import sklearn.metrics
import numpy as np
import sys
import time
from . import word_encoder
from . import data_loader
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
# from pytorch_pretrained_bert import BertAdam
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.nn.parallel import DistributedDataParallel as DDP
from .viterbi import ViterbiDecoder
def get_abstract_transitions(train_fname, use_sampled_data=True):
    """
    Compute abstract transitions on the training dataset for StructShot
    """
    if use_sampled_data:
        episodes = data_loader.FewShotNERDataset(train_fname, None, 1).samples
        tag_lists = []
        for episode in episodes:
            tag_lists += episode['support']['label'] + episode['query']['label']
    else:
        raw_samples = data_loader.FewShotNERDatasetWithRandomSampling(train_fname, None, 1, 1, 1, 1).samples
        tag_lists = [sample.tags for sample in raw_samples]
    # Tally the seven abstract transition events over all tag sequences:
    # START->O, START->I, O->O, O->I, I->O, I->same-I, I->different-I.
    s_o = s_i = 0.
    o_o = o_i = 0.
    i_o = i_i = x_y = 0.
    for tags in tag_lists:
        if tags[0] == 'O':
            s_o += 1
        else:
            s_i += 1
        for prev, nxt in zip(tags, tags[1:]):
            if prev == 'O':
                if nxt == 'O':
                    o_o += 1
                else:
                    o_i += 1
            elif nxt == 'O':
                i_o += 1
            elif prev != nxt:
                x_y += 1
            else:
                i_i += 1
    # Normalize each group of outgoing transitions into probabilities.
    start_total = s_o + s_i
    o_total = o_o + o_i
    i_total = i_o + i_i + x_y
    return [
        s_o / start_total, s_i / start_total,
        o_o / o_total, o_i / o_total,
        i_o / i_total, i_i / i_total, x_y / i_total,
    ]
def warmup_linear(global_step, warmup_step):
    """Linear warmup factor: ramps from 0 to 1 over warmup_step steps, then 1."""
    return global_step / warmup_step if global_step < warmup_step else 1.0
class FewShotNERModel(nn.Module):
    """Abstract base for few-shot NER models: wraps the word encoder, the
    cross-entropy loss, and entity-level evaluation / error-analysis helpers.
    Subclasses implement forward()."""
    def __init__(self, my_word_encoder, ignore_index=-1):
        '''
        word_encoder: Sentence encoder
        You need to set self.cost as your own loss function.
        '''
        nn.Module.__init__(self)
        self.ignore_index = ignore_index
        self.word_encoder = nn.DataParallel(my_word_encoder)
        self.cost = nn.CrossEntropyLoss(ignore_index=ignore_index)
    def forward(self, support, query, N, K, Q):
        '''
        support: Inputs of the support set.
        query: Inputs of the query set.
        N: Num of classes
        K: Num of instances for each class in the support set
        Q: Num of instances for each class in the query set
        return: logits, pred
        '''
        raise NotImplementedError
    def loss(self, logits, label):
        '''
        logits: Logits with the size (..., class_num)
        label: Label with whatever size.
        return: [Loss] (A single value)
        '''
        N = logits.size(-1)
        return self.cost(logits.view(-1, N), label.view(-1))
    def __delete_ignore_index(self, pred, label):
        # Drop positions labeled ignore_index (e.g. subword continuations)
        # from both the prediction and the gold sequence.
        pred = pred[label != self.ignore_index]
        label = label[label != self.ignore_index]
        assert pred.shape[0] == label.shape[0]
        return pred, label
    def accuracy(self, pred, label):
        '''
        pred: Prediction results with whatever size
        label: Label with whatever size
        return: [Accuracy] (A single value)
        '''
        pred, label = self.__delete_ignore_index(pred, label)
        return torch.mean((pred.view(-1) == label.view(-1)).type(torch.FloatTensor))
    def __get_class_span_dict__(self, label, is_string=False):
        '''
        return a dictionary of each class label/tag corresponding to the entity positions in the sentence
        {label:[(start_pos, end_pos), ...]}
        '''
        class_span = {}
        current_label = None
        i = 0
        if not is_string:
            # having labels in [0, num_of_class]
            while i < len(label):
                if label[i] > 0:
                    start = i
                    current_label = label[i]
                    i += 1
                    # extend over the run of identical labels -> one entity span
                    while i < len(label) and label[i] == current_label:
                        i += 1
                    if current_label in class_span:
                        class_span[current_label].append((start, i))
                    else:
                        class_span[current_label] = [(start, i)]
                else:
                    assert label[i] == 0
                    i += 1
        else:
            # having tags in string format ['O', 'O', 'person-xxx', ..]
            while i < len(label):
                if label[i] != 'O':
                    start = i
                    current_label = label[i]
                    i += 1
                    while i < len(label) and label[i] == current_label:
                        i += 1
                    if current_label in class_span:
                        class_span[current_label].append((start, i))
                    else:
                        class_span[current_label] = [(start, i)]
                else:
                    i += 1
        return class_span
    def __get_intersect_by_entity__(self, pred_class_span, label_class_span):
        '''
        return the count of correct entity
        '''
        cnt = 0
        for label in label_class_span:
            cnt += len(list(set(label_class_span[label]).intersection(set(pred_class_span.get(label,[])))))
        return cnt
    def __get_cnt__(self, label_class_span):
        '''
        return the count of entities
        '''
        cnt = 0
        for label in label_class_span:
            cnt += len(label_class_span[label])
        return cnt
    def __transform_label_to_tag__(self, pred, query):
        '''
        flatten labels and transform them to string tags
        '''
        pred_tag = []
        label_tag = []
        current_sent_idx = 0 # record sentence index in the batch data
        current_token_idx = 0 # record token index in the batch data
        assert len(query['sentence_num']) == len(query['label2tag'])
        # iterate by each query set
        for idx, num in enumerate(query['sentence_num']):
            true_label = torch.cat(query['label'][current_sent_idx:current_sent_idx+num], 0)
            # drop ignore index
            true_label = true_label[true_label!=self.ignore_index]
            true_label = true_label.cpu().numpy().tolist()
            set_token_length = len(true_label)
            # use the idx-th label2tag dict
            pred_tag += [query['label2tag'][idx][label] for label in pred[current_token_idx:current_token_idx + set_token_length]]
            label_tag += [query['label2tag'][idx][label] for label in true_label]
            # update sentence and token index
            current_sent_idx += num
            current_token_idx += set_token_length
        assert len(pred_tag) == len(label_tag)
        assert len(pred_tag) == len(pred)
        return pred_tag, label_tag
    def __get_correct_span__(self, pred_span, label_span):
        '''
        return count of correct entity spans
        '''
        pred_span_list = []
        label_span_list = []
        for pred in pred_span:
            pred_span_list += pred_span[pred]
        for label in label_span:
            label_span_list += label_span[label]
        return len(list(set(pred_span_list).intersection(set(label_span_list))))
    def __get_wrong_within_span__(self, pred_span, label_span):
        '''
        return count of entities with correct span, correct coarse type but wrong finegrained type
        '''
        cnt = 0
        for label in label_span:
            # coarse type is the part of the tag before the first '-'
            coarse = label.split('-')[0]
            within_pred_span = []
            for pred in pred_span:
                if pred != label and pred.split('-')[0] == coarse:
                    within_pred_span += pred_span[pred]
            cnt += len(list(set(label_span[label]).intersection(set(within_pred_span))))
        return cnt
    def __get_wrong_outer_span__(self, pred_span, label_span):
        '''
        return count of entities with correct span but wrong coarse type
        '''
        cnt = 0
        for label in label_span:
            coarse = label.split('-')[0]
            outer_pred_span = []
            for pred in pred_span:
                if pred != label and pred.split('-')[0] != coarse:
                    outer_pred_span += pred_span[pred]
            cnt += len(list(set(label_span[label]).intersection(set(outer_pred_span))))
        return cnt
    def __get_type_error__(self, pred, label, query):
        '''
        return finegrained type error cnt, coarse type error cnt and total correct span count
        '''
        pred_tag, label_tag = self.__transform_label_to_tag__(pred, query)
        pred_span = self.__get_class_span_dict__(pred_tag, is_string=True)
        label_span = self.__get_class_span_dict__(label_tag, is_string=True)
        # +1e-6 keeps later division by this count safe when it is zero
        total_correct_span = self.__get_correct_span__(pred_span, label_span) + 1e-6
        wrong_within_span = self.__get_wrong_within_span__(pred_span, label_span)
        wrong_outer_span = self.__get_wrong_outer_span__(pred_span, label_span)
        return wrong_within_span, wrong_outer_span, total_correct_span
    def metrics_by_entity(self, pred, label):
        '''
        return entity level count of total prediction, true labels, and correct prediction
        '''
        pred = pred.view(-1)
        label = label.view(-1)
        pred, label = self.__delete_ignore_index(pred, label)
        pred = pred.cpu().numpy().tolist()
        label = label.cpu().numpy().tolist()
        pred_class_span = self.__get_class_span_dict__(pred)
        label_class_span = self.__get_class_span_dict__(label)
        pred_cnt = self.__get_cnt__(pred_class_span)
        label_cnt = self.__get_cnt__(label_class_span)
        correct_cnt = self.__get_intersect_by_entity__(pred_class_span, label_class_span)
        return pred_cnt, label_cnt, correct_cnt
    def error_analysis(self, pred, label, query):
        '''
        return
        token level false positive rate and false negative rate
        entity level within error and outer error
        '''
        pred = pred.view(-1)
        label = label.view(-1)
        pred, label = self.__delete_ignore_index(pred, label)
        fp = torch.sum(((pred > 0) & (label == 0)).type(torch.FloatTensor))
        fn = torch.sum(((pred == 0) & (label > 0)).type(torch.FloatTensor))
        pred = pred.cpu().numpy().tolist()
        label = label.cpu().numpy().tolist()
        within, outer, total_span = self.__get_type_error__(pred, label, query)
        return fp, fn, len(pred), within, outer, total_span
class FewShotNERFramework:
    """Training / evaluation driver for few-shot NER models, with optional
    StructShot-style Viterbi decoding at evaluation time."""
    def __init__(self, train_data_loader, val_data_loader, test_data_loader, viterbi=False, N=None, train_fname=None, tau=0.05, use_sampled_data=True):
        '''
        train_data_loader: DataLoader for training.
        val_data_loader: DataLoader for validating.
        test_data_loader: DataLoader for testing.
        '''
        self.train_data_loader = train_data_loader
        self.val_data_loader = val_data_loader
        self.test_data_loader = test_data_loader
        self.viterbi = viterbi
        if viterbi:
            # N+2 tags: N entity classes plus 'O' and the synthetic START state
            abstract_transitions = get_abstract_transitions(train_fname, use_sampled_data=use_sampled_data)
            self.viterbi_decoder = ViterbiDecoder(N+2, abstract_transitions, tau)
    def __load_model__(self, ckpt):
        '''
        ckpt: Path of the checkpoint
        return: Checkpoint dict
        '''
        if os.path.isfile(ckpt):
            checkpoint = torch.load(ckpt)
            print("Successfully loaded checkpoint '%s'" % ckpt)
            return checkpoint
        else:
            raise Exception("No checkpoint found at '%s'" % ckpt)
    def item(self, x):
        '''
        PyTorch before and after 0.4
        '''
        torch_version = torch.__version__.split('.')
        if int(torch_version[0]) == 0 and int(torch_version[1]) < 4:
            return x[0]
        else:
            return x.item()
    def train(self,
              model,
              model_name,
              learning_rate=1e-1,
              train_iter=30000,
              val_iter=1000,
              val_step=2000,
              load_ckpt=None,
              save_ckpt=None,
              warmup_step=300,
              grad_iter=1,
              fp16=False,
              use_sgd_for_bert=False):
        '''
        model: a FewShotREModel instance
        model_name: Name of the model
        B: Batch size
        N: Num of classes for each batch
        K: Num of instances for each class in the support set
        Q: Num of instances for each class in the query set
        ckpt_dir: Directory of checkpoints
        learning_rate: Initial learning rate
        train_iter: Num of iterations of training
        val_iter: Num of iterations of validating
        val_step: Validate every val_step steps
        '''
        print("Start training...")
        # Init optimizer
        print('Use bert optim!')
        # Standard BERT weight-decay split: no decay on biases / LayerNorm.
        parameters_to_optimize = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        parameters_to_optimize = [
            {'params': [p for n, p in parameters_to_optimize
                if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in parameters_to_optimize
                if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        if use_sgd_for_bert:
            optimizer = torch.optim.SGD(parameters_to_optimize, lr=learning_rate)
        else:
            optimizer = AdamW(parameters_to_optimize, lr=learning_rate, correct_bias=False)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=train_iter)
        # load model
        if load_ckpt:
            state_dict = self.__load_model__(load_ckpt)['state_dict']
            own_state = model.state_dict()
            for name, param in state_dict.items():
                if name not in own_state:
                    print('ignore {}'.format(name))
                    continue
                print('load {} from {}'.format(name, load_ckpt))
                own_state[name].copy_(param)
        if fp16:
            from apex import amp
            model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        model.train()
        # Training
        best_f1 = 0.0
        iter_loss = 0.0
        iter_sample = 0
        pred_cnt = 0
        label_cnt = 0
        correct_cnt = 0
        it = 0
        while it + 1 < train_iter:
            for _, (support, query) in enumerate(self.train_data_loader):
                label = torch.cat(query['label'], 0)
                if torch.cuda.is_available():
                    for k in support:
                        if k != 'label' and k != 'sentence_num':
                            support[k] = support[k].cuda()
                            query[k] = query[k].cuda()
                    label = label.cuda()
                logits, pred = model(support, query)
                assert logits.shape[0] == label.shape[0], print(logits.shape, label.shape)
                loss = model.loss(logits, label) / float(grad_iter)
                tmp_pred_cnt, tmp_label_cnt, correct = model.metrics_by_entity(pred, label)
                if fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                # gradient accumulation: step every grad_iter iterations
                if it % grad_iter == 0:
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()
                iter_loss += self.item(loss.data)
                #iter_right += self.item(right.data)
                pred_cnt += tmp_pred_cnt
                label_cnt += tmp_label_cnt
                correct_cnt += correct
                iter_sample += 1
                if (it + 1) % 100 == 0 or (it + 1) % val_step == 0:
                    # NOTE(review): pred_cnt / label_cnt can be 0 early in training,
                    # which would raise ZeroDivisionError here — consider guarding
                    # with the same epsilon used in eval().
                    precision = correct_cnt / pred_cnt
                    recall = correct_cnt / label_cnt
                    f1 = 2 * precision * recall / (precision + recall)
                    sys.stdout.write('step: {0:4} | loss: {1:2.6f} | [ENTITY] precision: {2:3.4f}, recall: {3:3.4f}, f1: {4:3.4f}'\
                        .format(it + 1, iter_loss/ iter_sample, precision, recall, f1) + '\r')
                    sys.stdout.flush()
                if (it + 1) % val_step == 0:
                    # periodic validation; keep the checkpoint with the best F1
                    _, _, f1, _, _, _, _ = self.eval(model, val_iter)
                    model.train()
                    if f1 > best_f1:
                        print('Best checkpoint')
                        torch.save({'state_dict': model.state_dict()}, save_ckpt)
                        best_f1 = f1
                    iter_loss = 0.
                    iter_sample = 0.
                    pred_cnt = 0
                    label_cnt = 0
                    correct_cnt = 0
                if (it + 1) == train_iter:
                    break
                it += 1
        print("\n####################\n")
        print("Finish training " + model_name)
    def __get_emmissions__(self, logits, tags_list):
        # split [num_of_query_tokens, num_class] into [[num_of_token_in_sent, num_class], ...]
        emmissions = []
        current_idx = 0
        for tags in tags_list:
            emmissions.append(logits[current_idx:current_idx+len(tags)])
            current_idx += len(tags)
        assert current_idx == logits.size()[0]
        return emmissions
    def viterbi_decode(self, logits, query_tags):
        # Re-decode token predictions sentence-by-sentence with the
        # StructShot Viterbi decoder (adds a synthetic START label column).
        emissions_list = self.__get_emmissions__(logits, query_tags)
        pred = []
        for i in range(len(query_tags)):
            sent_scores = emissions_list[i].cpu()
            sent_len, n_label = sent_scores.shape
            sent_probs = F.softmax(sent_scores, dim=1)
            # prepend a near-zero probability column for the START state
            start_probs = torch.zeros(sent_len) + 1e-6
            sent_probs = torch.cat((start_probs.view(sent_len, 1), sent_probs), 1)
            feats = self.viterbi_decoder.forward(torch.log(sent_probs).view(1, sent_len, n_label+1))
            vit_labels = self.viterbi_decoder.viterbi(feats)
            vit_labels = vit_labels.view(sent_len)
            vit_labels = vit_labels.detach().cpu().numpy().tolist()
            for label in vit_labels:
                # shift back: decoder label 0 is START, so real labels are label-1
                pred.append(label-1)
        return torch.tensor(pred).cuda()
    def eval(self,
            model,
            eval_iter,
            ckpt=None):
        '''
        model: a FewShotREModel instance
        B: Batch size
        N: Num of classes for each batch
        K: Num of instances for each class in the support set
        Q: Num of instances for each class in the query set
        eval_iter: Num of iterations
        ckpt: Checkpoint path. Set as None if using current model parameters.
        return: Accuracy
        '''
        print("")
        model.eval()
        if ckpt is None:
            print("Use val dataset")
            eval_dataset = self.val_data_loader
        else:
            print("Use test dataset")
            # ckpt == 'none' means: evaluate the in-memory (pretrained) weights
            if ckpt != 'none':
                state_dict = self.__load_model__(ckpt)['state_dict']
                own_state = model.state_dict()
                for name, param in state_dict.items():
                    if name not in own_state:
                        continue
                    own_state[name].copy_(param)
            eval_dataset = self.test_data_loader
        pred_cnt = 0 # pred entity cnt
        label_cnt = 0 # true label entity cnt
        correct_cnt = 0 # correct predicted entity cnt
        fp_cnt = 0 # misclassify O as I-
        fn_cnt = 0 # misclassify I- as O
        total_token_cnt = 0 # total token cnt
        within_cnt = 0 # span correct but of wrong fine-grained type
        outer_cnt = 0 # span correct but of wrong coarse-grained type
        total_span_cnt = 0 # span correct
        eval_iter = min(eval_iter, len(eval_dataset))
        with torch.no_grad():
            it = 0
            while it + 1 < eval_iter:
                for _, (support, query) in enumerate(eval_dataset):
                    label = torch.cat(query['label'], 0)
                    if torch.cuda.is_available():
                        for k in support:
                            if k != 'label' and k != 'sentence_num':
                                support[k] = support[k].cuda()
                                query[k] = query[k].cuda()
                        label = label.cuda()
                    logits, pred = model(support, query)
                    if self.viterbi:
                        pred = self.viterbi_decode(logits, query['label'])
                    tmp_pred_cnt, tmp_label_cnt, correct = model.metrics_by_entity(pred, label)
                    fp, fn, token_cnt, within, outer, total_span = model.error_analysis(pred, label, query)
                    pred_cnt += tmp_pred_cnt
                    label_cnt += tmp_label_cnt
                    correct_cnt += correct
                    fn_cnt += self.item(fn.data)
                    fp_cnt += self.item(fp.data)
                    total_token_cnt += token_cnt
                    outer_cnt += outer
                    within_cnt += within
                    total_span_cnt += total_span
                    if it + 1 == eval_iter:
                        break
                    it += 1
            # epsilon guards against division by zero when nothing was predicted
            epsilon = 1e-6
            precision = correct_cnt / (pred_cnt + epsilon)
            recall = correct_cnt / (label_cnt + epsilon)
            f1 = 2 * precision * recall / (precision + recall + epsilon)
            fp_error = fp_cnt / total_token_cnt
            fn_error = fn_cnt / total_token_cnt
            within_error = within_cnt / (total_span_cnt + epsilon)
            outer_error = outer_cnt / (total_span_cnt + epsilon)
            sys.stdout.write('[EVAL] step: {0:4} | [ENTITY] precision: {1:3.4f}, recall: {2:3.4f}, f1: {3:3.4f}'.format(it + 1, precision, recall, f1) + '\r')
            sys.stdout.flush()
            print("")
        return precision, recall, f1, fp_error, fn_error, within_error, outer_error
| 22,526 | 38.59051 | 158 | py |
Few-NERD | Few-NERD-main/model/nnshot.py | import sys
sys.path.append('..')
import util
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
class NNShot(util.framework.FewShotNERModel):
    """Few-shot NER by token-level nearest neighbour: each query token is
    scored against every support token and takes, per class, the distance
    to its single closest support token of that class."""

    def __init__(self, word_encoder, dot=False, ignore_index=-1):
        util.framework.FewShotNERModel.__init__(self, word_encoder, ignore_index=ignore_index)
        self.drop = nn.Dropout()
        self.dot = dot

    def __dist__(self, x, y, dim):
        # Dot-product similarity, or negative squared euclidean distance.
        return (x * y).sum(dim) if self.dot else -(torch.pow(x - y, 2)).sum(dim)

    def __batch_dist__(self, S, Q, q_mask):
        # S: [n_support_tokens, emb]; Q: [n_sent, n_tokens, emb]
        assert Q.size()[:2] == q_mask.size()
        flat_q = Q[q_mask == 1].view(-1, Q.size(-1))
        return self.__dist__(S.unsqueeze(0), flat_q.unsqueeze(1), 2)

    def __get_nearest_dist__(self, embedding, tag, mask, query, q_mask):
        support_tokens = embedding[mask == 1].view(-1, embedding.size(-1))
        labels = torch.cat(tag, 0)
        assert labels.size(0) == support_tokens.size(0)
        # [n_query_tokens, n_support_tokens]
        dist = self.__batch_dist__(support_tokens, query, q_mask)
        # For every class, keep the distance to the closest support token.
        per_class = [
            torch.max(dist[:, labels == cls], 1)[0]
            for cls in range(torch.max(labels) + 1)
        ]
        return torch.stack(per_class, dim=1)  # [n_query_tokens, n_class]

    def forward(self, support, query):
        '''
        support: Inputs of the support set.
        query: Inputs of the query set.
        Returns (logits, pred) where logits is [n_query_tokens, n_class]
        and pred is the argmax class per query token.
        '''
        support_emb = self.word_encoder(support['word'], support['mask'])  # [num_sent, n_tokens, 768]
        query_emb = self.word_encoder(query['word'], query['mask'])  # [num_sent, n_tokens, 768]
        support_emb = self.drop(support_emb)
        query_emb = self.drop(query_emb)
        logits = []
        s_off = 0
        q_off = 0
        assert support_emb.size()[:2] == support['mask'].size()
        assert query_emb.size()[:2] == query['mask'].size()
        # One episode at a time; sentence_num gives sentences per episode.
        for i, n_sup in enumerate(support['sentence_num']):
            n_qry = query['sentence_num'][i]
            logits.append(self.__get_nearest_dist__(
                support_emb[s_off:s_off + n_sup],
                support['label'][s_off:s_off + n_sup],
                support['text_mask'][s_off:s_off + n_sup],
                query_emb[q_off:q_off + n_qry],
                query['text_mask'][q_off:q_off + n_qry]))
            q_off += n_qry
            s_off += n_sup
        logits = torch.cat(logits, 0)
        _, pred = torch.max(logits, 1)
        return logits, pred
| 3,140 | 40.88 | 123 | py |
Few-NERD | Few-NERD-main/model/proto.py | import sys
sys.path.append('..')
import util
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
class Proto(util.framework.FewShotNERModel):
    """Prototypical network for few-shot NER: each class is represented by
    the mean embedding of its support tokens, and query tokens are scored
    by their distance to each class prototype."""

    def __init__(self, word_encoder, dot=False, ignore_index=-1):
        util.framework.FewShotNERModel.__init__(self, word_encoder, ignore_index=ignore_index)
        self.drop = nn.Dropout()
        self.dot = dot

    def __dist__(self, x, y, dim):
        # Dot-product similarity, or negative squared euclidean distance.
        return (x * y).sum(dim) if self.dot else -(torch.pow(x - y, 2)).sum(dim)

    def __batch_dist__(self, S, Q, q_mask):
        # S: [n_class, emb]; Q: [n_sent, n_tokens, emb]
        assert Q.size()[:2] == q_mask.size()
        flat_q = Q[q_mask == 1].view(-1, Q.size(-1))  # [n_text_tokens, emb]
        return self.__dist__(S.unsqueeze(0), flat_q.unsqueeze(1), 2)

    def __get_proto__(self, embedding, tag, mask):
        flat = embedding[mask == 1].view(-1, embedding.size(-1))
        labels = torch.cat(tag, 0)
        assert labels.size(0) == flat.size(0)
        # One prototype per class: the mean of its support embeddings.
        protos = [
            torch.mean(flat[labels == cls], 0)
            for cls in range(torch.max(labels) + 1)
        ]
        return torch.stack(protos)

    def forward(self, support, query):
        '''
        support: Inputs of the support set.
        query: Inputs of the query set.
        Returns (logits, pred) where logits is [n_query_tokens, n_class]
        and pred is the argmax class per query token.
        '''
        support_emb = self.word_encoder(support['word'], support['mask'])  # [num_sent, n_tokens, 768]
        query_emb = self.word_encoder(query['word'], query['mask'])  # [num_sent, n_tokens, 768]
        support_emb = self.drop(support_emb)
        query_emb = self.drop(query_emb)
        logits = []
        s_off = 0
        q_off = 0
        assert support_emb.size()[:2] == support['mask'].size()
        assert query_emb.size()[:2] == query['mask'].size()
        # One episode at a time; sentence_num gives sentences per episode.
        for i, n_sup in enumerate(support['sentence_num']):
            n_qry = query['sentence_num'][i]
            prototypes = self.__get_proto__(
                support_emb[s_off:s_off + n_sup],
                support['label'][s_off:s_off + n_sup],
                support['text_mask'][s_off:s_off + n_sup])
            # Distance of every query token to every prototype.
            logits.append(self.__batch_dist__(
                prototypes,
                query_emb[q_off:q_off + n_qry],
                query['text_mask'][q_off:q_off + n_qry]))
            q_off += n_qry
            s_off += n_sup
        logits = torch.cat(logits, 0)
        _, pred = torch.max(logits, 1)
        return logits, pred
| 3,166 | 39.088608 | 124 | py |
pycbc | pycbc-master/pycbc/results/str_utils.py | # Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides functions for formatting values into strings for display.
"""
import numpy
mjax_header = """
<script type="text/x-mathjax-config">
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$']]}});
</script>
<script type="text/javascript"
src="//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>
"""
def mathjax_html_header():
    """Standard header to use for html pages to display latex math.
    Returns
    -------
    header: str
        The necessary html head needed to use latex on an html page.
    """
    # The header lives in the module-level constant ``mjax_header``;
    # exposing it through a function keeps the public API stable should it
    # ever need to be built dynamically.
    return mjax_header
def drop_trailing_zeros(num):
    """Format *num* as a fixed-point string with trailing zeros removed.
    A bare trailing decimal point is removed as well, e.g. ``2.0 -> '2'``.
    """
    text = '%f' % (num,)
    text = text.rstrip('0')
    # '%f' always produces a decimal point, so at most one '.' can remain.
    return text.rstrip('.')
def get_signum(val, err, max_sig=numpy.inf):
    """
    Given an error, returns a string for val formated to the appropriate
    number of significant figures.
    """
    mantissa, exponent = ('%e' % err).split('e')
    if exponent.startswith('-'):
        # Error < 1: keep exponent+1 decimal places (capped at max_sig).
        ndecimals = int(exponent[1:])
        # '%e' can round the mantissa up to 10, which shifts the exponent.
        if round(float(mantissa)) == 10.:
            ndecimals -= 1
        ndecimals = min(ndecimals, max_sig)
        return ('%.' + str(ndecimals + 1) + 'f') % val
    # Error >= 1: round val at the error's power of ten.
    power = int(exponent[1:])
    if round(float(mantissa)) == 10.:
        power += 1
    rounded = round(val, 1 - power)
    if val != 0.:
        # A large error can round a non-zero value to 0; reduce the
        # rounding power until a non-zero result appears.
        attempts = 0
        while rounded == 0.:
            power -= 1
            rounded = round(val, 1 - power)
            attempts += 1
            if attempts > 100:
                raise ValueError("Maximum recursion depth hit! Input " +\
                    "values are: val = %f, err = %f" %(val, err))
    return drop_trailing_zeros(rounded)
def format_value(value, error, plus_error=None, use_scientific_notation=3,
                 include_error=True, use_relative_error=False, ndecs=None):
    """Given a numerical value and some bound on it, formats the number into a
    string such that the value is rounded to the nearest significant figure,
    which is determined by the error = abs(value-bound).
    Note: if either use_scientific_notation or include_error are True, the
    returned string will include LaTeX characters.
    Parameters
    ----------
    value : float
        The value to format.
    error : float
        The uncertainty in the value. This is used to determine the
        number of significant figures to print. If the value has no
        uncertainty, you can just do value*1e-k, where k+1 is the number
        of significant figures you want.
    plus_error : {None, float}
        The upper uncertainty on the value; i.e., what you need to add to the
        value to get its upper bound. If provided, ``error`` is assumed to be
        the negative; i.e., value +plus_error -error. The number of
        significant figures printed is determined from min(error,
        plus_error).
    use_scientific_notation : int, optional
        If ``abs(log10(value))`` is greater than the given, the return string
        will be formated to "\%.1f \\times 10^{p}", where p is the powers of 10
        needed for the leading number in the value to be in the singles spot.
        Otherwise will return "\%.(p+1)f". Default is 3. To turn off, set to
        ``numpy.inf``. Note: using scientific notation assumes that the
        returned value will be enclosed in LaTeX math mode.
    include_error : {True, bool}
        Include the error in the return string; the output will be formated
        val \\pm err, where err is the error rounded to the same
        power of 10 as val. Otherwise, just the formatted value will
        be returned. If plus_error is provided then the return text will be
        formatted as ``val^{+plus_error}_{-error}``.
    use_relative_error : {False, bool}
        If include_error, the error will be formatted as a percentage of the
        the value.
    ndecs: {None, int}
        Number of values after the decimal point. If not provided,
        it will default to the number of values in the error.
    Returns
    -------
    string
        The value (and error, if include_error is True) formatted as a string.
    Examples
    --------
    Given a value and its uncertainty:
    >>> val, err
    (3.9278372067613837e-22, 2.2351435286500487e-23)
    Format with error quoted:
    >>> format_value(val, err)
    '3.93 \\pm 0.22\\times 10^{-22}'
    Quote error as a relative error:
    >>> format_value(val, err, use_relative_error=True)
    '3.93 \\times 10^{-22} \\pm5.6\\%'
    Format without the error and without scientific notation:
    >>> format_value(val, err, use_scientific_notation=float('inf'),
                     include_error=False)
    '0.000000000000000000000393'
    Given an plus error:
    >>> err_plus
    8.2700310560051804e-24
    Format with both bounds quoted:
    >>> format_value(val, err, plus_error=err_plus)
    '3.928^{+0.083}_{-0.224}\\times 10^{-22}'
    Format with both bounds quoted as a relative error:
    >>> format_value(val, err, plus_error=err_plus, use_relative_error=True)
    '3.928\\times 10^{-22}\\,^{+2.1\\%}_{-5.7\\%}'
    """
    # Work with the magnitude; the sign is re-attached after rounding.
    minus_sign = '-' if value < 0. else ''
    value = abs(value)
    minus_err = abs(error)
    if plus_error is None:
        plus_err = minus_err
    else:
        plus_err = abs(plus_error)
    # The smaller of the two bounds decides the significant figures kept.
    error = min(minus_err, plus_err)
    if value == 0. or abs(numpy.log10(value)) < use_scientific_notation:
        conversion_factor = 0.
    else:
        # Rescale so the leading digit of value sits in the ones place.
        conversion_factor = numpy.floor(numpy.log10(value))
    value = value * 10**(-conversion_factor)
    error = error * 10**(-conversion_factor)
    if conversion_factor == 0.:
        powfactor = ''
    elif conversion_factor == 1.:
        powfactor = r'\times 10'
    else:
        powfactor = r'\times 10^{%i}' %(int(conversion_factor))
    if ndecs is not None:
        # Build a synthetic "error" whose magnitude forces exactly ndecs
        # digits after the decimal point in get_signum.
        decs = value * 10**(-ndecs)
    else:
        decs = error
    # now round the the appropriate number of sig figs
    valtxt = get_signum(value, decs)
    valtxt = '{}{}'.format(minus_sign, valtxt)
    if include_error:
        if plus_error is None:
            # Symmetric error: format as val \pm err.
            errtxt = get_signum(error, error)
            if use_relative_error and float(valtxt) != 0.:
                relative_err = 100.*float(errtxt)/float(valtxt)
                # we round the relative error to the nearest 1% using
                # get_signum; Note that if the relative error is < 1%,
                # get_signum will automatically increase the number of values
                # after the decimal until it gets to the first non-zero value
                relative_err = get_signum(relative_err, 1.)
                txt = r'%s %s \pm%s\%%' %(valtxt, powfactor, relative_err)
            else:
                txt = r'%s \pm %s%s' %(valtxt, errtxt, powfactor)
        else:
            # Asymmetric errors: format as val^{+plus}_{-minus}.
            plus_err = plus_err * 10**(-conversion_factor)
            minus_err = minus_err * 10**(-conversion_factor)
            minus_err_txt = get_signum(minus_err, decs)
            plus_err_txt = get_signum(plus_err, decs)
            if use_relative_error and float(valtxt) != 0.:
                # same as above, but with plus and minus
                rel_plus_err = get_signum(
                    100.*float(plus_err_txt)/float(valtxt), 1.)
                rel_minus_err = get_signum(
                    100.*float(minus_err_txt)/float(valtxt), 1.)
                txt = r'%s%s\,^{+%s\%%}_{-%s\%%}' %(valtxt, powfactor,
                    rel_plus_err, rel_minus_err)
            else:
                txt = r'%s^{+%s}_{-%s}%s' %(valtxt, plus_err_txt,
                    minus_err_txt, powfactor)
    else:
        txt = r'%s%s' %(valtxt, powfactor)
    return txt
__all__ = [
"mathjax_html_header",
"drop_trailing_zeros",
"get_signum",
"format_value"
]
| 9,027 | 34.968127 | 83 | py |
pycbc | pycbc-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# PyCBC documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 11 17:02:52 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import time
import pycbc.version
import subprocess
import logging
import glob
import pycbc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
pycbc.init_logging(True)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.viewcode', 'sphinxcontrib.programoutput',
'sphinx.ext.napoleon', 'sphinx.ext.mathjax',
'matplotlib.sphinxext.plot_directive', 'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram', 'sphinx_design',
"sphinxcontrib.jquery",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyCBC'
copyright = u'2015, 2016, 2017, Alexander Nitz, Ian Harry, Christopher M. Biwer, Duncan A. Brown, Josh Willis, and Tito Dal Canton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pycbc.version.last_release
# The full version, including alpha/beta/rc tags.
release = pycbc.version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['pycbc.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'style_nav_header_background': 'linear-gradient(0deg, rgba(0,0,0,1) 0%, rgba(193,193,255,1) 85%)',
'logo_only':True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_context = {
'display_github': True,
'github_user': 'gwastro',
'github_repo': 'pycbc',
'github_version': 'master/docs/',
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'https://raw.githubusercontent.com/gwastro/pycbc-logo/master/pycbc_logo_name.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyCBCdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyCBC.tex', u'PyCBC Documentation',
u'Alexander Nitz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pycbc', u'PyCBC Documentation',
[u'Alexander Nitz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyCBC', u'PyCBC Documentation',
u'Alexander Nitz', 'PyCBC', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'h5py': ('http://docs.h5py.org/en/stable/', None),
}
napoleon_use_ivar = False
suppress_warnings = ['image.nonlocal_uri']
def build_includes():
    """Creates rst files in the _include directory using the python scripts
    there.
    This will ignore any files in the _include directory that start with ``_``.
    Scripts run in parallel (at most ``run_num`` at a time); the first
    failing script terminates the others and raises ``RuntimeError``.
    """
    logging.info("Running scripts in _include:")
    cwd = os.getcwd()
    os.chdir('_include')
    scripts = glob.glob('*.py') + glob.glob('*.sh')
    run_args = []
    for fn in scripts:
        if fn.startswith('_'):
            continue
        # .py files run under python, .sh files under bash
        exe = 'python' if fn.endswith('.py') else 'bash'
        run_args.append([exe, fn])
    run_num = 2  # Number of scripts to run in parallel
    i = 0  # index of the next script to launch
    running = []
    try:
        while running or i < len(run_args):
            time.sleep(0.01)  # Sleep so this process doesn't eat CPU time
            if len(running) < run_num and i < len(run_args):
                proc = subprocess.Popen(run_args[i],
                                        stdout=None,
                                        stderr=None)
                logging.info('Running: {}'.format(' '.join(proc.args)))
                i += 1
                running.append(proc)
            # Iterate over a snapshot: the original removed items from
            # ``running`` while iterating it, which skips the element after
            # each removal.
            for proc in list(running):
                if proc.poll() is None:
                    continue  # still running
                if proc.returncode == 0:
                    print('DONE with :{}'.format(' '.join(proc.args)))
                    running.remove(proc)
                else:
                    msg = "Failure to run {}".format(' '.join(proc.args))
                    for p in running:
                        p.terminate()
                    raise RuntimeError(msg)
    finally:
        # Always restore the working directory; the original leaked the
        # chdir into '_include' when a script failed.
        os.chdir(cwd)
if not 'SKIP_PYCBC_DOCS_INCLUDE' in os.environ:
build_includes()
def setup(app):
    """Sphinx extension hook: register extra static assets with the app.
    Stylesheets must go through ``add_css_file`` so Sphinx emits ``<link>``
    tags; the original passed them to ``add_js_file``, which would emit
    broken ``<script src="*.css">`` tags instead.
    """
    app.add_js_file('typed.min.js')
    app.add_css_file('terminal.css')
    app.add_css_file("theme_overrides.css")
# -- Options for inheritance graphs -------------------------------------------
# Makes the graphs be vertically aligned, with parents at the top
inheritance_graph_attrs = {'rankdir': 'TB'}
| 11,336 | 31.484241 | 132 | py |
MAPS-mt | MAPS-mt-main/interactive.py | import os
import difflib
import logging
import argparse
import warnings
from typing import List
from langcodes import Language
from data.trigger_sents import SUPPORT_LANGS
from comet import load_from_checkpoint, download_model
from data import demo_ex_dict, kw_ex_dict, topic_ex_dict
from model.openai.translate import api_key, model2max_context, num_tokens_from_string, batch_translate_with_backoff, translate_with_backoff
from tabulate import tabulate
from termcolor import colored
import shutil
warnings.filterwarnings("ignore", category=UserWarning, module="pytorch_lightning.trainer.setup")
SUPPORTED_LANG_PAIRS = [f"{s}-{t}" for s in SUPPORT_LANGS for t in SUPPORT_LANGS if s != t]
MODEL_NAME = "text-davinci-003" #TODO: support more models
KNOW2COLOR = {
"Keywords": 'light_red',
"Topics": 'light_green',
"Demo": 'light_yellow',
}
comet_model_mapping = {
"wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt",
}
def parse_args():
    """Build and parse the command-line arguments for the interactive demo."""
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Only the cross product of SUPPORT_LANGS is accepted.
    parser.add_argument("--lang-pair", "-lp", type=str, required=True, choices=SUPPORTED_LANG_PAIRS, help="Language pair")
    parser.add_argument("--comet-qe-model-name", type=str, default="wmt21-comet-qe-da", help="COMET QE model name")
    # By default, checkpoints are stored in 'eval_ckpt' next to this script.
    parser.add_argument("--comet-saving-dir", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'eval_ckpt'))
    parser.add_argument("--only-final", action="store_true", help="Only output the final translation")
    parser.add_argument("--use-gpu", action="store_true", help="Use gpu for QE model")
    return parser.parse_args()
def query(prompt):
    """Send one completion request for *prompt* and return the completion.
    The generation budget is whatever context remains after the prompt;
    temperature 0 makes the output deterministic.
    """
    len_prompt = num_tokens_from_string(prompt, MODEL_NAME)
    return translate_with_backoff(
        prompt,
        MODEL_NAME,
        max_tokens=model2max_context[MODEL_NAME]-len_prompt,
        api_key=api_key,
        temperature=0.0
    )
def batch_query(prompts):
    """Send several prompts as one batched completion request.
    Returns an empty list for an empty batch without calling the API.
    """
    if not prompts:
        return []
    # Budget the generation by the longest prompt in the batch.
    longest = max(num_tokens_from_string(p, MODEL_NAME) for p in prompts)
    return batch_translate_with_backoff(
        prompts,
        MODEL_NAME,
        max_tokens=model2max_context[MODEL_NAME] - longest,
        api_key=api_key,
        temperature=0.0,
    )
def mine_keywords_prompt(source_sentence: str, src_lng: str, tgt_lng: str, src_full: str, tgt_full: str):
    """Build the few-shot prompt for mining keyword translation pairs.
    The in-context examples come first; the final section leaves the
    'Keyword Pairs:' slot open for the model to fill in.
    """
    examples = kw_ex_dict[(src_lng, tgt_lng)]
    sections = []
    for ex_src, ex_kw in examples + [(source_sentence, None)]:
        section = (
            f"Let's extract the keywords in the following {src_full} sentence, and then translate these keywords into {tgt_full}.\n"
            f"{src_full}: {ex_src}\n"
        )
        section += f"Keyword Pairs: {ex_kw}" if ex_kw else "Keyword Pairs:"
        sections.append(section)
    return "\n\n".join(sections)
def mine_topics_prompt(source_sentence: str, src_lng: str, tgt_lng: str):
    """Build the few-shot prompt for mining the topics of the input.
    The final section leaves 'Topics:' open for the model to fill in.
    """
    examples = topic_ex_dict[(src_lng, tgt_lng)]
    sections = []
    for ex_src, ex_topic in examples + [(source_sentence, None)]:
        section = (
            "Use a few words to describe the topics of the following input sentence.\n"
            f"Input: {ex_src}\n"
        )
        section += f"Topics: {ex_topic}" if ex_topic else "Topics:"
        sections.append(section)
    return "\n\n".join(sections)
def mine_demo_prompt(source_sentence: str, src_lng: str, tgt_lng: str, src_full: str, tgt_full: str):
    """Build the few-shot prompt for mining a related demonstration pair.
    The final section leaves the sentence-pair slot open for the model.
    """
    examples = demo_ex_dict[(src_lng, tgt_lng)]
    sections = []
    for ex_src, ex_demo_src, ex_demo_tgt in examples + [(source_sentence, None, None)]:
        article = 'an' if src_full == 'English' else 'a'
        section = (
            f"Let's write {article} {src_full} sentence related to but different from the input {src_full} sentence and translate it into {tgt_full}\n"
            f"Input {src_full} sentence: {ex_src}\n"
        )
        if ex_demo_src and ex_demo_tgt:
            section += f"Output {src_full}-{tgt_full} sentence pair: {ex_demo_src}\t{ex_demo_tgt}"
        else:
            section += f"Output {src_full}-{tgt_full} sentence pair:"
        sections.append(section)
    return "\n\n".join(sections)
def mine_knowledge(source_sentence: str, src_lng: str, tgt_lng: str, src_full: str, tgt_full: str):
    """Mine keywords, topics and a demonstration pair in one batched call.
    Returns the three completions in that order.
    """
    prompts = [
        mine_keywords_prompt(source_sentence, src_lng, tgt_lng, src_full, tgt_full),
        mine_topics_prompt(source_sentence, src_lng, tgt_lng),
        mine_demo_prompt(source_sentence, src_lng, tgt_lng, src_full, tgt_full),
    ]
    return batch_query(prompts)
def knowledge_integration(source_sentence: str, src_full: str, tgt_full: str, keywords: str, topics: str, demo: str):
    """Generate four candidates in one batched call: a plain translation
    plus one conditioned on each mined knowledge type (in that order)."""
    prompts = [
        translate_prompt(source_sentence, src_full, tgt_full),
        translate_with_knowledge_prompt("Keyword Pairs", keywords, source_sentence, src_full, tgt_full),
        translate_with_knowledge_prompt("Topics", topics, source_sentence, src_full, tgt_full),
        translate_with_knowledge_prompt(f"Related {src_full}-{tgt_full} sentence pairs", demo, source_sentence, src_full, tgt_full),
    ]
    return batch_query(prompts)
def translate_with_knowledge_prompt(knowledge_type: str, knowledge_content: str, source_sentence: str, src_full: str, tgt_full: str):
    """Build a translation prompt conditioned on a block of mined knowledge."""
    lines = [
        f"{knowledge_type}: {knowledge_content}",
        "",
        f"Instruction: Given the above knowledge, translate the following {src_full} text into {tgt_full}.",
        f"{src_full}: {source_sentence}",
        f"{tgt_full}:",
    ]
    return "\n".join(lines)
def translate_prompt(source_sentence: str, src_full: str, tgt_full: str):
    """Build a plain (knowledge-free) translation prompt."""
    lines = [
        f"Instruction: Translate the following {src_full} text into {tgt_full}.",
        f"{src_full}: {source_sentence}",
        f"{tgt_full}:",
    ]
    return "\n".join(lines)
def comet_qe(comet_model, source_sentence: str, translation_candidates: List[str], use_gpu: bool):
    """Score each candidate with a reference-free COMET-QE model and return
    the list of scores (one per candidate, in input order)."""
    batch = [
        {"mt": candidate, "src": source_sentence, "ref": None}
        for candidate in translation_candidates
    ]
    output = comet_model.predict(batch, batch_size=4, gpus=1 if use_gpu else 0, progress_bar=False)
    return output.scores
def argmax(lst):
    """Index of the first maximal element of *lst*."""
    return max(range(len(lst)), key=lst.__getitem__)
def find_diff_str(str1: str, str2: str, know_name: str, language: str) -> str:
    """Return *str2* with the parts absent from *str1* colour-highlighted.
    Args:
        str1: the reference string (the base candidate)
        str2: the string to highlight
        know_name: knowledge type, used to pick the colour (key of KNOW2COLOR)
        language: language full name; English/German diff at word level,
            everything else at character level
    Returns:
        str2 with additions coloured via termcolor.
    """
    differ = difflib.Differ()

    def _highlight(diff_iter):
        pieces = []
        for entry in diff_iter:
            code = entry[0]
            if code == ' ':
                # Unchanged: keep as-is.
                pieces.append(entry[2:])
            elif code == '+':
                # Present only in str2: highlight.
                pieces.append(colored(entry[2:], KNOW2COLOR[know_name]))
            # '-' (only in str1) and '?' hint lines are dropped.
        return pieces

    if language in ['English', 'German']:
        # Space-delimited languages: diff word by word.
        word_diff = differ.compare(str1.split(), str2.split())
        return ' '.join(_highlight(word_diff))
    # Other scripts (e.g. Chinese, Japanese): diff character by character.
    char_diff = differ.compare(str1, str2)
    return ''.join(_highlight(char_diff))
def main(args):
    """Interactive loop: read a source sentence, mine knowledge, generate
    translation candidates, pick the best with COMET-QE, and print it.
    Runs until interrupted (Ctrl-C); empty input lines are re-prompted.
    """
    src_lng, tgt_lng = args.lang_pair.split('-')
    src_full = Language.make(language=src_lng).display_name()
    tgt_full = Language.make(language=tgt_lng).display_name()
    # Loading the comet model
    # Silence every existing logger first; model loading is chatty.
    loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
    for logger in loggers:
        logger.setLevel(logging.ERROR)
    if args.comet_qe_model_name in comet_model_mapping:
        # Known model name: load the local checkpoint directly.
        comet_model = load_from_checkpoint(os.path.join(args.comet_saving_dir, comet_model_mapping[args.comet_qe_model_name]))
    else:
        # Otherwise download (cached in comet_saving_dir), then load.
        model_path = download_model(args.comet_qe_model_name, saving_directory=args.comet_saving_dir)
        comet_model = load_from_checkpoint(model_path)
    comet_model.eval()
    # Translate
    while True:
        source_sentence = ""
        while source_sentence == "":
            source_sentence = input(f"\nEnter source {src_full} sentence: ")
        # knowledge mining
        keywords, topics, demo = mine_knowledge(source_sentence, src_lng, tgt_lng, src_full, tgt_full)
        # knowledge integration
        candidate_base, candidate_kw, candidate_topic, candidate_demo = knowledge_integration(source_sentence, src_full, tgt_full, keywords, topics, demo)
        # knowledge selection
        candidates = [candidate_base, candidate_kw, candidate_topic, candidate_demo]
        scores = comet_qe(comet_model, source_sentence, candidates, args.use_gpu)
        final_translaton = candidates[argmax(scores)]
        # output
        if args.only_final:
            print(final_translaton)
        else:
            # Each candidate row highlights its differences from the base.
            table = [
                [colored("Keywords", KNOW2COLOR["Keywords"]), f"{keywords}"],
                [colored("Topics", KNOW2COLOR["Topics"]), f"{topics}"],
                [colored("Demo", KNOW2COLOR["Demo"]), f"{demo}"],
                ["----", "--"],
                [colored("Cand Kw", KNOW2COLOR["Keywords"]), f"{find_diff_str(candidate_base, candidate_kw, 'Keywords', tgt_full)}"],
                [colored("Cand Topic", KNOW2COLOR["Topics"]), f"{find_diff_str(candidate_base, candidate_topic, 'Topics', tgt_full)}"],
                [colored("Cand Demo", KNOW2COLOR["Demo"]), f"{find_diff_str(candidate_base, candidate_demo, 'Demo', tgt_full)}"],
                ["Cand Base", f"{candidate_base}"],
                ["----", "--"],
                ["Final", colored(f"{final_translaton}", attrs=["bold"])],
            ]
            # Cap the content column to the terminal width minus label room.
            width = min(shutil.get_terminal_size().columns-18, 120)
            print(tabulate(table, tablefmt='fancy_grid', maxcolwidths=[None, width]))
if __name__ == "__main__":
args = parse_args()
main(args) | 10,309 | 41.780083 | 186 | py |
MAPS-mt | MAPS-mt-main/scripts/knowledge-selection.py | import os
import torch
import json
import random
import logging
import argparse
import threading
import numpy as np
from sacrebleu.metrics import BLEU
from comet import load_from_checkpoint, download_model
comet_model_mapping = {
"wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt",
}
def seed_everything(TORCH_SEED):
    """Seed every RNG source (hash, python, numpy, torch, CUDA) for reproducibility."""
    seed_value = TORCH_SEED
    os.environ['PYTHONHASHSEED'] = str(seed_value)
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    # Trade cuDNN autotuning speed for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def bleu(**kwargs):
    """Sentence-level BLEU for each (sys, ref) pair via sacrebleu.

    Uses the Chinese tokenizer when the target language is "zh" and the
    sacrebleu default otherwise.  Returns one score per input line.
    """
    sys_lines = kwargs["sys_lines"]
    ref_lines = kwargs["ref_lines"]
    metric_kwargs = {"tokenize": "zh"} if kwargs["tgt_lang"] == "zh" else {}
    scores = []
    for hyp, ref in zip(sys_lines, ref_lines):
        scores.append(BLEU(**metric_kwargs).corpus_score([hyp], [[ref]]).score)
    return scores
def randscore(**kwargs):
    """Return one uniform-random score in [0, 100] per system line.

    Baseline "metric" used for random candidate selection.  Only
    ``sys_lines`` is read; extra kwargs are accepted and ignored so the
    function matches the common scorer(**kwargs) calling convention.
    """
    sys_lines = kwargs["sys_lines"]
    # Bug fix: the original drew one extra random number and discarded it,
    # needlessly perturbing the RNG stream.  The dead call is removed.
    return [random.uniform(0, 100) for _ in sys_lines]
def comet(**kwargs):
    """Score sys_lines with a reference-based COMET model, using a JSON file cache.

    Expected kwargs: sys_lines, src_lines, ref_lines, comet_model_name,
    comet_saving_dir, comet_cache_dir, batch_size.  Returns one float score
    per (sys, src, ref) triple, in input order.
    """
    sys_lines = kwargs["sys_lines"]
    src_lines = kwargs["src_lines"]
    ref_lines = kwargs["ref_lines"]
    comet_model_name = kwargs["comet_model_name"]
    comet_saving_dir = kwargs["comet_saving_dir"]
    comet_cache_dir = kwargs["comet_cache_dir"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(comet_cache_dir, 'comet_cache.json')
    # NOTE(review): this lock is a fresh local object on every call, so it
    # cannot serialize concurrent callers of this function — only code sharing
    # this one call's closure would be synchronized.
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    data = []
    new_sys_lines, new_src_lines, new_ref_lines = [], [], []
    # Collect only the triples whose score is not cached yet.
    for sys, src, ref in zip(sys_lines, src_lines, ref_lines):
        cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_src_lines.append(src)
            new_ref_lines.append(ref)
            data.append({"mt": sys, "src": src, "ref": ref})
    logging.info(f"COMET cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}")
    if data:
        # Load the model lazily: only when there are uncached items to score.
        if comet_model_name in comet_model_mapping:
            comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_model_name]))
        else:
            model_path = download_model(comet_model_name, saving_directory=comet_saving_dir)
            comet_model = load_from_checkpoint(model_path)
        comet_model.eval()
        model_output = comet_model.predict(data, batch_size=batch_size, gpus=1)
        scores = model_output.scores
        # Re-read the cache before writing so updates made meanwhile are merged.
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, src, ref), score in zip(zip(new_sys_lines, new_src_lines, new_ref_lines), scores):
                cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    # Every requested triple is cached now; emit scores in input order.
    final_scores = [cache[json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)] for sys, src, ref in zip(sys_lines, src_lines, ref_lines)]
    return final_scores
def comet_qe(**kwargs):
    """Score sys_lines with a reference-free COMET-QE model, using a JSON file cache.

    Expected kwargs: sys_lines, src_lines, comet_qe_model_name,
    comet_saving_dir, comet_cache_dir, batch_size.  Returns one float score
    per (sys, src) pair, in input order.
    """
    sys_lines = kwargs["sys_lines"]
    src_lines = kwargs["src_lines"]
    comet_qe_model_name = kwargs["comet_qe_model_name"]
    comet_saving_dir = kwargs["comet_saving_dir"]
    comet_cache_dir = kwargs["comet_cache_dir"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(comet_cache_dir, 'comet_qe_cache.json')
    # NOTE(review): the lock is created per call, so it cannot serialize
    # concurrent callers of this function.
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    data = []
    new_sys_lines, new_src_lines = [], []
    # Only score the pairs missing from the cache.
    for sys, src in zip(sys_lines, src_lines):
        cache_key = json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_src_lines.append(src)
            data.append({"mt": sys, "src": src, "ref": None})
    logging.info(f"COMET-QE cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}")
    if data:
        # Load the model lazily: only when there are uncached items to score.
        if comet_qe_model_name in comet_model_mapping:
            comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_qe_model_name]))
        else:
            model_path = download_model(comet_qe_model_name, saving_directory=comet_saving_dir)
            comet_model = load_from_checkpoint(model_path)
        comet_model.eval()
        model_output = comet_model.predict(data, batch_size=batch_size, gpus=1)
        scores = model_output.scores
        # Merge new scores into the latest cache contents before writing back.
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, src), score in zip(zip(new_sys_lines, new_src_lines), scores):
                cache_key = json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    # All requested pairs are cached now; emit scores in input order.
    final_scores = [cache[json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)] for sys, src in zip(sys_lines, src_lines)]
    return final_scores
def readlines(file_path):
    """Read *file_path* and return its lines with surrounding whitespace stripped.

    A falsy path (None or "") yields an empty list, so optional files can be
    passed through unconditionally.
    """
    if not file_path:
        return []
    with open(file_path, 'r') as fh:
        return [line.strip() for line in fh]
def argmax(lst):
    """Return the index of the first occurrence of the largest element."""
    best_idx = 0
    for i, value in enumerate(lst):
        if value > lst[best_idx]:
            best_idx = i
    return best_idx
def parse_args():
    """Build and parse the CLI options for knowledge selection."""
    here = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--seed", type=int)
    parser.add_argument("--sys", nargs='+', required=True, help="candidates")
    parser.add_argument("--src", type=str, required=True, help="source")
    parser.add_argument("--ref", type=str, default=None, help="reference")
    parser.add_argument("--out", type=str, required=True, help="output path")
    parser.add_argument("--src-lang", type=str, required=True, help="source langauge code")
    parser.add_argument("--tgt-lang", type=str, required=True, help="target langauge code")
    parser.add_argument("--comet-qe-model-name", type=str, default="wmt21-comet-qe-da")
    parser.add_argument("--comet-model-name", type=str, default="Unbabel/wmt22-comet-da")
    parser.add_argument("--comet-saving-dir", type=str, default=os.path.join(here, '..', 'eval_ckpt'))
    parser.add_argument("--comet-cache-dir", type=str, default=os.path.join(here, '..', 'cache', 'comet'))
    parser.add_argument("--metric", type=str, choices=["bleu", "comet", "comet_qe", "randscore"], required=True)
    parser.add_argument("-bs", "--batch-size", type=int, default=32)
    return parser.parse_args()
def main(args):
    """Select, per source sentence, the best candidate translation.

    Reads one or more candidate files (``args.sys``), scores every candidate
    list with the chosen metric, and writes the per-line argmax candidate to
    ``args.out``.
    """
    if args.seed:
        seed_everything(args.seed)
    # Explicit dispatch instead of eval() on a user-supplied string.
    scorers = {"bleu": bleu, "comet": comet, "comet_qe": comet_qe, "randscore": randscore}
    scorer = scorers[args.metric]
    sys_lines_lst = [readlines(path) for path in args.sys]
    src_lines = readlines(args.src)
    ref_lines = readlines(args.ref)
    # Reference-free metrics may run without a reference file.
    assert args.metric in ["comet_qe", "randscore"] or len(ref_lines) > 0
    assert all(len(sys_lines) == len(src_lines) for sys_lines in sys_lines_lst)
    metrics_lst = [
        scorer(
            sys_lines=sys_lines,
            src_lines=src_lines,
            ref_lines=ref_lines,
            src_lang=args.src_lang,
            tgt_lang=args.tgt_lang,
            comet_qe_model_name=args.comet_qe_model_name,
            comet_model_name=args.comet_model_name,
            comet_cache_dir=args.comet_cache_dir,
            comet_saving_dir=args.comet_saving_dir,
            batch_size=args.batch_size,
        )
        for sys_lines in sys_lines_lst
    ]
    # Per line, keep the candidate with the highest metric score.
    combine_sys_lines = []
    for i in range(len(src_lines)):
        scores = [metrics[i] for metrics in metrics_lst]
        candidates = [sys_lines[i] for sys_lines in sys_lines_lst]
        combine_sys_lines.append(candidates[argmax(scores)])
    with open(args.out, 'w') as out_f:
        out_f.write("\n".join(combine_sys_lines) + '\n')
if __name__ == "__main__":
args = parse_args()
main(args) | 9,515 | 36.027237 | 155 | py |
MAPS-mt | MAPS-mt-main/scripts/compare.py | import os
from comet.cli.compare import *
import threading
import logging
from bleurt import score as bleurt_score
from sacrebleu.metrics import BLEU
comet_model_mapping = {
"wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt",
}
def wait_until_path_exist(path):
    """Block until *path* exists as a directory.

    Sleeps between checks instead of busy-spinning, so a waiting process no
    longer burns a full CPU core while another process creates the directory.
    """
    import time  # local import: this module does not import time at top level
    while not os.path.isdir(path):
        time.sleep(1)
def bleurt(**kwargs):
    """Score (sys, ref) pairs with a BLEURT checkpoint, using a JSON file cache.

    Expected kwargs: sys_lines, ref_lines, bleurt_cache_dir, bleurt_ckpt,
    batch_size.  Returns one float score per pair, in input order.
    """
    sys_lines = kwargs["sys_lines"]
    ref_lines = kwargs["ref_lines"]
    bleurt_cache_dir = kwargs["bleurt_cache_dir"]
    bleurt_ckpt = kwargs["bleurt_ckpt"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(bleurt_cache_dir, 'bleurt_cache.json')
    # NOTE(review): this lock is local to the call; it does not synchronize
    # concurrent callers of this function.
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    new_sys_lines, new_ref_lines = [], []
    # Keep only the pairs whose score is not cached yet.
    for sys, ref in zip(sys_lines, ref_lines):
        cache_key = json.dumps((sys, ref), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_ref_lines.append(ref)
    logging.info(f"BLEURT cache info: {len(sys_lines)-len(new_sys_lines)}/{len(sys_lines)}")
    assert len(new_sys_lines) == len(new_ref_lines)
    if len(new_sys_lines) > 0:
        # Load the scorer lazily, only when something must actually be scored.
        bleurt_model = bleurt_score.LengthBatchingBleurtScorer(bleurt_ckpt)
        scores = bleurt_model.score(references=new_ref_lines, candidates=new_sys_lines, batch_size=batch_size)
        # Merge new scores into the freshest cache state before writing back.
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, ref), score in zip(zip(new_sys_lines, new_ref_lines), scores):
                cache_key = json.dumps((sys, ref), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    # Every requested pair is cached now; emit scores in input order.
    final_scores = [cache[json.dumps((sys, ref), ensure_ascii=False)] for sys, ref in zip(sys_lines, ref_lines)]
    return final_scores
def comet(**kwargs):
    """Score sys_lines with a reference-based COMET model, using a JSON file cache.

    Expected kwargs: sys_lines, src_lines, ref_lines, comet_model_name,
    comet_saving_dir, comet_cache_dir, batch_size.  Returns one float score
    per (sys, src, ref) triple, in input order.
    """
    sys_lines = kwargs["sys_lines"]
    src_lines = kwargs["src_lines"]
    ref_lines = kwargs["ref_lines"]
    comet_model_name = kwargs["comet_model_name"]
    comet_saving_dir = kwargs["comet_saving_dir"]
    comet_cache_dir = kwargs["comet_cache_dir"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(comet_cache_dir, 'comet_cache.json')
    # Block until the checkpoint directory exists — presumably another process
    # may still be creating/downloading it.  TODO(review): confirm the intent.
    wait_until_path_exist(comet_saving_dir)
    # NOTE(review): this lock is a fresh local object on every call, so it
    # cannot serialize concurrent callers of this function.
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    data = []
    new_sys_lines, new_src_lines, new_ref_lines = [], [], []
    # Collect only the triples whose score is not cached yet.
    for sys, src, ref in zip(sys_lines, src_lines, ref_lines):
        cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_src_lines.append(src)
            new_ref_lines.append(ref)
            data.append({"mt": sys, "src": src, "ref": ref})
    logging.info(f"COMET cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}")
    if data:
        # Load the model lazily: only when there are uncached items to score.
        if comet_model_name in comet_model_mapping:
            comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_model_name]))
        else:
            model_path = download_model(comet_model_name, saving_directory=comet_saving_dir)
            comet_model = load_from_checkpoint(model_path)
        comet_model.eval()
        model_output = comet_model.predict(data, batch_size=batch_size, gpus=1)
        scores = model_output.scores
        # Re-read the cache before writing so updates made meanwhile are merged.
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, src, ref), score in zip(zip(new_sys_lines, new_src_lines, new_ref_lines), scores):
                cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    # Every requested triple is cached now; emit scores in input order.
    final_scores = [cache[json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)] for sys, src, ref in zip(sys_lines, src_lines, ref_lines)]
    return final_scores
def bleu(**kwargs):
    """Sentence-level BLEU (flores200 tokenizer) for each (sys, ref) pair."""
    sys_lines = kwargs["sys_lines"]
    ref_lines = kwargs["ref_lines"]
    tgt_lang = kwargs["tgt_lang"]  # kept for scorer-interface parity; unused here
    assert len(sys_lines) == len(ref_lines)
    scores = []
    for hyp, ref in zip(sys_lines, ref_lines):
        metric = BLEU(tokenize="flores200")
        scores.append(metric.corpus_score([hyp], [[ref]]).score)
    return scores
def display_statistical_results(data: Statistical_test_info) -> None:
    """Print out the T-test results for a system pair.
    Args:
        data (Statistical_test_info): Stats to be printed out.
    """
    print("==========================")
    print("x_name:", data["x_name"])
    print("y_name:", data["y_name"])
    print("\nBootstrap Resampling Results:")
    for key, value in data["bootstrap_resampling"].items():
        print("{}:\t{:.4f}".format(key, value))
    print("\nPaired T-Test Results:")
    for key, value in data["paired_t-test"].items():
        print("{}:\t{:.4f}".format(key, value))
    x_mean = data["bootstrap_resampling"]["x-mean"]
    y_mean = data["bootstrap_resampling"]["y-mean"]
    names = (data["x_name"], data["y_name"])
    best_system = names[0] if x_mean > y_mean else names[1]
    worse_system = names[0] if x_mean < y_mean else names[1]
    if data["paired_t-test"]["p_value"] <= 0.05:
        print("Null hypothesis rejected according to t-test.")
        print("Scores differ significantly across samples.")
        print(f"{best_system} outperforms {worse_system}.")
    else:
        print("Null hypothesis can't be rejected.\nBoth systems have equal averages.")
def t_tests_summary(
    t_test_results: List[Statistical_test_info],
    translations: Tuple[Path_fr],
    threshold_p_value: float = 0.05,
) -> None:
    """Prints T-tests Summary
    Args:
        t_test_results (List[Statistical_test_info]): List of stats between systems.
        translations (Tuple[Path_fr]): Path to each system.
        threshold_p_value (float): Threshold for p_value. Defaults to 0.05.
    """
    n = len(translations)
    name2id = {os.path.basename(name): i for i, name in enumerate(translations)}
    # grid[x][y]: True when system x significantly beats system y,
    # False when compared but not significant, None when never compared.
    grid = [[None] * n for name in translations]
    for t_test in t_test_results:
        p_value = t_test["paired_t-test"]["p_value"]
        x_id = name2id[t_test["x_name"]]
        y_id = name2id[t_test["y_name"]]
        grid[x_id][y_id] = False
        grid[y_id][x_id] = False
        if p_value < threshold_p_value:
            # Significant difference: mark the winner (higher bootstrap mean).
            x_seg_scores = t_test["bootstrap_resampling"]["x-mean"]
            y_seg_scores = t_test["bootstrap_resampling"]["y-mean"]
            if x_seg_scores > y_seg_scores:
                grid[x_id][y_id] = True
            else:
                grid[y_id][x_id] = True
    # Add the row's name aka the system's name.
    grid = [(os.path.basename(name),) + tuple(row) for name, row in zip(translations, grid)]
    print("Summary")
    print("If system_x is better than system_y then:")
    print(
        f"Null hypothesis rejected according to t-test with p_value={threshold_p_value}."
    )
    print("Scores differ significantly across samples.")
    print(tabulate(grid, headers=("system_x \ system_y",) + tuple([os.path.basename(t) for t in translations])))
def score(cfg: Namespace, systems: List[Dict[str, List[str]]]) -> np.ndarray:
    """Scores each systems with a given model.
    Args:
        cfg (Namespace): comet-compare configs.
        systems (List[Dict[str, List[str]]]): List with translations for each system.
    Return:
        np.ndarray: segment-level scores flatten.
    """
    # Explicit dispatch instead of eval() on the --metric string.
    scorers = {"comet": comet, "bleurt": bleurt, "bleu": bleu}
    scorer = scorers[cfg.metric]
    # Invariant check hoisted out of the per-system loop.
    assert cfg.tgt_lang or cfg.metric != "bleu", "BLEU need to specify target language. (--tgt-lang xx)"
    seg_scores = []
    for system in systems:
        samples = [dict(zip(system, t)) for t in zip(*system.values())]
        seg_scores += scorer(
            sys_lines=[s["mt"] for s in samples],
            src_lines=[s["src"] for s in samples],
            ref_lines=[s["ref"] for s in samples],
            comet_model_name=cfg.model,
            comet_saving_dir=cfg.model_storage_path,
            comet_cache_dir=cfg.comet_cache_dir,
            bleurt_ckpt=cfg.bleurt_ckpt,
            bleurt_cache_dir=cfg.bleurt_cache_dir,
            batch_size=cfg.batch_size,
            tgt_lang=cfg.tgt_lang,
        )
    n = len(systems[0]["src"])
    # [grouper](https://docs.python.org/3/library/itertools.html#itertools-recipes)
    seg_scores = list(zip(*[iter(seg_scores)] * n))
    seg_scores = np.array(seg_scores, dtype="float32")  # num_systems x num_translations
    return seg_scores
def get_cfg() -> Namespace:
    """Parse the CLI options and arguments.

    NOTE(review): despite the annotation, this returns ``(cfg, parser)``,
    not a bare Namespace — see the final return statement.
    Return:
        Namespace: comet-compare configs.
    """
    parser = ArgumentParser(
        description="Command for comparing multiple MT systems' translations."
    )
    parser.add_argument("--tgt-lang", type=str)
    parser.add_argument("-s", "--sources", type=Path_fr)
    parser.add_argument("-r", "--references", type=Path_fr)
    parser.add_argument("-t", "--translations", nargs="*", type=Path_fr)
    parser.add_argument("-d", "--sacrebleu_dataset", type=str)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument(
        "--quiet", action="store_true", help="Sets all loggers to ERROR level."
    )
    parser.add_argument(
        "--only_system", action="store_true", help="Prints only the final system score."
    )
    parser.add_argument(
        "--num_splits",
        type=int,
        default=300,
        help="Number of random partitions used in Bootstrap resampling.",
    )
    parser.add_argument(
        "--sample_ratio",
        type=float,
        default=0.4,
        help="Percentage of the testset to use in each split.",
    )
    parser.add_argument(
        "--t_test_alternative",
        type=str,
        default="two-sided",
        help=(
            "Alternative hypothesis from scipy.stats.ttest_rel. The following options"
            + " are available: 'two-sided', 'less', 'greater'. Defaults to 'two-sided'"
        ),
    )
    parser.add_argument(
        "--to_json",
        type=str,
        default="",
        help="Exports results to a json file.",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="Unbabel/wmt22-comet-da",
        help="COMET model to be used.",
    )
    parser.add_argument(
        "--model_storage_path",
        help=(
            "Path to the directory where models will be stored. "
            + "By default its saved in ~/.cache/torch/unbabel_comet/"
        ),
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt'),
    )
    parser.add_argument(
        "--num_workers",
        help="Number of workers to use when loading data.",
        type=int,
        default=None,
    )
    parser.add_argument(
        "--disable_cache",
        action="store_true",
        help=(
            "Disables sentence embeddings caching."
            + " This makes inference slower but saves memory."
        ),
    )
    parser.add_argument(
        "--disable_length_batching",
        action="store_true",
        help="Disables length batching. This makes inference slower.",
    )
    parser.add_argument(
        "--print_cache_info",
        action="store_true",
        help="Print information about COMET cache.",
    )
    parser.add_argument(
        "--comet_cache_dir",
        type=str,
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'comet')
    )
    parser.add_argument(
        "--bleurt_ckpt",
        type=str,
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt', 'BLEURT-20')
    )
    parser.add_argument(
        "--bleurt_cache_dir",
        type=str,
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'bleurt')
    )
    parser.add_argument(
        "--metric",
        type=str,
        choices=["comet", "bleurt", "bleu"],
        required=True
    )
    cfg = parser.parse_args()
    # A source is mandatory: either directly (-s) or via a sacrebleu dataset (-d).
    if cfg.sources is None and cfg.sacrebleu_dataset is None:
        parser.error(f"You must specify a source (-s) or a sacrebleu dataset (-d)")
    if cfg.sacrebleu_dataset is not None:
        if cfg.references is not None or cfg.sources is not None:
            parser.error(
                f"Cannot use sacrebleu datasets (-d) with manually-specified datasets (-s and -r)"
            )
        try:
            # Resolve TESTSET:LANGPAIR into concrete source/reference file paths.
            testset, langpair = cfg.sacrebleu_dataset.rsplit(":", maxsplit=1)
            cfg.sources = Path_fr(get_source_file(testset, langpair))
            cfg.references = Path_fr(get_reference_files(testset, langpair)[0])
        except ValueError:
            parser.error(
                "SacreBLEU testset format must be TESTSET:LANGPAIR, e.g., wmt20:de-en"
            )
        except Exception as e:
            import sys
            print("SacreBLEU error:", e, file=sys.stderr)
            sys.exit(1)
    # if cfg.metric == "comet":
    #     if cfg.model.endswith(".ckpt") and os.path.exists(cfg.model):
    #         cfg.model_path = cfg.model
    #     else:
    #         cfg.model_path = os.path.join(cfg.model_storage_path, comet_model_mapping[cfg.model])
    return cfg, parser
def compare_command() -> None:
    """CLI that uses comet to compare multiple systems in a pairwise manner.

    Loads sources/translations (and optional references), computes segment
    scores, runs bootstrap resampling plus paired t-tests over all system
    pairs, prints the results, and optionally dumps them to JSON.
    """
    cfg, parser = get_cfg()
    seed_everything(1)
    assert len(cfg.translations) > 1, "You must provide at least 2 translation files"
    with open(cfg.sources(), encoding="utf-8") as fp:
        sources = [line.strip() for line in fp.readlines()]
    translations = []
    for system in cfg.translations:
        with open(system, mode="r", encoding="utf-8") as fp:
            translations.append([line.strip() for line in fp.readlines()])
    # Bug fix: initialize `references` so the `references is not None` check
    # below cannot raise NameError when no reference file is supplied.
    references = None
    if cfg.references is not None:
        with open(cfg.references(), encoding="utf-8") as fp:
            references = [line.strip() for line in fp.readlines()]
        systems = [
            {"src": sources, "mt": system, "ref": references} for system in translations
        ]
    else:
        systems = [{"src": sources, "mt": system} for system in translations]
    seg_scores = score(cfg, systems)
    population_size = seg_scores.shape[1]
    sys_scores = bootstrap_resampling(
        seg_scores,
        sample_size=max(int(population_size * cfg.sample_ratio), 1),
        num_splits=cfg.num_splits,
    )
    results = list(pairwise_bootstrap(sys_scores, cfg.translations))
    # Paired T_Test Results:
    pairs = combinations(zip(cfg.translations, seg_scores), 2)
    for (x_name, x_seg_scores), (y_name, y_seg_scores) in pairs:
        ttest_result = stats.ttest_rel(
            x_seg_scores, y_seg_scores, alternative=cfg.t_test_alternative
        )
        # Attach the t-test stats to the matching bootstrap result entry.
        for res in results:
            if res["x_name"] == x_name and res["y_name"] == y_name:
                res["paired_t-test"] = {
                    "statistic": ttest_result.statistic,
                    "p_value": ttest_result.pvalue,
                }
    for res in results:
        res["x_name"] = os.path.basename(res["x_name"])
        res["y_name"] = os.path.basename(res["y_name"])
    info = {
        "statistical_results": results,
        "source": sources,
        "translations": [
            {
                "name": os.path.basename(name),
                "mt": trans,
                "scores": scores.tolist(),
            }
            for name, trans, scores in zip(cfg.translations, translations, seg_scores)
        ],
    }
    if references is not None:
        info["reference"] = references
    for data in results:
        display_statistical_results(data)
        print()
    t_tests_summary(results, tuple(cfg.translations))
    print()
    if cfg.to_json != "":
        with open(cfg.to_json, "w", encoding="utf-8") as outfile:
            json.dump(info, outfile, ensure_ascii=False, indent=4)
        print("Predictions saved in: {}.".format(cfg.to_json))
if __name__ == "__main__":
compare_command() | 17,345 | 34.4 | 155 | py |
MAPS-mt | MAPS-mt-main/model/alpaca/translate.py | import os
import re
import torch
import argparse
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--model-name-or-path', type=str, required=True, help='model name in the hub or local path')
parser.add_argument('--input','-i', type=str, required=True, help='input file')
parser.add_argument('--output','-o', type=str, required=True, help='output file')
parser.add_argument('--search-algorithm', '-sa', type=str, default='beam', help='search algorithms: sample, beam')
parser.add_argument('--batch', '-b', type=int, default=2, help='batch size')
parser.add_argument('--temperature', '-t', type=float, default=0.1, help='temperature: 0.7 for text generation')
args = parser.parse_args()
seed = args.seed
model_name_or_path = args.model_name_or_path
input_file = args.input
output_file = args.output
search = args.search_algorithm
batch = args.batch
temperature = args.temperature
# read output file
num_done = 0
if os.path.exists(output_file):
with open(output_file, 'r') as out_file:
num_done = len(out_file.readlines())
# get input samples
with open(input_file, 'r') as in_file:
in_file_str = in_file.read()
in_samples = in_file_str.strip().split("\n\n\n")
for idx in range(len(in_samples)):
smp = in_samples[idx]
assert len(re.compile(r'\d\d\d\d\n').findall(smp)) >= 1
in_samples[idx] = smp.replace(f"{idx:04}\n", "", 1).strip()
total = len(in_samples)
in_samples = in_samples[num_done:]
with tqdm(total=total) as pbar:
pbar.update(num_done)
if len(in_samples) == 0:
exit(0)
# Load checkpoints
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, device_map="auto")
print(model.hf_device_map)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False)
tokenizer.padding_side = "left"
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
gen_config = GenerationConfig(
temperature=temperature,
do_sample=True,
num_beams=1,
max_new_tokens=256,
eos_token_id=tokenizer.eos_token_id,
pad_token=tokenizer.pad_token_id,
)
if search == "beam":
gen_config = GenerationConfig(
temperature=temperature,
num_beams=1,
max_new_tokens=256,
eos_token_id=tokenizer.eos_token_id,
pad_token=tokenizer.pad_token_id,
)
# Generate
if len(in_samples) > 0:
torch.manual_seed(args.seed)
with open(output_file, 'a', encoding='utf-8') as fo:
for i in range(0, len(in_samples), batch):
p = in_samples[i:i+batch]
tokenized = tokenizer(p, padding=True, return_tensors="pt")
input_ids = tokenized.input_ids.cuda()
attn_mask = tokenized.attention_mask.cuda()
input_ids = input_ids[:, :-1] if input_ids[0, -1] == tokenizer.eos_token_id else input_ids
attn_mask = attn_mask[:, :-1] if input_ids[0, -1] == tokenizer.eos_token_id else attn_mask
with torch.no_grad():
generated_ids = model.generate(inputs=input_ids, attention_mask=attn_mask, generation_config=gen_config)
for original_input, gen_id in zip(input_ids, generated_ids):
original_text = tokenizer.decode(original_input, skip_special_tokens=True)
gen_text = tokenizer.decode(gen_id, skip_special_tokens=True)
new_text = gen_text.replace(original_text, "").replace("\n", "").strip()
print(new_text, file=fo, flush=True)
pbar.update(len(p)) | 4,295 | 44.221053 | 128 | py |
UNIXKD | UNIXKD-master/teacher.py | import os
import os.path as osp
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR100
from tensorboardX import SummaryWriter
from utils import AverageMeter, accuracy
from models import model_dict
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='train teacher network.')
parser.add_argument('--epoch', type=int, default=240)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--milestones', type=int, nargs='+', default=[150,180,210])
parser.add_argument('--save-interval', type=int, default=40)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--arch', type=str)
parser.add_argument('--gpu-id', type=int)
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
exp_name = 'teacher_{}'.format(args.arch)
exp_path = './experiments/{}'.format(exp_name)
os.makedirs(exp_path, exist_ok=True)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])
trainset = CIFAR100('./data', train=True, transform=transform_train, download=True)
valset = CIFAR100('./data', train=False, transform=transform_test, download=True)
train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=False)
val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=False)
model = model_dict[args.arch](num_classes=100).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
logger = SummaryWriter(osp.join(exp_path, 'events'))
best_acc = -1
for epoch in range(args.epoch):
model.train()
loss_record = AverageMeter()
acc_record = AverageMeter()
start = time.time()
for x, target in train_loader:
optimizer.zero_grad()
x = x.cuda()
target = target.cuda()
output = model(x)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
batch_acc = accuracy(output, target, topk=(1,))[0]
loss_record.update(loss.item(), x.size(0))
acc_record.update(batch_acc.item(), x.size(0))
logger.add_scalar('train/cls_loss', loss_record.avg, epoch+1)
logger.add_scalar('train/cls_acc', acc_record.avg, epoch+1)
run_time = time.time() - start
info = 'train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}\t'.format(
epoch+1, args.epoch, run_time, loss_record.avg, acc_record.avg)
print(info)
model.eval()
acc_record = AverageMeter()
loss_record = AverageMeter()
start = time.time()
for x, target in val_loader:
x = x.cuda()
target = target.cuda()
with torch.no_grad():
output = model(x)
loss = F.cross_entropy(output, target)
batch_acc = accuracy(output, target, topk=(1,))[0]
loss_record.update(loss.item(), x.size(0))
acc_record.update(batch_acc.item(), x.size(0))
run_time = time.time() - start
logger.add_scalar('val/cls_loss', loss_record.avg, epoch+1)
logger.add_scalar('val/cls_acc', acc_record.avg, epoch+1)
info = 'test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}\n'.format(
epoch+1, args.epoch, run_time, loss_record.avg, acc_record.avg)
print(info)
scheduler.step()
# save checkpoint
if (epoch+1) in args.milestones or epoch+1==args.epoch or (epoch+1)%args.save_interval==0:
state_dict = dict(epoch=epoch+1, state_dict=model.state_dict(), acc=acc_record.avg)
name = osp.join(exp_path, 'ckpt/{:03d}.pth'.format(epoch+1))
os.makedirs(osp.dirname(name), exist_ok=True)
torch.save(state_dict, name)
# save best
if acc_record.avg > best_acc:
state_dict = dict(epoch=epoch+1, state_dict=model.state_dict(), acc=acc_record.avg)
name = osp.join(exp_path, 'ckpt/best.pth')
os.makedirs(osp.dirname(name), exist_ok=True)
torch.save(state_dict, name)
best_acc = acc_record.avg
print('best_acc: {:.2f}'.format(best_acc))
| 5,083 | 33.821918 | 110 | py |
UNIXKD | UNIXKD-master/utils.py | import os
import logging
import numpy as np
import time
import torch
from torch.nn import init
import torch.nn.functional as F
import torch.utils.data as data
from PIL import Image
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        # Running statistics: sample count, weighted sum, latest value, mean.
        self.count = 0
        self.sum = 0.0
        self.val = 0.0
        self.avg = 0.0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k

    Args:
        output: (batch, num_classes) class-score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to report.
    Returns:
        List of 1-element tensors with top-k accuracy (in %) per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): `correct` can carry the transposed
        # (non-contiguous) layout of `pred.t()`, and Tensor.view then raises
        # for k > 1 on recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def norm(x):
    """Return *x* scaled to unit Euclidean (L2) norm."""
    return x / np.linalg.norm(x)
def val(loader, args, t_model, s_model, logger, epoch):
    """Evaluate the student model on *loader*; log and return its top-1 accuracy.

    `args` and `t_model` are accepted but not referenced in this body —
    presumably kept for a uniform helper signature.  `logger` may be None,
    in which case TensorBoard logging is skipped.
    """
    s_model.eval()
    acc_record = AverageMeter()
    loss_record = AverageMeter()
    start = time.time()
    for x, target in loader:
        x = x.cuda()
        target = target.cuda()
        with torch.no_grad():
            _, output = s_model(x, is_feat=True)
            loss = F.cross_entropy(output, target)
        batch_acc = accuracy(output, target, topk=(1,))[0]
        # Weight running averages by batch size for correct epoch-level means.
        acc_record.update(batch_acc.item(), x.size(0))
        loss_record.update(loss.item(), x.size(0))
    run_time = time.time() - start
    if logger is not None:
        logger.add_scalar('val/cls_loss', loss_record.avg, epoch+1)
        logger.add_scalar('val/cls_acc', acc_record.avg, epoch+1)
    info = 'student_test_Epoch:{:03d}\t run_time:{:.2f}\t cls_acc:{:.2f}\n'.format(
        epoch+1, run_time, acc_record.avg)
    print(info)
    return acc_record.avg
def cal_center(loader, args, model):
    """Compute per-class feature centroids of ``model`` over ``loader``.

    Runs the model in eval mode, collects the last feature map for every
    sample, and averages features per class label.
    Returns a ``(num_classes, feat_dim)`` CUDA tensor.
    NOTE(review): assumes labels are contiguous 0..max(label); ``args``
    is unused.
    """
    model.eval()
    feat = []
    label = []
    for x, target in loader:
        x = x.cuda()
        target = target.cuda()
        with torch.no_grad():
            batch_feat, output = model(x, is_feat=True)
            # batch_feat[-1] is the final (pre-classifier) feature vector.
            feat.append(batch_feat[-1])
            label.append(target)
    feat = torch.cat(feat, dim=0).cpu().numpy()
    label = torch.cat(label, dim=0).cpu().numpy()
    center = []
    for i in range(max(label)+1):
        index = np.where(label==i)[0]
        center.append(np.mean(feat[index], axis=0))
    center = np.vstack(center)
    center = torch.from_numpy(center).cuda()
    return center
| 2,691 | 24.638095 | 87 | py |
UNIXKD | UNIXKD-master/zoo.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Attention-transfer distillation loss.

    From "Paying More Attention to Attention: Improving the Performance of
    Convolutional Neural Networks via Attention Transfer"
    (code: https://github.com/szagoruyko/attention-transfer).
    """
    def __init__(self, p=2):
        super(Attention, self).__init__()
        self.p = p  # exponent applied channel-wise before spatial pooling
    def forward(self, g_s, g_t):
        """Return one attention loss per paired student/teacher feature map."""
        losses = []
        for f_s, f_t in zip(g_s, g_t):
            losses.append(self.at_loss(f_s, f_t))
        return losses
    def at_loss(self, f_s, f_t):
        """MSE between attention maps, pooling the larger map down first."""
        s_size, t_size = f_s.shape[2], f_t.shape[2]
        if s_size > t_size:
            f_s = F.adaptive_avg_pool2d(f_s, (t_size, t_size))
        elif s_size < t_size:
            f_t = F.adaptive_avg_pool2d(f_t, (s_size, s_size))
        return (self.at(f_s) - self.at(f_t)).pow(2).mean()
    def at(self, f):
        """L2-normalised spatial attention map of a feature tensor."""
        return F.normalize(f.pow(self.p).mean(1).view(f.size(0), -1))
class Similarity(nn.Module):
    """Similarity-preserving knowledge distillation loss (ICCV 2019).

    Matches the row-normalised pairwise sample-similarity (Gram)
    matrices of student and teacher batch features.
    """
    def __init__(self):
        super(Similarity, self).__init__()
    def forward(self, g_s, g_t):
        """Return one similarity loss per paired student/teacher feature map."""
        return [self.similarity_loss(s, t) for s, t in zip(g_s, g_t)]
    def similarity_loss(self, f_s, f_t):
        """Mean squared difference of the two normalised Gram matrices."""
        n = f_s.shape[0]
        s_flat = f_s.view(n, -1)
        t_flat = f_t.view(n, -1)
        G_s = torch.nn.functional.normalize(torch.mm(s_flat, s_flat.t()))
        G_t = torch.nn.functional.normalize(torch.mm(t_flat, t_flat.t()))
        diff = G_t - G_s
        return (diff * diff).view(-1, 1).sum(0) / (n * n)
| 1,745 | 30.178571 | 101 | py |
UNIXKD | UNIXKD-master/student_v0.py | import os
import os.path as osp
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from models import model_dict
from zoo import Attention, Similarity
from dataset import CIFAR100
from utils import accuracy, val, AverageMeter, cal_center
# Metric names accumulated per epoch and logged to TensorBoard
# (see the training loop below): student/teacher confidence, margin,
# entropy statistics split by selected/unselected samples.
items = ['acc', 'loss', \
        's_select_confidence', 's_select_margin', 's_select_entropy', \
        's_else_confidence', 's_else_margin', 's_else_entropy', \
        's_all_confidence', 's_all_margin', 's_all_entropy', \
        't_confidence', 't_margin', 't_entropy', \
        'center_dist']
# Command-line configuration for student distillation training.
parser = argparse.ArgumentParser(description='train student network.')
parser.add_argument('--epoch', type=int, default=240)
parser.add_argument('--batch-size', type=int, default=64)
# k: number of (mixed) samples kept per batch; b: selection budget used
# for the sigmoid mixing mask and the selection counter.
parser.add_argument('--k', type=int, default=48)
parser.add_argument('--b', type=int, default=32)
# w: sharpness of the sigmoid mixing mask over the ranked batch.
parser.add_argument('--w', type=float, default=1000)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--milestones', type=float, nargs='+', default=[150, 180, 210])
parser.add_argument('--teacher-path', type=str, default='./experiments/teacher_resnet32x4')
parser.add_argument('--teacher-ckpt', type=str, default='best')
parser.add_argument('--student-arch', type=str, default='resnet8x4')
parser.add_argument('--ce-weight', type=float, default=0.0)
parser.add_argument('--kd-weight', type=float, default=1.0)
# Optional auxiliary feature-distillation loss on top of vanilla KD.
parser.add_argument('--other-distill', type=str, choices=['AT', 'SP'], default=None)
parser.add_argument('--T', type=float, default=4.0)
parser.add_argument('--strategy', type=int, choices=[0,1,2,3], default=3)
# 0: random, 1: least confidence, 2: margin, 3: entropy
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu-id', type=int, default=0)
args = parser.parse_args()
# Seed all RNGs for reproducibility and pin the visible GPU.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
torch.backends.cudnn.benchmark = True
# Teacher architecture is encoded in the experiment directory name,
# e.g. 'teacher_resnet32x4' -> 'resnet32x4'.
teacher_arch = '_'.join(args.teacher_path.split('/')[-1].split('_')[1:])
exp_name = '{}_student_{}_teacher_{}-{}_strategy{}_k{}_b{}_w{}_seed{}'.format(\
            __file__.split('.')[0].split('_')[-1], \
            args.student_arch, teacher_arch, args.teacher_ckpt, \
            args.strategy, \
            args.k, args.b, args.w, \
            args.seed)
exp_path = './experiments/{}'.format(exp_name)
os.makedirs(exp_path, exist_ok=True)
# CIFAR-100 augmentation/normalisation pipelines (standard CIFAR stats).
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])
trainset = CIFAR100('./data', train=True, transform=transform_train)
valset = CIFAR100('./data', train=False, transform=transform_test)
num_classes = 100
train_loader = DataLoader(trainset, batch_size=args.batch_size, \
                        shuffle=True, num_workers=3, pin_memory=True)
val_loader = DataLoader(valset, batch_size=args.batch_size, \
                        shuffle=False, num_workers=3, pin_memory=True)
# Load the frozen, pre-trained teacher from its checkpoint.
ckpt_path = osp.join('{}/ckpt/{}.pth'.format( \
                    args.teacher_path, args.teacher_ckpt))
t_model = model_dict[teacher_arch](num_classes=num_classes).cuda()
state_dict = torch.load(ckpt_path)['state_dict']
t_model.load_state_dict(state_dict)
t_model.eval()
# TensorBoard writer, student model, SGD optimizer and step-decay schedule.
logger = SummaryWriter(osp.join(exp_path, 'events'))
s_model = model_dict[args.student_arch](num_classes=num_classes).cuda()
optimizer = optim.SGD(s_model.parameters(), lr=args.lr, \
                    momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=args.milestones)
# Optional auxiliary feature-distillation criterion with its fixed weight.
if args.other_distill is not None:
    if args.other_distill == 'AT':
        criterion = Attention()
        weight = 1000
    elif args.other_distill == 'SP':
        criterion = Similarity()
        weight = 3000
best_acc = 0
# counter[e, i] counts how often training sample i was ranked in the
# top-b of its batch at epoch e (CIFAR train set has 50000 samples).
counter = torch.zeros(args.epoch, 50000).cuda()
epoch = 0
# Main distillation loop: rank each batch by an uncertainty score,
# mix high-ranked samples with random ones via a sigmoid mask, keep the
# first k mixed samples, and distill from the frozen teacher.
for epoch in range(args.epoch):
    record = {name:AverageMeter() for name in items}
    # Per-class student feature centroids, refreshed every epoch.
    center = cal_center(val_loader, args, s_model)
    # NOTE(review): the loop index is unused; consider renaming it '_'.
    for fuck, (x, y, k) in enumerate(train_loader):
        s_model.train()
        x = x.cuda()
        y = y.cuda()
        k = k.cuda()
        # Score the raw batch with the current student (no gradients).
        with torch.no_grad():
            s_feats, logits = s_model(x, is_feat=True)
            probs = F.softmax(logits, dim=1)
            # confidence
            conf = probs.max(dim=1)[0]
            # margin
            rank = torch.argsort(probs, dim=1)
            top2 = torch.gather(probs, dim=1, index=rank[:,-2:])
            margin = top2[:,-1] - top2[:,-2]
            # entropy
            entropy = -torch.sum(probs * torch.log(probs), dim=1)
        # Uncertainty score per sample according to the chosen strategy.
        if args.strategy == 0:
            scores = torch.rand(x.size(0)).cuda()
        elif args.strategy == 1:
            scores = 1 - conf
        elif args.strategy == 2:
            scores = -margin
        elif args.strategy == 3:
            scores = entropy
        else:
            raise ValueError('Invalid strategy.')
        # Sigmoid mask over batch rank: ~0 for the top-b positions,
        # ~1 beyond; sharpness controlled by args.w.
        r = torch.arange(x.size(0)).float()
        m = (2*args.b-1) / (2*args.batch_size)
        mask_proto = 1 / (1 + torch.exp(-args.w * (r/args.batch_size - m) ))
        mask_proto = mask_proto.cuda()
        # Beta(1,1) is uniform(0,1): random mixup coefficient.
        lamb = np.random.beta(1, 1)
        mask = lamb * mask_proto.view(-1, 1, 1, 1)
        rank = torch.argsort(scores, descending=True)
        index = torch.randperm(x.size(0)).cuda()
        # Mix rank-ordered images with randomly permuted ones, keep k.
        x = (1-mask) * x[rank] + mask * x[index]
        x = x[:args.k]
        # Track how often each dataset index lands in the top-b.
        counter[epoch, k[rank[:args.b]] ] += 1
        s_feats, s_logits = s_model(x, is_feat=True)
        with torch.no_grad():
            t_feats, t_logits = t_model(x, is_feat=True)
            ## for statistics
            t_probs = F.softmax(t_logits, dim=1)
            # confidence
            t_conf = t_probs.max(dim=1)[0]
            # margin
            t_rank = torch.argsort(t_probs, dim=1)
            t_top2 = torch.gather(t_probs, dim=1, index=t_rank[:,-2:])
            t_margin = t_top2[:,-1] - t_top2[:,-2]
            # entropy
            t_entropy = -torch.sum(t_probs * torch.log(t_probs), dim=1)
        # compute loss
        log_s_probs = F.log_softmax(s_logits / args.T, dim=1)
        t_probs = F.softmax(t_logits / args.T, dim=1)
        # Mixup-style CE: weight the two label sets by the mixing mask.
        tmp = mask.squeeze()[:args.k]
        loss_ce = F.cross_entropy(s_logits, y[rank][:args.k], reduction='none') * (1-tmp) + \
                  F.cross_entropy(s_logits, y[index][:args.k], reduction='none') * tmp
        # Hinton KD: KL between temperature-softened distributions, x T^2.
        loss_kd = F.kl_div(log_s_probs, t_probs, reduction='batchmean') * args.T * args.T
        if args.other_distill is not None:
            loss_other = sum(criterion(s_feats[1:-1], t_feats[1:-1])) if args.other_distill == 'AT' \
                         else sum(criterion(s_feats[-2], t_feats[-2]))
            loss = args.ce_weight * loss_ce.mean() + args.kd_weight * loss_kd + weight * loss_other
        else:
            loss = args.ce_weight * loss_ce.mean() + args.kd_weight * loss_kd
        # BP
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # compute distance between samples and center
        C = center[y[rank[:args.k]]]
        S = s_feats[-1]
        D = torch.pow(C-S, 2).sum(dim=1).sqrt().mean()
        record['center_dist'].update(D.item(), rank[:args.k].size(0))
        # Accuracy is measured on the pre-mix scoring pass (logits vs y).
        batch_acc = accuracy(logits, y, topk=(1,))[0]
        record['acc'].update(batch_acc.item(), logits.size(0))
        record['loss'].update(loss.item(), s_logits.size(0))
        # Statistics split into selected (top-k), non-selected and all.
        i = rank[:args.k].size(0)
        record['s_select_confidence'].update(conf[rank[:args.k]].mean().item(), i)
        record['s_select_margin'].update(margin[rank[:args.k]].mean().item(), i)
        record['s_select_entropy'].update(entropy[rank[:args.k]].mean().item(), i)
        i = rank[args.k:].size(0)
        if i > 0:
            record['s_else_confidence'].update(conf[rank[args.k:]].mean().item(), i)
            record['s_else_margin'].update(margin[rank[args.k:]].mean().item(), i)
            record['s_else_entropy'].update(entropy[rank[args.k:]].mean().item(), i)
        i = conf.size(0)
        record['s_all_confidence'].update(conf.mean().item(), i)
        record['s_all_margin'].update(margin.mean().item(), i)
        record['s_all_entropy'].update(entropy.mean().item(), i)
        i = t_conf.size(0)
        record['t_confidence'].update(t_conf.mean().item(), i)
        record['t_margin'].update(t_margin.mean().item(), i)
        record['t_entropy'].update(t_entropy.mean().item(), i)
    for item in items:
        logger.add_scalar('train/{}'.format(item), record[item].avg, epoch+1)
    # val
    acc = val(val_loader, args, t_model, s_model, logger, epoch)
    if acc > best_acc:
        best_acc = acc
        state_dict = dict(state_dict=s_model.state_dict(), best_acc=best_acc)
        name = osp.join(exp_path, 'ckpt/student_best.pth')
        os.makedirs(osp.dirname(name), exist_ok=True)
        torch.save(state_dict, name)
    scheduler.step()
# Persist the per-sample selection counts for the reproducibility seed.
if args.seed ==0 :
    counter = counter.cpu().numpy()
    np.save(osp.join(exp_path, 'counter.npy'), counter)
| 9,632 | 36.628906 | 101 | py |
UNIXKD | UNIXKD-master/dataset/utils.py | import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import torch
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
    """Build a ``reporthook`` for ``urllib.request.urlretrieve`` that drives a tqdm bar."""
    pbar = tqdm(total=None)
    def bar_update(count, block_size, total_size):
        # The total size may be unknown until the first callback; set it lazily.
        if pbar.total is None and total_size:
            pbar.total = total_size
        progress_bytes = count * block_size
        # tqdm.update takes a delta; pbar.n is the bytes reported so far.
        pbar.update(progress_bytes - pbar.n)
    return bar_update
def calculate_md5(fpath, chunk_size=1024 * 1024):
    """Return the hex MD5 digest of the file at ``fpath``.

    The file is read in ``chunk_size`` blocks so arbitrarily large files
    can be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fpath, 'rb') as stream:
        while True:
            block = stream.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def check_md5(fpath, md5, **kwargs):
    """Return True when the file's MD5 digest equals ``md5``.

    Extra keyword arguments are forwarded to ``calculate_md5``.
    """
    return calculate_md5(fpath, **kwargs) == md5
def check_integrity(fpath, md5=None):
    """Return True when ``fpath`` is an existing file and, if ``md5`` is
    given, its checksum matches."""
    if not os.path.isfile(fpath):
        return False
    return True if md5 is None else check_md5(fpath, md5)
def download_url(url, root, filename=None, md5=None):
    """Download a file from a url and place it in root.
    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    import urllib
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)
    os.makedirs(root, exist_ok=True)
    # check if file is already present locally
    if check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:   # download the file
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(
                url, fpath,
                reporthook=gen_bar_updater()
            )
        except (urllib.error.URLError, IOError) as e:
            # Some mirrors only serve plain HTTP; retry once downgraded.
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(
                    url, fpath,
                    reporthook=gen_bar_updater()
                )
            else:
                raise e
        # check integrity of downloaded file
        if not check_integrity(fpath, md5):
            raise RuntimeError("File not found or corrupted.")
def list_dir(root, prefix=False):
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    names = [entry for entry in os.listdir(root)
             if os.path.isdir(os.path.join(root, entry))]
    if prefix is True:
        names = [os.path.join(root, name) for name in names]
    return names
def list_files(root, suffix, prefix=False):
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    names = [entry for entry in os.listdir(root)
             if os.path.isfile(os.path.join(root, entry)) and entry.endswith(suffix)]
    if prefix is True:
        names = [os.path.join(root, name) for name in names]
    return names
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
    """Download a Google Drive file from  and place it in root.
    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
    import requests
    url = "https://docs.google.com/uc?export=download"
    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)
    os.makedirs(root, exist_ok=True)
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        session = requests.Session()
        response = session.get(url, params={'id': file_id}, stream=True)
        # Large files require confirming Google's virus-scan warning,
        # signalled through a 'download_warning' cookie.
        token = _get_confirm_token(response)
        if token:
            params = {'id': file_id, 'confirm': token}
            response = session.get(url, params=params, stream=True)
        _save_response_content(response, fpath)
def _get_confirm_token(response):
    """Return the value of Google Drive's ``download_warning`` cookie, or None."""
    for cookie_name, cookie_value in response.cookies.items():
        if cookie_name.startswith('download_warning'):
            return cookie_value
    return None
def _save_response_content(response, destination, chunk_size=32768):
    """Stream an HTTP response body to ``destination`` with a tqdm progress bar."""
    with open(destination, "wb") as f:
        pbar = tqdm(total=None)
        progress = 0
        for chunk in response.iter_content(chunk_size):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                progress += len(chunk)
                # tqdm.update takes a delta relative to bytes already shown.
                pbar.update(progress - pbar.n)
        pbar.close()
def _is_tarxz(filename):
    """True for ``.tar.xz`` archives."""
    return filename.endswith(".tar.xz")
def _is_tar(filename):
    """True for plain ``.tar`` archives."""
    return filename.endswith(".tar")
def _is_targz(filename):
    """True for ``.tar.gz`` archives."""
    return filename.endswith(".tar.gz")
def _is_tgz(filename):
    """True for ``.tgz`` archives."""
    return filename.endswith(".tgz")
def _is_gzip(filename):
    """True for standalone gzip files (``.gz`` but not ``.tar.gz``)."""
    return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
    """True for ``.zip`` archives."""
    return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
    """Extract a tar/gzip/zip archive at ``from_path`` into ``to_path``.

    The archive type is inferred from the filename suffix. When
    ``to_path`` is None the archive's own directory is used; when
    ``remove_finished`` is True the archive is deleted afterwards.
    NOTE(review): ``extractall`` performs no member-path sanitisation, so
    a malicious archive could write outside ``to_path`` (tar path
    traversal) — only use with trusted archives.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)
    if _is_tar(from_path):
        with tarfile.open(from_path, 'r') as tar:
            tar.extractall(path=to_path)
    elif _is_targz(from_path) or _is_tgz(from_path):
        with tarfile.open(from_path, 'r:gz') as tar:
            tar.extractall(path=to_path)
    elif _is_tarxz(from_path):
        with tarfile.open(from_path, 'r:xz') as tar:
            tar.extractall(path=to_path)
    elif _is_gzip(from_path):
        # Standalone gzip: decompress to a file named after the archive
        # minus its extension.
        to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
        with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
            out_f.write(zip_f.read())
    elif _is_zip(from_path):
        with zipfile.ZipFile(from_path, 'r') as z:
            z.extractall(to_path)
    else:
        raise ValueError("Extraction of {} not supported".format(from_path))
    if remove_finished:
        os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
                                 md5=None, remove_finished=False):
    """Download an archive from ``url`` (verifying ``md5`` if given) and extract it.

    ``extract_root`` defaults to ``download_root``; ``filename`` defaults
    to the URL basename. ``remove_finished`` deletes the archive after
    extraction.
    """
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)
    download_url(url, download_root, filename, md5)
    archive = os.path.join(download_root, filename)
    print("Extracting {} to {}".format(archive, extract_root))
    extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable):
    """Format an iterable as ``'a', 'b', 'c'`` (single-quoted, comma-separated)."""
    inner = "', '".join(str(item) for item in iterable)
    return "'" + inner + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
| 8,765 | 29.975265 | 109 | py |
UNIXKD | UNIXKD-master/dataset/vision.py | import os
import torch
import torch.utils.data as data
class VisionDataset(data.Dataset):
    """Base class for vision datasets.

    Handles the ``root`` path, the joint ``transforms`` callable and the
    separate ``transform``/``target_transform`` pair (which are wrapped
    into a ``StandardTransform``). Subclasses must implement
    ``__getitem__`` and ``__len__``.
    """
    _repr_indent = 4  # indentation used by __repr__ body lines
    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        # ``torch._six.string_classes`` was removed from modern PyTorch;
        # on Python 3 the only string class is ``str``.
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root
        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        # A joint transforms callable is mutually exclusive with the
        # separate input/target transform pair.
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")
        # for backwards-compatibility
        self.transform = transform
        self.target_transform = target_transform
        if has_separate_transform:
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms
    def __getitem__(self, index):
        """Return the sample at ``index``; must be provided by subclasses."""
        raise NotImplementedError
    def __len__(self):
        """Return the dataset size; must be provided by subclasses."""
        raise NotImplementedError
    def __repr__(self):
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if hasattr(self, "transforms") and self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)
    def _format_transform_repr(self, transform, head):
        # Indent multi-line transform reprs so continuation lines align
        # under the head label.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])
    def extra_repr(self):
        """Extra lines for __repr__; subclasses may override."""
        return ""
class StandardTransform(object):
    """Bundle an input transform and a target transform into one callable."""
    def __init__(self, transform=None, target_transform=None):
        self.transform = transform
        self.target_transform = target_transform
    def __call__(self, input, target):
        """Apply each configured transform and return the (input, target) pair."""
        if self.transform is not None:
            input = self.transform(input)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return input, target
    def _format_transform_repr(self, transform, head):
        # Align continuation lines of multi-line reprs under the head label.
        body = transform.__repr__().splitlines()
        pad = " " * len(head)
        return (["{}{}".format(head, body[0])] +
                ["{}{}".format(pad, line) for line in body[1:]])
    def __repr__(self):
        parts = [self.__class__.__name__]
        if self.transform is not None:
            parts += self._format_transform_repr(self.transform,
                                                 "Transform: ")
        if self.target_transform is not None:
            parts += self._format_transform_repr(self.target_transform,
                                                 "Target transform: ")
        return '\n'.join(parts)
| 2,950 | 35.432099 | 86 | py |
UNIXKD | UNIXKD-master/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (CIFAR ResNet style).

    When ``is_last`` is True, ``forward`` also returns the
    pre-activation tensor (used by distillation losses).
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        preact = y
        y = F.relu(y)
        return (y, preact) if self.is_last else y
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (channel expansion 4).

    When ``is_last`` is True, ``forward`` also returns the
    pre-activation tensor (used by distillation losses).
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        preact = y
        y = F.relu(y)
        return (y, preact) if self.is_last else y
class ResNet(nn.Module):
    """CIFAR-style ResNet with three stages and an 8x8 final avg-pool.

    Args:
        depth: total layer count; must satisfy 6n+2 (basicblock) or
            9n+2 (bottleneck).
        num_filters: four channel widths [stem, stage1, stage2, stage3].
        block_name: 'BasicBlock' or 'Bottleneck' (case-insensitive).
        num_classes: size of the final linear classifier.
    """
    def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, num_filters[1], n)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(num_filters[3] * block.expansion, num_classes)
        # He init for convs; BN scale 1 / shift 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks; the last block is
        flagged ``is_last`` so it also returns its pre-activation."""
        downsample = None
        # Projection shortcut when spatial size or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        """Return the feature-extractor modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.relu)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        """Return the last BatchNorm of each stage (the one before the
        final ReLU), used by some distillation methods."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        """Forward pass.

        With ``is_feat=True`` returns ``([f0..f4], logits)`` where f0-f3
        are stage outputs (post- or pre-activation per ``preact``) and
        f4 is the pooled feature vector; otherwise returns logits only.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)  # 32x32
        f0 = x
        x, f1_pre = self.layer1(x)  # 32x32
        f1 = x
        x, f2_pre = self.layer2(x)  # 16x16
        f2 = x
        x, f3_pre = self.layer3(x)  # 8x8
        f3 = x
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        f4 = x
        x = self.fc(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], x
            else:
                return [f0, f1, f2, f3, f4], x
        else:
            return x
def resnet8(**kwargs):
    """ResNet-8 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=8, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet14(**kwargs):
    """ResNet-14 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=14, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet20(**kwargs):
    """ResNet-20 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=20, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet32(**kwargs):
    """ResNet-32 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=32, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet44(**kwargs):
    """ResNet-44 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=44, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet56(**kwargs):
    """ResNet-56 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=56, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet110(**kwargs):
    """ResNet-110 for CIFAR (16/16/32/64 filters)."""
    return ResNet(depth=110, num_filters=[16, 16, 32, 64], block_name='basicblock', **kwargs)
def resnet8x4(**kwargs):
    """ResNet-8 with 4x wider filters (32/64/128/256)."""
    return ResNet(depth=8, num_filters=[32, 64, 128, 256], block_name='basicblock', **kwargs)
def resnet14x4(**kwargs):
    """ResNet-14 with 4x wider filters (32/64/128/256)."""
    return ResNet(depth=14, num_filters=[32, 64, 128, 256], block_name='basicblock', **kwargs)
def resnet32x4(**kwargs):
    """ResNet-32 with 4x wider filters (32/64/128/256)."""
    return ResNet(depth=32, num_filters=[32, 64, 128, 256], block_name='basicblock', **kwargs)
# Smoke test: run a tiny forward pass and sanity-check the feature/BN hooks.
if __name__ == '__main__':
    import torch
    x = torch.randn(2, 3, 32, 32)
    net = resnet8x4(num_classes=20)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 7,841 | 29.161538 | 116 | py |
UNIXKD | UNIXKD-master/models/mobilenetv2.py | """
MobileNetV2 implementation used in
<Knowledge Distillation via Route Constrained Optimization>
"""
import torch
import torch.nn as nn
import math
__all__ = ['mobilenetv2_T_w', 'mobile_half']
BN = None
def conv_bn(inp, oup, stride):
    """3x3 convolution (stride ``stride``, padding 1, no bias) -> BatchNorm -> ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """Pointwise 1x1 convolution (no bias) -> BatchNorm -> ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project."""
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.blockname = None
        self.stride = stride
        assert stride in [1, 2]
        # Identity shortcut only when spatial size and channels are preserved.
        self.use_res_connect = self.stride == 1 and inp == oup
        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # pw
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # pw-linear
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.names = ['0', '1', '2', '3', '4', '5', '6', '7']
    def forward(self, x):
        branch = self.conv(x)
        return x + branch if self.use_res_connect else branch
class MobileNetV2(nn.Module):
    """mobilenetV2

    Args:
        T: expansion ratio used by all but the first inverted-residual stage.
        feature_dim: number of classifier outputs.
        input_size: input resolution; must be divisible by 32.
        width_mult: channel-width multiplier.
        remove_avg: if True, skip the final average pooling.
    """
    def __init__(self, T,
                 feature_dim,
                 input_size=32,
                 width_mult=1.,
                 remove_avg=False):
        super(MobileNetV2, self).__init__()
        self.remove_avg = remove_avg
        # setting of inverted residual blocks
        # Rows are (expand_ratio t, output channels c, repeats n, stride s).
        self.interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [T, 24, 2, 1],
            [T, 32, 3, 2],
            [T, 64, 4, 2],
            [T, 96, 3, 1],
            [T, 160, 3, 2],
            [T, 320, 1, 1],
        ]
        # building first layer
        assert input_size % 32 == 0
        input_channel = int(32 * width_mult)
        self.conv1 = conv_bn(3, input_channel, 2)
        # building inverted residual blocks
        self.blocks = nn.ModuleList([])
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * width_mult)
            layers = []
            # Only the first block of a stage uses stride s; the rest use 1.
            strides = [s] + [1] * (n - 1)
            for stride in strides:
                layers.append(
                    InvertedResidual(input_channel, output_channel, stride, t)
                )
                input_channel = output_channel
            self.blocks.append(nn.Sequential(*layers))
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.conv2 = conv_1x1_bn(input_channel, self.last_channel)
        H = input_size // (32//2)
        self.avgpool = nn.AvgPool2d(H, ceil_mode=True)
        # building classifier
        #self.classifier = nn.Sequential(
        #    # nn.Dropout(0.5),
        #    nn.Linear(self.last_channel, feature_dim),
        #)
        self.classifier = nn.Linear(self.last_channel, feature_dim)
        self._initialize_weights()
        print(T, width_mult)
    def get_bn_before_relu(self):
        """Return the final (pre-activation) BatchNorm of selected stages,
        used by some distillation methods."""
        bn1 = self.blocks[1][-1].conv[-1]
        bn2 = self.blocks[2][-1].conv[-1]
        bn3 = self.blocks[4][-1].conv[-1]
        bn4 = self.blocks[6][-1].conv[-1]
        return [bn1, bn2, bn3, bn4]
    def get_feat_modules(self):
        """Return the feature-extractor modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.blocks)
        return feat_m
    def forward(self, x, is_feat=False, preact=False):
        """Forward pass.

        With ``is_feat=True`` returns ``([f0..f5], logits)`` where f0-f4
        are intermediate stage outputs and f5 is the pooled feature
        vector; otherwise returns logits only. ``preact`` is accepted
        for interface parity but unused here.
        """
        out = self.conv1(x)
        f0 = out
        out = self.blocks[0](out)
        out = self.blocks[1](out)
        f1 = out
        out = self.blocks[2](out)
        f2 = out
        out = self.blocks[3](out)
        out = self.blocks[4](out)
        f3 = out
        out = self.blocks[5](out)
        out = self.blocks[6](out)
        f4 = out
        out = self.conv2(out)
        if not self.remove_avg:
            out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.classifier(out)
        if is_feat:
            return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
    def _initialize_weights(self):
        # He-style init for convs, unit BN scale, small-normal linear weights.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenetv2_T_w(T, W, feature_dim=100):
    """Build a MobileNetV2 with expansion ``T`` and width multiplier ``W``."""
    return MobileNetV2(T=T, feature_dim=feature_dim, width_mult=W)
def mobile_half(num_classes):
    """Half-width MobileNetV2 (T=6, width 0.5) with ``num_classes`` outputs."""
    return mobilenetv2_T_w(6, 0.5, num_classes)
# Smoke test: run a tiny forward pass and sanity-check the feature/BN hooks.
if __name__ == '__main__':
    x = torch.randn(2, 3, 32, 32)
    net = mobile_half(100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,777 | 27.323529 | 115 | py |
UNIXKD | UNIXKD-master/models/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
# Public API of this module.
__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]
# Reference torchvision checkpoint URLs for the ImageNet-trained variants.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG backbone for small (CIFAR-style) inputs; FC layers removed.

    Feature taps f0..f5 are exposed through ``forward(..., is_feat=True)``
    for knowledge-distillation methods; ``preact=True`` returns the
    pre-ReLU versions of the intermediate taps.
    """

    def __init__(self, cfg, batch_norm=False, num_classes=1000):
        super(VGG, self).__init__()
        self.block0 = self._make_layers(cfg[0], batch_norm, 3)
        self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1])
        self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1])
        self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1])
        self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1])
        self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Adaptive final pool so both 32x32 and 64x64 inputs end at 1x1.
        self.pool4 = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def get_feat_modules(self):
        """Return the backbone stages in execution order."""
        return nn.ModuleList([
            self.block0, self.pool0,
            self.block1, self.pool1,
            self.block2, self.pool2,
            self.block3, self.pool3,
            self.block4, self.pool4,
        ])

    def get_bn_before_relu(self):
        """Last module of blocks 1-4 (a BatchNorm when batch_norm=True,
        since ``_make_layers`` strips the trailing ReLU)."""
        return [self.block1[-1], self.block2[-1],
                self.block3[-1], self.block4[-1]]

    def forward(self, x, is_feat=False, preact=False):
        h = x.shape[2]
        x = F.relu(self.block0(x))
        f0 = x
        x = self.block1(self.pool0(x))
        f1_pre = x
        x = F.relu(x)
        f1 = x
        x = self.block2(self.pool1(x))
        f2_pre = x
        x = F.relu(x)
        f2 = x
        x = self.block3(self.pool2(x))
        f3_pre = x
        x = F.relu(x)
        f3 = x
        # Extra pooling stage only for 64x64 inputs.
        if h == 64:
            x = self.pool3(x)
        x = self.block4(x)
        f4_pre = x
        x = F.relu(x)
        f4 = x
        x = self.pool4(x)
        x = x.view(x.size(0), -1)
        f5 = x
        x = self.classifier(x)
        if not is_feat:
            return x
        if preact:
            return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x
        return [f0, f1, f2, f3, f4, f5], x

    @staticmethod
    def _make_layers(cfg, batch_norm=False, in_channels=3):
        layers = []
        for v in cfg:
            if v == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
                if batch_norm:
                    layers.append(nn.BatchNorm2d(v))
                layers.append(nn.ReLU(inplace=True))
                in_channels = v
        # Drop the trailing ReLU: forward() applies it after the feature tap.
        return nn.Sequential(*layers[:-1])

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialisation: std = sqrt(2 / fan_out).
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# Per-variant channel layouts; inner lists are the five conv blocks.
cfg = {
    'A': [[64], [128], [256, 256], [512, 512], [512, 512]],
    'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]],
    'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],
    'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],
    'S': [[64], [128], [256], [512], [512]],
}


def vgg8(**kwargs):
    """VGG 8-layer model (configuration "S")."""
    return VGG(cfg['S'], **kwargs)


def vgg8_bn(**kwargs):
    """VGG 8-layer model (configuration "S") with batch normalization."""
    return VGG(cfg['S'], batch_norm=True, **kwargs)


def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    return VGG(cfg['A'], **kwargs)


def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    return VGG(cfg['A'], batch_norm=True, **kwargs)


def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    return VGG(cfg['B'], **kwargs)


def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    return VGG(cfg['B'], batch_norm=True, **kwargs)


def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    return VGG(cfg['D'], **kwargs)


def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    return VGG(cfg['D'], batch_norm=True, **kwargs)


def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")."""
    return VGG(cfg['E'], **kwargs)


def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization."""
    return VGG(cfg['E'], batch_norm=True, **kwargs)
if __name__ == '__main__':
    import torch

    # Smoke test: check tap shapes and that the pre-ReLU taps are BN layers.
    x = torch.randn(2, 3, 32, 32)
    net = vgg19_bn(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for feat in feats:
        print(feat.shape, feat.min().item())
    print(logit.shape)
    for module in net.get_bn_before_relu():
        print('pass' if isinstance(module, nn.BatchNorm2d) else 'warning')
| 6,971 | 28.417722 | 98 | py |
UNIXKD | UNIXKD-master/models/classifier.py | from __future__ import print_function
import torch.nn as nn
#########################################
# ===== Classifiers ===== #
#########################################
class LinearClassifier(nn.Module):
    """Single linear layer: a linear probe on top of frozen features."""

    def __init__(self, dim_in, n_label=10):
        super(LinearClassifier, self).__init__()
        self.net = nn.Linear(dim_in, n_label)

    def forward(self, x):
        return self.net(x)


class NonLinearClassifier(nn.Module):
    """Two-layer MLP probe with dropout + batch norm on the hidden layer."""

    def __init__(self, dim_in, n_label=10, p=0.1):
        super(NonLinearClassifier, self).__init__()
        hidden = 200
        layers = [
            nn.Linear(dim_in, hidden),
            nn.Dropout(p=p),
            nn.BatchNorm1d(hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, n_label),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
| 819 | 21.777778 | 51 | py |
UNIXKD | UNIXKD-master/models/resnetv2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Standard two-conv residual block (post-activation ResNet style).

    When ``is_last`` is True, ``forward`` also returns the pre-activation
    tensor so distillation methods can tap features before the final ReLU.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            # Projection shortcut when the spatial size or channels change.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        preact = out
        out = F.relu(out)
        if self.is_last:
            return out, preact
        return out


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel expansion)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        preact = out
        out = F.relu(out)
        if self.is_last:
            return out, preact
        return out


class ResNet(nn.Module):
    """ResNet for 32x32 inputs with distillation feature taps f0..f5."""

    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch so each block
        # starts as an identity mapping; improves accuracy by ~0.2-0.3%
        # according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def get_feat_modules(self):
        """Return the backbone stages in execution order."""
        return nn.ModuleList([self.conv1, self.bn1, self.layer1,
                              self.layer2, self.layer3, self.layer4])

    def get_bn_before_relu(self):
        """Return the final BN of each stage (the one feeding the last ReLU)."""
        if isinstance(self.layer1[0], Bottleneck):
            return [self.layer1[-1].bn3, self.layer2[-1].bn3,
                    self.layer3[-1].bn3, self.layer4[-1].bn3]
        elif isinstance(self.layer1[0], BasicBlock):
            return [self.layer1[-1].bn2, self.layer2[-1].bn2,
                    self.layer3[-1].bn2, self.layer4[-1].bn2]
        raise NotImplementedError('ResNet unknown block error !!!')

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i, s in enumerate(strides):
            # The last block returns (out, preact) for the feature taps.
            layers.append(block(self.in_planes, planes, s, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_feat=False, preact=False):
        out = F.relu(self.bn1(self.conv1(x)))
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out, f4_pre = self.layer4(out)
        f4 = out
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                # BUG FIX: this branch used to return ``[[...], out]`` (a
                # two-element list) while every other branch — and every
                # other model in this repo — returns a (features, logits)
                # tuple; unified on the tuple form. Callers that unpack
                # ``feats, logit = net(...)`` are unaffected.
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out
            return [f0, f1, f2, f3, f4, f5], out
        return out
def ResNet18(**kwargs):
    """ResNet-18 (BasicBlock x [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)


def ResNet34(**kwargs):
    """ResNet-34 (BasicBlock x [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)


def ResNet50(**kwargs):
    """ResNet-50 (Bottleneck x [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)


def ResNet101(**kwargs):
    """ResNet-101 (Bottleneck x [3, 4, 23, 3])."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)


def ResNet152(**kwargs):
    """ResNet-152 (Bottleneck x [3, 8, 36, 3])."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if __name__ == '__main__':
    # Smoke test the smallest variant.
    net = ResNet18(num_classes=100)
    x = torch.randn(2, 3, 32, 32)
    feats, logit = net(x, is_feat=True, preact=True)
    for feat in feats:
        print(feat.shape, feat.min().item())
    print(logit.shape)
    for module in net.get_bn_before_relu():
        print('pass' if isinstance(module, nn.BatchNorm2d) else 'warning')
| 6,915 | 33.753769 | 106 | py |
UNIXKD | UNIXKD-master/models/ShuffleNetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across ``groups`` groups."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> transpose the group axes -> [N,C,H,W]
        batch, channels, height, width = x.size()
        per_group = channels // self.groups
        grouped = x.view(batch, self.groups, per_group, height, width)
        return grouped.permute(0, 2, 1, 3, 4).reshape(batch, channels, height, width)


class Bottleneck(nn.Module):
    """ShuffleNet v1 unit: grouped 1x1 -> shuffle -> depthwise 3x3 -> grouped 1x1.

    With stride 2 the (avg-pooled) input is concatenated to the output, so
    the unit only produces the *additional* channels; with stride 1 an
    identity shortcut is added instead.
    """

    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride
        mid_planes = int(out_planes / 4)
        # The very first unit (24 input channels) uses an ungrouped 1x1 conv.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride,
                               padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        if self.stride == 2:
            preact = torch.cat([out, res], 1)
        else:
            preact = out + res
        out = F.relu(preact)
        if self.is_last:
            return out, preact
        return out


class ShuffleNet(nn.Module):
    """ShuffleNet v1 backbone for CIFAR-sized inputs."""

    def __init__(self, cfg, num_classes=10):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], num_classes)

    def _make_layer(self, out_planes, num_blocks, groups):
        layers = []
        for i in range(num_blocks):
            # The first unit of a stage downsamples and concatenates the
            # input, so it only needs to produce the remaining channels.
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes - cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
            self.in_planes = out_planes
        return nn.Sequential(*layers)

    def get_feat_modules(self):
        return nn.ModuleList([self.conv1, self.bn1,
                              self.layer1, self.layer2, self.layer3])

    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNet currently is not supported for "Overhaul" teacher')

    def forward(self, x, is_feat=False, preact=False):
        out = F.relu(self.bn1(self.conv1(x)))
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if not is_feat:
            return out
        if preact:
            return [f0, f1_pre, f2_pre, f3_pre, f4], out
        return [f0, f1, f2, f3, f4], out


def ShuffleV1(**kwargs):
    """Standard ShuffleNet v1 (groups=3) configuration."""
    cfg = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3
    }
    return ShuffleNet(cfg, **kwargs)
if __name__ == '__main__':
    x = torch.randn(2, 3, 32, 32)
    net = ShuffleV1(num_classes=100)
    import time

    # Time a single forward pass with feature taps.
    start = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    print(time.time() - start)
    for feat in feats:
        print(feat.shape, feat.min().item())
    print(logit.shape)
| 4,732 | 33.05036 | 126 | py |
UNIXKD | UNIXKD-master/models/util.py | from __future__ import print_function
import torch.nn as nn
import math
class Paraphraser(nn.Module):
    """Paraphrasing Complex Network: Network Compression via Factor Transfer"""

    def __init__(self, t_shape, k=0.5, use_bn=False):
        super(Paraphraser, self).__init__()
        in_channel = t_shape[1]
        out_channel = int(t_shape[1] * k)

        def norm(channels):
            # Empty Sequential placeholder keeps the layer indices stable
            # when batch norm is disabled.
            return nn.BatchNorm2d(channels) if use_bn else nn.Sequential()

        self.encoder = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            norm(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(in_channel, out_channel, 3, 1, 1),
            norm(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            norm(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(out_channel, out_channel, 3, 1, 1),
            norm(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.ConvTranspose2d(out_channel, in_channel, 3, 1, 1),
            norm(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.ConvTranspose2d(in_channel, in_channel, 3, 1, 1),
            norm(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
        )

    def forward(self, f_s, is_factor=False):
        factor = self.encoder(f_s)
        if is_factor:
            return factor
        return factor, self.decoder(factor)


class Translator(nn.Module):
    """Map student features into the paraphraser's factor space."""

    def __init__(self, s_shape, t_shape, k=0.5, use_bn=True):
        super(Translator, self).__init__()
        in_channel = s_shape[1]
        out_channel = int(t_shape[1] * k)

        def norm(channels):
            return nn.BatchNorm2d(channels) if use_bn else nn.Sequential()

        self.encoder = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            norm(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(in_channel, out_channel, 3, 1, 1),
            norm(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            norm(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
        )

    def forward(self, f_s):
        return self.encoder(f_s)
class Connector(nn.Module):
    """Connect for Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons"""

    def __init__(self, s_shapes, t_shapes):
        super(Connector, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))

    @staticmethod
    def _make_conenctors(s_shapes, t_shapes):
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        connectors = []
        for s, t in zip(s_shapes, t_shapes):
            if s[1] == t[1] and s[2] == t[2]:
                # Shapes already agree: identity mapping.
                connectors.append(nn.Sequential())
            else:
                connectors.append(ConvReg(s, t, use_relu=False))
        return connectors

    def forward(self, g_s):
        # One connector per student feature, applied position-wise.
        return [self.connectors[i](feat) for i, feat in enumerate(g_s)]


class ConnectorV2(nn.Module):
    """A Comprehensive Overhaul of Feature Distillation (ICCV 2019)"""

    def __init__(self, s_shapes, t_shapes):
        super(ConnectorV2, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))

    def _make_conenctors(self, s_shapes, t_shapes):
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        t_channels = [t[1] for t in t_shapes]
        s_channels = [s[1] for s in s_shapes]
        return nn.ModuleList([self._build_feature_connector(t, s)
                              for t, s in zip(t_channels, s_channels)])

    @staticmethod
    def _build_feature_connector(t_channel, s_channel):
        # 1x1 conv + BN mapping student channels onto teacher channels,
        # with He-initialised conv weights and identity-initialised BN.
        conv = nn.Conv2d(s_channel, t_channel, kernel_size=1, stride=1, padding=0, bias=False)
        fan_out = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels
        conv.weight.data.normal_(0, math.sqrt(2. / fan_out))
        bn = nn.BatchNorm2d(t_channel)
        bn.weight.data.fill_(1)
        bn.bias.data.zero_()
        return nn.Sequential(conv, bn)

    def forward(self, g_s):
        return [self.connectors[i](feat) for i, feat in enumerate(g_s)]
class ConvReg(nn.Module):
    """Convolutional regression for FitNet.

    Maps a student feature map onto the teacher's channel count and spatial
    size: stride-2 conv when the student map is twice as large, transposed
    conv when it is half as large, otherwise a valid conv that shrinks it
    down to the target size.

    Raises:
        NotImplementedError: if the student map is smaller than the
            teacher's and not exactly half its size.
    """

    def __init__(self, s_shape, t_shape, use_relu=True):
        super(ConvReg, self).__init__()
        self.use_relu = use_relu
        s_N, s_C, s_H, s_W = s_shape
        t_N, t_C, t_H, t_W = t_shape
        if s_H == 2 * t_H:
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=3, stride=2, padding=1)
        elif s_H * 2 == t_H:
            self.conv = nn.ConvTranspose2d(s_C, t_C, kernel_size=4, stride=2, padding=1)
        elif s_H >= t_H:
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=(1 + s_H - t_H, 1 + s_W - t_W))
        else:
            # BUG FIX: was ``raise NotImplemented(...)`` — ``NotImplemented``
            # is a constant, not an exception class, so that line raised
            # ``TypeError: 'NotImplementedType' object is not callable``
            # instead of the intended error.
            raise NotImplementedError('student size {}, teacher size {}'.format(s_H, t_H))
        self.bn = nn.BatchNorm2d(t_C)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        if self.use_relu:
            return self.relu(self.bn(x))
        return self.bn(x)
class Regress(nn.Module):
    """Simple Linear Regression for hints"""

    def __init__(self, dim_in=1024, dim_out=1024):
        super(Regress, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Flatten everything past the batch dimension before projecting.
        flat = x.view(x.shape[0], -1)
        return self.relu(self.linear(flat))
class Embed(nn.Module):
    """Linear projection followed by L2 normalization."""

    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)

    def forward(self, x):
        return self.l2norm(self.linear(x.view(x.shape[0], -1)))


class LinearEmbed(nn.Module):
    """Linear Embedding"""

    def __init__(self, dim_in=1024, dim_out=128):
        super(LinearEmbed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        return self.linear(x.view(x.shape[0], -1))


class MLPEmbed(nn.Module):
    """non-linear embed by MLP"""

    def __init__(self, dim_in=1024, dim_out=128):
        super(MLPEmbed, self).__init__()
        self.linear1 = nn.Linear(dim_in, 2 * dim_out)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(2 * dim_out, dim_out)
        self.l2norm = Normalize(2)

    def forward(self, x):
        hidden = self.relu(self.linear1(x.view(x.shape[0], -1)))
        return self.l2norm(self.linear2(hidden))


class Normalize(nn.Module):
    """Lp normalization along dim 1 (default: L2)."""

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
        return x.div(norm)


class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, feat):
        return feat.view(feat.size(0), -1)


class PoolEmbed(nn.Module):
    """Pool a tapped feature map, then embed and L2-normalize it."""

    # (pool_size, channels) expected at each tapped layer.
    _LAYER_SPECS = {0: (8, 16), 1: (8, 16), 2: (6, 32), 3: (4, 64), 4: (1, 64)}

    def __init__(self, layer=0, dim_out=128, pool_type='avg'):
        super().__init__()
        if layer not in self._LAYER_SPECS:
            raise NotImplementedError('layer not supported: {}'.format(layer))
        pool_size, nChannels = self._LAYER_SPECS[layer]
        self.embed = nn.Sequential()
        # Layer 4 is already 1x1-ish, so no pooling stage is added for it.
        if layer <= 3:
            if pool_type == 'max':
                self.embed.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
            elif pool_type == 'avg':
                self.embed.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
        self.embed.add_module('Flatten', Flatten())
        self.embed.add_module('Linear', nn.Linear(nChannels * pool_size * pool_size, dim_out))
        self.embed.add_module('Normalize', Normalize(2))

    def forward(self, x):
        return self.embed(x)
if __name__ == '__main__':
    import torch

    # Map a small student feature pyramid onto a wider teacher pyramid.
    g_s = [
        torch.randn(2, 16, 16, 16),
        torch.randn(2, 32, 8, 8),
        torch.randn(2, 64, 4, 4),
    ]
    g_t = [
        torch.randn(2, 32, 16, 16),
        torch.randn(2, 64, 8, 8),
        torch.randn(2, 128, 4, 4),
    ]
    net = ConnectorV2([s.shape for s in g_s], [t.shape for t in g_t])
    for feat in net(g_s):
        print(feat.shape)
| 9,622 | 32.068729 | 107 | py |
UNIXKD | UNIXKD-master/models/ShuffleNetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle across ``groups`` groups (default 2)."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap the two group axes -> [N,C,H,W]
        batch, channels, height, width = x.size()
        per_group = channels // self.groups
        return (x.view(batch, self.groups, per_group, height, width)
                .permute(0, 2, 1, 3, 4)
                .reshape(batch, channels, height, width))


class SplitBlock(nn.Module):
    """Split the channel dimension at ``ratio`` into two tensors."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        return x[:, :split_at, :, :], x[:, split_at:, :, :]


class BasicBlock(nn.Module):
    """ShuffleNet v2 basic unit: split, transform one half, concat, shuffle.

    When ``is_last`` is True, also returns the pre-activation concat for
    distillation feature taps.
    """

    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        branch_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(branch_channels, branch_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(branch_channels)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(branch_channels, branch_channels,
                               kernel_size=3, stride=1, padding=1,
                               groups=branch_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(branch_channels)
        self.conv3 = nn.Conv2d(branch_channels, branch_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(branch_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        left, right = self.split(x)
        out = F.relu(self.bn1(self.conv1(right)))
        out = self.bn2(self.conv2(out))
        preact = self.bn3(self.conv3(out))
        out = F.relu(preact)
        preact = torch.cat([left, preact], 1)
        out = self.shuffle(torch.cat([left, out], 1))
        if self.is_last:
            return out, preact
        return out


class DownBlock(nn.Module):
    """ShuffleNet v2 downsampling unit: two stride-2 branches, concat, shuffle."""

    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # Left branch: depthwise stride-2 then pointwise.
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1,
                               groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # Right branch: pointwise, depthwise stride-2, pointwise.
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1,
                               groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        left = self.bn1(self.conv1(x))
        left = F.relu(self.bn2(self.conv2(left)))
        right = F.relu(self.bn3(self.conv3(x)))
        right = self.bn4(self.conv4(right))
        right = F.relu(self.bn5(self.conv5(right)))
        return self.shuffle(torch.cat([left, right], 1))
class ShuffleNetV2(nn.Module):
    """ShuffleNet v2 backbone adapted for CIFAR-sized (32x32) inputs."""

    def __init__(self, net_size, num_classes=10):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        # CIFAR stem: 1x1 conv, no stride-2 conv and no max-pool.
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], num_classes)

    def _make_layer(self, out_channels, num_blocks):
        # Each stage starts with a stride-2 DownBlock, then basic units;
        # the last unit also returns the pre-activation tap.
        stage = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            stage.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
            self.in_channels = out_channels
        return nn.Sequential(*stage)

    def get_feat_modules(self):
        return nn.ModuleList([self.conv1, self.bn1,
                              self.layer1, self.layer2, self.layer3])

    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNetV2 currently is not supported for "Overhaul" teacher')

    def forward(self, x, is_feat=False, preact=False):
        out = F.relu(self.bn1(self.conv1(x)))
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if not is_feat:
            return out
        if preact:
            return [f0, f1_pre, f2_pre, f3_pre, f4], out
        return [f0, f1, f2, f3, f4], out


# Width-multiplier presets: (stage channels..., final conv channels).
configs = {
    0.2: {'out_channels': (40, 80, 160, 512), 'num_blocks': (3, 3, 3)},
    0.3: {'out_channels': (40, 80, 160, 512), 'num_blocks': (3, 7, 3)},
    0.5: {'out_channels': (48, 96, 192, 1024), 'num_blocks': (3, 7, 3)},
    1: {'out_channels': (116, 232, 464, 1024), 'num_blocks': (3, 7, 3)},
    1.5: {'out_channels': (176, 352, 704, 1024), 'num_blocks': (3, 7, 3)},
    2: {'out_channels': (224, 488, 976, 2048), 'num_blocks': (3, 7, 3)},
}


def ShuffleV2(**kwargs):
    """ShuffleNet v2 at the 1.0x width setting."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
    net = ShuffleV2(num_classes=100)
    x = torch.randn(3, 3, 32, 32)
    import time

    # Time a single forward pass with feature taps.
    start = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    print(time.time() - start)
    for feat in feats:
        print(feat.shape, feat.min().item())
    print(logit.shape)
| 7,074 | 32.530806 | 107 | py |
UNIXKD | UNIXKD-master/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    """Wide-ResNet pre-activation basic block (BN-ReLU-conv, twice)."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection shortcut only when the shapes differ.
        self.convShortcut = None
        if not self.equalInOut:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        if self.equalInOut:
            out = self.relu1(self.bn1(x))
            out = self.relu2(self.bn2(self.conv1(out)))
            shortcut = x
        else:
            # Shared pre-activation feeds both the residual and the shortcut.
            x = self.relu1(self.bn1(x))
            out = self.relu2(self.bn2(self.conv1(x)))
            shortcut = self.convShortcut(x)
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        return torch.add(shortcut, self.conv2(out))


class NetworkBlock(nn.Module):
    """A stage of ``nb_layers`` blocks; only the first may change stride/width."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = []
        for i in range(nb_layers):
            first = (i == 0)
            blocks.append(block(in_planes if first else out_planes, out_planes,
                                stride if first else 1, dropRate))
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) for 32x32 inputs.

    ``depth`` must satisfy depth = 6n + 4.
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block.
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # Final BN/ReLU before global average pooling and the classifier.
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialisation: std = sqrt(2 / fan_out).
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def get_feat_modules(self):
        return nn.ModuleList([self.conv1, self.block1, self.block2, self.block3])

    def get_bn_before_relu(self):
        # The BN feeding the first ReLU of each stage boundary.
        return [self.block2.layer[0].bn1, self.block3.layer[0].bn1, self.bn1]

    def forward(self, x, is_feat=False, preact=False):
        out = self.conv1(x)
        f0 = out
        out = self.block1(out)
        f1 = out
        out = self.block2(out)
        f2 = out
        out = self.block3(out)
        f3 = out
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        f4 = out
        out = self.fc(out)
        if not is_feat:
            return out
        if preact:
            # Pre-activation taps: apply the next stage's leading BN (no ReLU).
            f1 = self.block2.layer[0].bn1(f1)
            f2 = self.block3.layer[0].bn1(f2)
            f3 = self.bn1(f3)
        return [f0, f1, f2, f3, f4], out
def wrn(**kwargs):
    """Build a Wide Residual Network from explicit keyword arguments.

    Expects at least ``depth`` and ``num_classes``; see WideResNet.
    """
    return WideResNet(**kwargs)
def wrn_40_2(**kwargs):
    """WRN-40-2: depth 40, widening factor 2."""
    return WideResNet(depth=40, widen_factor=2, **kwargs)
def wrn_40_1(**kwargs):
    """WRN-40-1: depth 40, widening factor 1."""
    return WideResNet(depth=40, widen_factor=1, **kwargs)
def wrn_16_2(**kwargs):
    """WRN-16-2: depth 16, widening factor 2."""
    return WideResNet(depth=16, widen_factor=2, **kwargs)
def wrn_16_1(**kwargs):
    """WRN-16-1: depth 16, widening factor 1."""
    return WideResNet(depth=16, widen_factor=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a CIFAR-sized batch through WRN-40-2 and check
    # the hooks used for feature-based distillation.
    import torch

    x = torch.randn(2, 3, 32, 32)
    net = wrn_40_2(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)

    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)

    for m in net.get_bn_before_relu():
        print('pass' if isinstance(m, nn.BatchNorm2d) else 'warning')
| 5,519 | 31.280702 | 116 | py |
GraphLIME | GraphLIME-master/graphlime/__init__.py | __version__ = '1.2.0'
__all__ = [
'GraphLIME'
]
import numpy as np
from sklearn.linear_model import LassoLars
import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import k_hop_subgraph
class GraphLIME:
    """Local, model-agnostic explainer for GNN node predictions.

    Fits an HSIC-Lasso surrogate on the k-hop subgraph around a node and
    returns one importance coefficient per input feature.
    """
    def __init__(self, model, hop=2, rho=0.1, cached=True):
        # hop: neighbourhood radius used to collect local samples.
        # rho: L1 regularisation strength of the Lasso solver.
        self.hop = hop
        self.rho = rho
        self.model = model
        self.cached = cached
        self.cached_result = None
        self.model.eval()
    def __flow__(self):
        # Mirror the message-passing direction of the wrapped model,
        # falling back to the torch_geometric default.
        for module in self.model.modules():
            if isinstance(module, MessagePassing):
                return module.flow
        return 'source_to_target'
    def __subgraph__(self, node_idx, x, y, edge_index, **kwargs):
        """Restrict x, y, edge_index and tensor kwargs to node_idx's k-hop subgraph."""
        num_nodes, num_edges = x.size(0), edge_index.size(1)
        subset, edge_index, mapping, edge_mask = k_hop_subgraph(
            node_idx, self.hop, edge_index, relabel_nodes=True,
            num_nodes=num_nodes, flow=self.__flow__())
        x = x[subset]
        y = y[subset]
        # Slice auxiliary tensors that are aligned with nodes or edges.
        for key, item in kwargs.items():
            if torch.is_tensor(item) and item.size(0) == num_nodes:
                item = item[subset]
            elif torch.is_tensor(item) and item.size(0) == num_edges:
                item = item[edge_mask]
            kwargs[key] = item
        return x, y, edge_index, mapping, edge_mask, kwargs
    def __init_predict__(self, x, edge_index, **kwargs):
        """Return class probabilities for every node, using the cache when valid."""
        if self.cached and self.cached_result is not None:
            if x.size(0) != self.cached_result.size(0):
                # Bug fix: the cached size and the observed size were
                # swapped in the original error message.
                raise RuntimeError(
                    'Cached {} number of nodes, but found {}.'.format(
                        self.cached_result.size(0), x.size(0)))
        # get the initial prediction (model is assumed to output log-probs)
        if not self.cached or self.cached_result is None:
            with torch.no_grad():
                log_logits = self.model(x=x, edge_index=edge_index, **kwargs)
                probas = log_logits.exp()
            self.cached_result = probas
        return self.cached_result
    def __compute_kernel__(self, x, reduce):
        """Gaussian kernel over all row pairs of x.

        Returns shape (n, n, 1) if reduce else (n, n, d): per-dimension
        squared distances, optionally summed over features.
        """
        assert x.ndim == 2, x.shape
        n, d = x.shape
        dist = x.reshape(1, n, d) - x.reshape(n, 1, d)  # (n, n, d)
        dist = dist ** 2
        if reduce:
            dist = np.sum(dist, axis=-1, keepdims=True)  # (n, n, 1)
        std = np.sqrt(d)
        # Fixed bandwidth 0.1 * 2d; the epsilon guards against division by zero.
        K = np.exp(-dist / (2 * std ** 2 * 0.1 + 1e-10))  # (n, n, 1) or (n, n, d)
        return K
    def __compute_gram_matrix__(self, x):
        """Centre and Frobenius-normalise a kernel matrix (stable variant)."""
        # Numerically stable alternative to the textbook H @ x @ H
        # double-centering, which suffers from the matrix product.
        G = x - np.mean(x, axis=0, keepdims=True)
        G = G - np.mean(G, axis=1, keepdims=True)
        G = G / (np.linalg.norm(G, ord='fro', axis=(0, 1), keepdims=True) + 1e-10)
        return G
    def explain_node(self, node_idx, x, edge_index, **kwargs):
        """Return a (d,) array of non-negative feature importances for node_idx."""
        probas = self.__init_predict__(x, edge_index, **kwargs)
        x, probas, _, _, _, _ = self.__subgraph__(
            node_idx, x, probas, edge_index, **kwargs)
        x = x.detach().cpu().numpy()       # (n, d)
        y = probas.detach().cpu().numpy()  # (n, classes)
        n, d = x.shape
        K = self.__compute_kernel__(x, reduce=False)  # (n, n, d)
        L = self.__compute_kernel__(y, reduce=True)   # (n, n, 1)
        K_bar = self.__compute_gram_matrix__(K)  # (n, n, d)
        L_bar = self.__compute_gram_matrix__(L)  # (n, n, 1)
        K_bar = K_bar.reshape(n ** 2, d)  # (n ** 2, d)
        L_bar = L_bar.reshape(n ** 2,)    # (n ** 2,)
        # NOTE(review): the `normalize=` keyword was removed from
        # scikit-learn >= 1.2; newer versions require dropping it.
        solver = LassoLars(self.rho, fit_intercept=False, normalize=False, positive=True)
        solver.fit(K_bar * n, L_bar * n)
        return solver.coef_
| 3,882 | 29.81746 | 89 | py |
GraphLIME | GraphLIME-master/exp/noise_features/other_explainers.py | import copy
import numpy as np
from tqdm import tqdm
from sklearn.linear_model import Ridge
import torch
class LIME:
    """LIME-style baseline: fit a ridge surrogate on Gaussian perturbations
    of one node's features, keeping the rest of the graph fixed."""
    def __init__(self, model, num_samples, cached=True):
        # num_samples: number of perturbed copies drawn per explained node.
        self.model = model
        self.num_samples = num_samples
        self.cached = cached
        self.cached_result = None
        self.model.eval()
    def __init_predict__(self, x, edge_index, **kwargs):
        """Return class probabilities for every node, using the cache when valid."""
        if self.cached and self.cached_result is not None:
            if x.size(0) != self.cached_result.size(0):
                # Bug fix: the cached size and the observed size were
                # swapped in the original error message.
                raise RuntimeError(
                    'Cached {} number of nodes, but found {}.'.format(
                        self.cached_result.size(0), x.size(0)))
        if not self.cached or self.cached_result is None:
            # get the initial prediction (model outputs log-probabilities)
            with torch.no_grad():
                log_logits = self.model(x=x, edge_index=edge_index, **kwargs)
                probas = log_logits.exp()
            self.cached_result = probas
        return self.cached_result
    def explain_node(self, node_idx, x, edge_index, **kwargs):
        """Return ridge-regression coefficients (one per feature) for node_idx."""
        probas = self.__init_predict__(x, edge_index, **kwargs)
        proba, label = probas[node_idx, :].max(dim=0)
        x_ = copy.deepcopy(x)
        original_feats = x[node_idx, :]
        # Start the sample set with the unperturbed node itself.
        sample_x = [original_feats.detach().cpu().numpy()]
        sample_y = [proba.item()]
        for _ in tqdm(range(self.num_samples), desc='collect samples', leave=False):
            # Perturb only the explained node's features with unit Gaussian noise.
            x_[node_idx, :] = original_feats + torch.randn_like(original_feats)
            with torch.no_grad():
                log_logits = self.model(x=x_, edge_index=edge_index, **kwargs)
                probas_ = log_logits.exp()
            # Track the probability of the originally predicted class.
            proba_ = probas_[node_idx, label]
            sample_x.append(x_[node_idx, :].detach().cpu().numpy())
            sample_y.append(proba_.item())
        sample_x = np.array(sample_x)
        sample_y = np.array(sample_y)
        solver = Ridge(alpha=0.1)
        solver.fit(sample_x, sample_y)
        return solver.coef_
class Greedy:
    """Greedy baseline: score each feature by the relative drop in the
    predicted-class probability when that feature is zeroed for all nodes."""
    def __init__(self, model, cached=True):
        self.model = model
        self.cached = cached
        self.cached_result = None
        self.model.eval()
    def __init_predict__(self, x, edge_index, **kwargs):
        """Return class probabilities for every node, using the cache when valid."""
        if self.cached and self.cached_result is not None:
            if x.size(0) != self.cached_result.size(0):
                # Bug fix: the cached size and the observed size were
                # swapped in the original error message.
                raise RuntimeError(
                    'Cached {} number of nodes, but found {}.'.format(
                        self.cached_result.size(0), x.size(0)))
        if not self.cached or self.cached_result is None:
            # get the initial prediction (model outputs log-probabilities)
            with torch.no_grad():
                log_logits = self.model(x=x, edge_index=edge_index, **kwargs)
                probas = log_logits.exp()
            self.cached_result = probas
        return self.cached_result
    def explain_node(self, node_idices, x, edge_index, **kwargs):
        """Return a (len(node_idices), num_feats) array of relative probability drops.

        Note: the misspelled parameter name `node_idices` is kept for
        backward compatibility with existing callers.
        """
        if isinstance(node_idices, int):
            node_idices = [node_idices]
        probas = self.__init_predict__(x, edge_index, **kwargs)
        probas, labels = probas[node_idices, :].max(dim=1)  # (m,), (m,)
        num_nodes, num_feats = len(node_idices), x.size(1)
        delta_probas = np.zeros((num_nodes, num_feats))  # (m, #feats)
        self.model.eval()
        for feat_idx in tqdm(range(num_feats), desc='search features', leave=False):
            # Zero one feature column across the whole graph and re-predict.
            x_ = copy.deepcopy(x)
            x_[:, feat_idx] = 0.0
            with torch.no_grad():
                log_logits = self.model(x=x_, edge_index=edge_index, **kwargs)
                probas_ = log_logits.exp()
            probas_ = probas_[node_idices, :]  # (m, #classes)
            for node_idx in range(num_nodes):
                proba = probas[node_idx].item()
                label = labels[node_idx]
                proba_ = probas_[node_idx, label].item()
                delta_probas[node_idx, feat_idx] = abs((proba - proba_) / proba)
        return delta_probas
class Random:
    """Baseline explainer that picks K feature indices uniformly at random."""
    def __init__(self, num_feats, K):
        self.num_feats, self.K = num_feats, K
    def explain_node(self):
        """Return K feature indices drawn (with replacement) from range(num_feats)."""
        return np.random.choice(self.num_feats, self.K)
| 4,242 | 30.902256 | 84 | py |
GraphLIME | GraphLIME-master/exp/noise_features/exp_noise_features.py | from os import sys, path as osp
sys.path.append(osp.dirname(osp.dirname(osp.dirname(__file__))))
import random
import argparse
import warnings
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
from torch_geometric.nn import GNNExplainer
from models import GAT
from graphlime import GraphLIME
from other_explainers import LIME, Greedy, Random
from utils import prepare_data, extract_test_nodes, train, evaluate, plot_dist
warnings.filterwarnings('ignore')
INPUT_DIM = {
'Cora': 1433,
'Pubmed': 500
}
DIRNAME = osp.dirname(__file__)
def build_args():
    """Parse command-line options for the noisy-feature experiment."""
    parser = argparse.ArgumentParser()
    # dataset and GNN training
    parser.add_argument('--dataset', type=str, default='Cora', help='dataset')
    parser.add_argument('--model_epochs', type=int, default=400, help='epochs for training a GNN model')
    parser.add_argument('--model_lr', type=float, default=0.001, help='learning rate for training model')
    parser.add_argument('--test_samples', type=int, default=200, help='number of test samples')
    parser.add_argument('--num_noise', type=int, default=10, help='number of noise features to add')
    # GraphLIME
    parser.add_argument('--hop', type=int, default=2, help='hops')
    parser.add_argument('--rho', type=float, default=0.15, help='rho')
    parser.add_argument('--K', type=int, default=300, help='top-K most importance features')
    # GNNExplainer
    parser.add_argument('--masks_epochs', type=int, default=200, help='epochs for training a GNNExplainer')
    parser.add_argument('--masks_lr', type=float, default=0.01, help='learning rate for training GNNExplainer')
    parser.add_argument('--masks_threshold', type=float, default=0.1, help='threshold of features for GNNExplainer')
    # LIME
    parser.add_argument('--lime_samples', type=int, default=50, help='generate samples for LIME')
    # Greedy
    parser.add_argument('--greedy_threshold', type=float, default=0.03, help='threshold of features for Greedy')
    # plotting and reproducibility
    parser.add_argument('--ymax', type=float, default=1.10, help='max of y-axis')
    parser.add_argument('--seed', type=int, default=42, help='seed')
    args = parser.parse_args()
    return args
def check_args(args):
    """Validate parsed arguments.

    Raises ValueError for unsupported datasets. An explicit exception
    replaces the original ``assert`` so the check is not stripped when
    Python runs with ``-O``.
    """
    if args.dataset.title() not in ('Cora', 'Pubmed'):
        raise ValueError("dataset must be 'Cora' or 'Pubmed', got {!r}".format(args.dataset))
def fix_seed(seed):
    """Seed Python, NumPy and PyTorch RNGs and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op on CPU-only installs
    torch.backends.cudnn.deterministic = True
def find_noise_feats_by_GraphLIME(model, data, args):
    """Per test node, count injected-noise features among GraphLIME's
    top-K positive coefficients."""
    explainer = GraphLIME(model, hop=args.hop, rho=args.rho)
    test_nodes = extract_test_nodes(data, args.test_samples)
    counts = []
    for node_idx in tqdm(test_nodes, desc='explain node', leave=False):
        coefs = explainer.explain_node(node_idx, data.x, data.edge_index)
        # Keep only top-K features with strictly positive importance.
        kept = [i for i in coefs.argsort()[-args.K:] if coefs[i] > 0.0]
        counts.append(sum(i >= INPUT_DIM[args.dataset] for i in kept))
    return counts
def find_noise_feats_by_GNNExplainer(model, data, args):
    """Per test node, count injected-noise features among GNNExplainer's
    top-K mask entries above the mask threshold."""
    explainer = GNNExplainer(model, epochs=args.masks_epochs, lr=args.masks_lr,
                             num_hops=args.hop, log=False)
    test_nodes = extract_test_nodes(data, args.test_samples)
    counts = []
    for node_idx in tqdm(test_nodes, desc='explain node', leave=False):
        feat_mask, _ = explainer.explain_node(node_idx, data.x, data.edge_index)
        feat_mask = feat_mask.detach().cpu().numpy()
        kept = [i for i in feat_mask.argsort()[-args.K:]
                if feat_mask[i] > args.masks_threshold]
        counts.append(sum(i >= INPUT_DIM[args.dataset] for i in kept))
    return counts
def find_noise_feats_by_LIME(model, data, args):
    """Per test node, count injected-noise features among the K largest
    absolute LIME coefficients."""
    explainer = LIME(model, args.lime_samples)
    test_nodes = extract_test_nodes(data, args.test_samples)
    counts = []
    for node_idx in tqdm(test_nodes, desc='explain node', leave=False):
        weights = np.abs(explainer.explain_node(node_idx, data.x, data.edge_index))
        top = weights.argsort()[-args.K:]
        counts.append(sum(i >= INPUT_DIM[args.dataset] for i in top))
    return counts
def find_noise_feats_by_greedy(model, data, args):
    """Per test node, count injected-noise features among Greedy's top-K
    features whose probability drop exceeds the threshold."""
    explainer = Greedy(model)
    test_nodes = extract_test_nodes(data, args.test_samples)
    # (num_test_nodes, num_feats) relative probability drops, all nodes at once.
    delta_probas = explainer.explain_node(test_nodes, data.x, data.edge_index)
    top_k = delta_probas.argsort(axis=-1)[:, -args.K:]  # (num_test_nodes, K)
    counts = []
    for drops, candidates in zip(delta_probas, top_k):
        kept = [i for i in candidates if drops[i] > args.greedy_threshold]
        counts.append(sum(i >= INPUT_DIM[args.dataset] for i in kept))
    return counts
def find_noise_feats_by_random(data, args):
    """Random baseline: per test node, count injected-noise indices among
    K uniformly random feature picks."""
    explainer = Random(data.x.size(1), args.K)
    counts = []
    for _ in tqdm(range(args.test_samples), desc='explain node', leave=False):
        picked = explainer.explain_node()
        counts.append((picked >= INPUT_DIM[args.dataset]).sum())
    return counts
def main():
    """End-to-end benchmark: train a GAT, then compare five explainers.

    Each find_noise_feats_by_* call returns, per test node, how many of the
    injected noise features that explainer selected; plot_dist overlays one
    KDE curve per explainer on a shared figure.
    """
    args = build_args()
    check_args(args)
    fix_seed(args.seed)
    data = prepare_data(args)
    hparams = {
        'input_dim': data.x.size(1),
        'hidden_dim': 16,
        'output_dim': max(data.y).item() + 1
    }
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GAT(**hparams).to(device)
    data = data.to(device)
    train(model, data, args)
    # model.load_state_dict(torch.load('./examples/noise_features/model.pth'))
    test_loss, test_acc = evaluate(model, data, mask=data.test_mask)
    print('test_loss: {:.4f}, test_acc: {:.4f}'.format(test_loss, test_acc))
    # Only explain a reasonably accurate model; otherwise abort.
    # NOTE(review): bare exit() relies on the site module; sys.exit() is the
    # conventional choice for scripts.
    if test_acc < 0.8:
        print('bad model! Please re-run!')
        exit()
    print('=== Explain by GraphLIME ===')
    noise_feats = find_noise_feats_by_GraphLIME(model, data, args)
    plot_dist(noise_feats, label='GraphLIME', ymax=args.ymax, color='g')
    print('=== Explain by GNNExplainer ===')
    noise_feats = find_noise_feats_by_GNNExplainer(model, data, args)
    plot_dist(noise_feats, label='GNNExplainer', ymax=args.ymax, color='r')
    print('=== Explain by LIME ===')
    noise_feats = find_noise_feats_by_LIME(model, data, args)
    plot_dist(noise_feats, label='LIME', ymax=args.ymax, color='C0')
    print('=== Explain by Greedy ===')
    noise_feats = find_noise_feats_by_greedy(model, data, args)
    plot_dist(noise_feats, label='Greedy', ymax=args.ymax, color='orange')
    print('=== Explain by Random ===')
    noise_feats = find_noise_feats_by_random(data, args)
    # The final call also sets the figure title and writes it to disk.
    plot_dist(noise_feats, label='Random', ymax=args.ymax, color='k',
              title=f'Distribution of noisy features on {args.dataset} for {model.__class__.__name__}',
              save_path=f'{DIRNAME}/results/{args.dataset.lower()}.png')
    plt.show()
if __name__ == "__main__":
main()
| 7,487 | 33.827907 | 120 | py |
GraphLIME | GraphLIME-master/exp/noise_features/utils.py | import os
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
def prepare_data(args):
    """Load the Planetoid dataset, re-split train/test and append noise features."""
    dirname = os.path.dirname(os.path.realpath(__file__))
    root = os.path.join(dirname, '..', 'data', 'Planetoid')
    dataset = Planetoid(root, args.dataset.title(), transform=T.NormalizeFeatures())
    data = dataset[0]
    return add_noise_features(modify_train_mask(data), args.num_noise)
def modify_train_mask(data):
    """Re-split nodes: a random 80% becomes train, the rest test; val is emptied."""
    num_nodes = data.x.size(0)
    chosen = np.random.choice(num_nodes, size=int(num_nodes * 0.8), replace=False)
    train_mask = torch.zeros_like(data.train_mask)
    train_mask[chosen] = 1
    train_mask = train_mask > 0
    val_mask = torch.zeros_like(data.val_mask) > 0  # always empty
    data.train_mask = train_mask
    data.val_mask = val_mask
    data.test_mask = ~(train_mask + val_mask)
    return data
def add_noise_features(data, num_noise):
    """Append num_noise standard-normal feature columns (row-centred) to data.x."""
    if not num_noise:
        return data
    noise = torch.randn(data.x.size(0), num_noise)
    # Centre each row so the injected features carry no per-node offset.
    noise = noise - noise.mean(1, keepdim=True)
    data.x = torch.cat([data.x, noise], dim=-1)
    return data
def extract_test_nodes(data, num_samples):
    """Sample num_samples node indices (with replacement) from the test mask."""
    candidates = data.test_mask.cpu().numpy().nonzero()[0]
    return np.random.choice(candidates, num_samples).tolist()
def plot_dist(noise_feats, label=None, ymax=1.0, color=None, title=None, save_path=None):
    """KDE plot of per-node noisy-feature counts on the shared axes.

    Successive calls overlay curves on the same figure; the last caller
    typically passes `title` and `save_path`.
    """
    sns.set_style('darkgrid')
    axis = sns.distplot(noise_feats, hist=False, kde=True,
                        kde_kws={'label': label}, color=color)
    plt.xlim(-3, 11)
    plt.ylim(ymin=0.0, ymax=ymax)
    if title:
        plt.title(title)
    if save_path:
        plt.savefig(save_path)
    return axis
def train(model, data, args, verbose=True):
    """Train `model` with Adam, checkpointing whenever the test loss improves.

    The best checkpoint is reloaded into `model` before returning.
    NOTE(review): model selection uses the *test* split (there is no val
    split after modify_train_mask), and `bad_counter` is incremented but
    never used to stop early -- the loop always runs args.model_epochs.
    """
    optimizer = optim.Adam(model.parameters(), lr=args.model_lr, weight_decay=5e-4)
    train_loss_values, train_acc_values = [], []
    test_loss_values, test_acc_values = [], []
    best = np.inf  # lowest test loss seen so far
    bad_counter = 0  # epochs since last improvement (currently unused)
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model.pth')
    for epoch in tqdm(range(args.model_epochs), desc='Training', leave=False):
        if epoch == 0:
            # Progress-table header ("Trainging" typo kept to preserve output).
            print(' | Trainging | Validation |')
            print(' |-------------------|--------------------|')
            print(' Epoch | loss accuracy | loss accuracy |')
            print('-------|-------------------|--------------------|')
        train_loss, train_acc = train_on_epoch(model, optimizer, data)
        train_loss_values.append(train_loss.item())
        train_acc_values.append(train_acc.item())
        test_loss, test_acc = evaluate(model, data, data.test_mask)
        test_loss_values.append(test_loss.item())
        test_acc_values.append(test_acc.item())
        if test_loss_values[-1] < best:
            bad_counter = 0
            log = ' {:3d} | {:.4f} {:.4f} | {:.4f} {:.4f} |'.format(epoch + 1,
                                                                    train_loss.item(),
                                                                    train_acc.item(),
                                                                    test_loss.item(),
                                                                    test_acc.item())
            # Checkpoint on every improvement of the test loss.
            torch.save(model.state_dict(), model_path)
            log += ' save model to {}'.format(model_path)
            if verbose:
                tqdm.write(log)
            best = test_loss_values[-1]
        else:
            bad_counter += 1
    print('-------------------------------------------------')
    # Restore the best checkpoint before returning.
    model.load_state_dict(torch.load(model_path))
def train_on_epoch(model, optimizer, data):
    """Run one optimisation step over the train mask; return (loss, accuracy)."""
    model.train()
    optimizer.zero_grad()
    logits = model(data.x, data.edge_index)
    loss = F.nll_loss(logits[data.train_mask], data.y[data.train_mask])
    acc = accuracy(logits[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss, acc
def evaluate(model, data, mask):
    """Compute NLL loss and accuracy over the nodes selected by `mask` (no grads)."""
    model.eval()
    with torch.no_grad():
        logits = model(data.x, data.edge_index)
        loss = F.nll_loss(logits[mask], data.y[mask])
        acc = accuracy(logits[mask], data.y[mask])
    return loss, acc
def accuracy(output, labels):
    """Fraction of rows whose argmax matches `labels`, as a double tensor."""
    preds = output.max(dim=1)[1]
    return preds.eq(labels).double().sum() / len(labels)
| 5,089 | 28.766082 | 97 | py |
GraphLIME | GraphLIME-master/exp/noise_features/models.py | import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv
class GCN(nn.Module):
    """Two-layer graph convolutional network with log-softmax output."""

    def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.5):
        super().__init__()
        self.dropout = dropout
        self.conv1 = GCNConv(input_dim, hidden_dim)
        self.conv2 = GCNConv(hidden_dim, output_dim)

    def forward(self, x, edge_index):
        h = F.relu(self.conv1(x, edge_index))
        h = F.dropout(h, p=self.dropout, training=self.training)
        return F.log_softmax(self.conv2(h, edge_index), dim=1)
class GAT(nn.Module):
    """Two-layer graph attention network with log-softmax output."""

    def __init__(self, input_dim, hidden_dim, output_dim, heads_1=8, heads_2=1,
                 att_dropout=0.6, input_dropout=0.6):
        super().__init__()
        self.att_dropout = att_dropout
        self.input_dropout = input_dropout
        # First layer: heads are concatenated so the combined output has
        # hidden_dim channels in total.
        self.conv1 = GATConv(in_channels=input_dim,
                             out_channels=hidden_dim // heads_1,
                             heads=heads_1,
                             concat=True,
                             dropout=att_dropout)
        # Second layer: heads are averaged into the class scores.
        self.conv2 = GATConv(in_channels=hidden_dim,
                             out_channels=output_dim,
                             heads=heads_2,
                             concat=False,
                             dropout=att_dropout)

    def forward(self, x, edge_index):
        h = F.dropout(x, p=self.input_dropout, training=self.training)
        h = F.elu(self.conv1(h, edge_index))
        h = F.dropout(h, p=self.input_dropout, training=self.training)
        return F.log_softmax(self.conv2(h, edge_index), dim=1)
| 1,699 | 32.333333 | 116 | py |
arl-eegmodels | arl-eegmodels-master/EEGModels.py | """
ARL_EEGModels - A collection of Convolutional Neural Network models for EEG
Signal Processing and Classification, using Keras and Tensorflow
Requirements:
(1) tensorflow == 2.X (as of this writing, 2.0 - 2.3 have been verified
as working)
To run the EEG/MEG ERP classification sample script, you will also need
(4) mne >= 0.17.1
(5) PyRiemann >= 0.2.5
(6) scikit-learn >= 0.20.1
(7) matplotlib >= 2.2.3
To use:
(1) Place this file in the PYTHONPATH variable in your IDE (i.e.: Spyder)
(2) Import the model as
from EEGModels import EEGNet
model = EEGNet(nb_classes = ..., Chans = ..., Samples = ...)
(3) Then compile and fit the model
model.compile(loss = ..., optimizer = ..., metrics = ...)
fitted = model.fit(...)
predicted = model.predict(...)
Portions of this project are works of the United States Government and are not
subject to domestic copyright protection under 17 USC Sec. 105. Those
portions are released world-wide under the terms of the Creative Commons Zero
1.0 (CC0) license.
Other portions of this project are subject to domestic copyright protection
under 17 USC Sec. 105. Those portions are licensed under the Apache 2.0
license. The complete text of the license governing this material is in
the file labeled LICENSE.TXT that is a part of this project's official
distribution.
"""
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Activation, Permute, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import SeparableConv2D, DepthwiseConv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import SpatialDropout2D
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.layers import Input, Flatten
from tensorflow.keras.constraints import max_norm
from tensorflow.keras import backend as K
def EEGNet(nb_classes, Chans = 64, Samples = 128,
           dropoutRate = 0.5, kernLength = 64, F1 = 8,
           D = 2, F2 = 16, norm_rate = 0.25, dropoutType = 'Dropout'):
    """Keras implementation of EEGNet (current version).

    http://iopscience.iop.org/article/10.1088/1741-2552/aace8c/meta

    Two-block architecture: (1) a temporal convolution followed by a
    depthwise spatial convolution that learns D spatial filters per
    temporal filter (FBCSP-like); (2) a separable convolution that
    optimally mixes the learned filters. Defaults give the EEGNet-8,2
    model for 128 Hz signals; for other sampling rates scale the temporal
    kernel and pooling lengths proportionally.

    Inputs:
      nb_classes     : number of output classes
      Chans, Samples : EEG channels and time samples
      dropoutRate    : dropout fraction
      kernLength     : temporal kernel length (about half the sampling rate)
      F1, F2         : temporal and pointwise filter counts (F2 = F1 * D)
      D              : spatial filters learned per temporal filter
      norm_rate      : max-norm constraint on the classifier weights
      dropoutType    : 'SpatialDropout2D' or 'Dropout' (Dropout recommended
                       except for some ERP data)
    """
    dropout_layers = {'SpatialDropout2D': SpatialDropout2D, 'Dropout': Dropout}
    if dropoutType not in dropout_layers:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    dropout_layer = dropout_layers[dropoutType]

    inputs = Input(shape=(Chans, Samples, 1))

    # Block 1: temporal conv, then depthwise spatial conv across channels.
    x = Conv2D(F1, (1, kernLength), padding='same',
               input_shape=(Chans, Samples, 1), use_bias=False)(inputs)
    x = BatchNormalization()(x)
    x = DepthwiseConv2D((Chans, 1), use_bias=False, depth_multiplier=D,
                        depthwise_constraint=max_norm(1.))(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    x = AveragePooling2D((1, 4))(x)
    x = dropout_layer(dropoutRate)(x)

    # Block 2: separable conv mixes the spatial filters across bands.
    x = SeparableConv2D(F2, (1, 16), use_bias=False, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    x = AveragePooling2D((1, 8))(x)
    x = dropout_layer(dropoutRate)(x)

    x = Flatten(name='flatten')(x)
    x = Dense(nb_classes, name='dense', kernel_constraint=max_norm(norm_rate))(x)
    outputs = Activation('softmax', name='softmax')(x)

    return Model(inputs=inputs, outputs=outputs)
def EEGNet_SSVEP(nb_classes = 12, Chans = 8, Samples = 256,
                 dropoutRate = 0.5, kernLength = 256, F1 = 96,
                 D = 1, F2 = 96, dropoutType = 'Dropout'):
    """SSVEP variant of EEGNet (Waytowich et al., 2018, J. Neural Eng. 15(6)).

    http://iopscience.iop.org/article/10.1088/1741-2552/aae5d8

    Same two-block architecture as EEGNet, with defaults tuned for
    steady-state visual evoked potentials and no max-norm constraint on
    the classifier head.

    Inputs:
      nb_classes     : number of output classes
      Chans, Samples : EEG channels and time samples
      dropoutRate    : dropout fraction
      kernLength     : temporal kernel length of the first layer
      F1, F2         : temporal and pointwise filter counts
      D              : spatial filters learned per temporal filter
      dropoutType    : 'SpatialDropout2D' or 'Dropout'
    """
    dropout_layers = {'SpatialDropout2D': SpatialDropout2D, 'Dropout': Dropout}
    if dropoutType not in dropout_layers:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    dropout_layer = dropout_layers[dropoutType]

    inputs = Input(shape=(Chans, Samples, 1))

    # Block 1: temporal conv, then depthwise spatial conv across channels.
    x = Conv2D(F1, (1, kernLength), padding='same',
               input_shape=(Chans, Samples, 1), use_bias=False)(inputs)
    x = BatchNormalization()(x)
    x = DepthwiseConv2D((Chans, 1), use_bias=False, depth_multiplier=D,
                        depthwise_constraint=max_norm(1.))(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    x = AveragePooling2D((1, 4))(x)
    x = dropout_layer(dropoutRate)(x)

    # Block 2: separable conv mixes the learned filters.
    x = SeparableConv2D(F2, (1, 16), use_bias=False, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    x = AveragePooling2D((1, 8))(x)
    x = dropout_layer(dropoutRate)(x)

    x = Flatten(name='flatten')(x)
    x = Dense(nb_classes, name='dense')(x)
    outputs = Activation('softmax', name='softmax')(x)

    return Model(inputs=inputs, outputs=outputs)
def EEGNet_old(nb_classes, Chans = 64, Samples = 128, regRate = 0.0001,
               dropoutRate = 0.25, kernels = ((2, 32), (8, 4)), strides = (2, 4)):
    """Original EEGNet v1 (https://arxiv.org/abs/1611.08024v2), kept for reference.

    Uses strided convolutions in place of the paper's max-pooling (a small
    speed/accuracy win). The newer EEGNet() is recommended instead.

    Bug fix: the mutable list default for `kernels` is replaced by an
    equivalent indexable tuple, avoiding the shared-mutable-default pitfall.

    Inputs:
      nb_classes     : total number of final categories
      Chans, Samples : EEG channels and samples
      regRate        : L1/L2 regularization rate
      dropoutRate    : dropout fraction
      kernels        : kernel sizes of the 2nd and 3rd conv layers
      strides        : stride used instead of max-pooling
    """
    # start the model
    input_main = Input((Chans, Samples))
    layer1 = Conv2D(16, (Chans, 1), input_shape=(Chans, Samples, 1),
                    kernel_regularizer=l1_l2(l1=regRate, l2=regRate))(input_main)
    layer1 = BatchNormalization()(layer1)
    layer1 = Activation('elu')(layer1)
    layer1 = Dropout(dropoutRate)(layer1)

    # Swap the channel and time axes before the 2-D convolutions.
    permute1 = Permute((2, 1, 3))(layer1)

    layer2 = Conv2D(4, kernels[0], padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                    strides=strides)(permute1)
    layer2 = BatchNormalization()(layer2)
    layer2 = Activation('elu')(layer2)
    layer2 = Dropout(dropoutRate)(layer2)

    layer3 = Conv2D(4, kernels[1], padding='same',
                    kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                    strides=strides)(layer2)
    layer3 = BatchNormalization()(layer3)
    layer3 = Activation('elu')(layer3)
    layer3 = Dropout(dropoutRate)(layer3)

    flatten = Flatten(name='flatten')(layer3)
    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
def DeepConvNet(nb_classes, Chans = 64, Samples = 256,
                dropoutRate = 0.5):
    """Deep ConvNet of Schirrmeister et al. (2017), adapted to 128 Hz input.

    Relative to the 250 Hz original: conv kernels (1, 5) instead of (1, 10)
    and pooling/stride (1, 2) instead of (1, 3). All conv layers and the
    classifier carry max_norm constraints, and BatchNorm uses the
    epsilon/momentum suggested by the original authors (personal
    communication). Not verified by the original authors.
    """
    # start the model
    input_main = Input((Chans, Samples, 1))

    # Stem: temporal conv followed by a spatial (across-channel) conv.
    x = Conv2D(25, (1, 5), input_shape=(Chans, Samples, 1),
               kernel_constraint=max_norm(2., axis=(0, 1, 2)))(input_main)
    x = Conv2D(25, (Chans, 1),
               kernel_constraint=max_norm(2., axis=(0, 1, 2)))(x)
    x = BatchNormalization(epsilon=1e-05, momentum=0.9)(x)
    x = Activation('elu')(x)
    x = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(x)
    x = Dropout(dropoutRate)(x)

    # Three further conv-pool stages with doubling filter counts.
    for filters in (50, 100, 200):
        x = Conv2D(filters, (1, 5),
                   kernel_constraint=max_norm(2., axis=(0, 1, 2)))(x)
        x = BatchNormalization(epsilon=1e-05, momentum=0.9)(x)
        x = Activation('elu')(x)
        x = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(x)
        x = Dropout(dropoutRate)(x)

    x = Flatten()(x)
    x = Dense(nb_classes, kernel_constraint=max_norm(0.5))(x)
    softmax = Activation('softmax')(x)

    return Model(inputs=input_main, outputs=softmax)
# need these for ShallowConvNet
def square(x):
    """Elementwise square activation used by ShallowConvNet."""
    return K.square(x)
def log(x):
    """Elementwise log activation; input is clipped to [1e-7, 1e4]
    first so log(0) can never produce -inf/NaN."""
    return K.log(K.clip(x, min_value = 1e-7, max_value = 10000))
def ShallowConvNet(nb_classes, Chans = 64, Samples = 128, dropoutRate = 0.5):
    """ Keras implementation of the Shallow Convolutional Network as described
    in Schirrmeister et. al. (2017), Human Brain Mapping.

    Assumes the input is a 2-second EEG signal sampled at 128Hz. Note that in
    the original paper, they do temporal convolutions of length 25 for EEG
    data sampled at 250Hz. We instead use length 13 since the sampling rate is
    roughly half of the 250Hz which the paper used. The pool_size and stride
    in later layers is also approximately half of what is used in the paper.

    Note that we use the max_norm constraint on all convolutional layers, as
    well as the classification layer. We also change the defaults for the
    BatchNormalization layer. We used this based on a personal communication
    with the original authors.

                     ours        original paper
    pool_size        1, 35       1, 75
    strides          1, 7        1, 15
    conv filters     1, 13       1, 25

    Note that this implementation has not been verified by the original
    authors. We do note that this implementation reproduces the results in the
    original paper with minor deviations.
    """
    # start the model
    input_main = Input((Chans, Samples, 1))
    # temporal convolution, then a spatial convolution across all channels
    block1 = Conv2D(40, (1, 13),
                    input_shape=(Chans, Samples, 1),
                    kernel_constraint = max_norm(2., axis=(0,1,2)))(input_main)
    block1 = Conv2D(40, (Chans, 1), use_bias=False,
                    kernel_constraint = max_norm(2., axis=(0,1,2)))(block1)
    block1 = BatchNormalization(epsilon=1e-05, momentum=0.9)(block1)
    # square -> average-pool -> log uses the square()/log() helpers above
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=(1, 35), strides=(1, 7))(block1)
    block1 = Activation(log)(block1)
    block1 = Dropout(dropoutRate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(nb_classes, kernel_constraint = max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
| 18,033 | 43.74938 | 96 | py |
arl-eegmodels | arl-eegmodels-master/examples/ERP.py | """
Sample script using EEGNet to classify Event-Related Potential (ERP) EEG data
from a four-class classification task, using the sample dataset provided in
the MNE [1, 2] package:
https://martinos.org/mne/stable/manual/sample_dataset.html#ch-sample-data
The four classes used from this dataset are:
LA: Left-ear auditory stimulation
RA: Right-ear auditory stimulation
LV: Left visual field stimulation
RV: Right visual field stimulation
The code to process, filter and epoch the data are originally from Alexandre
Barachant's PyRiemann [3] package, released under the BSD 3-clause. A copy of
the BSD 3-clause license has been provided together with this software to
comply with software licensing requirements.
When you first run this script, MNE will download the dataset and prompt you
to confirm the download location (defaults to ~/mne_data). Follow the prompts
to continue. The dataset size is approx. 1.5GB download.
For comparative purposes you can also compare EEGNet performance to using
Riemannian geometric approaches with xDAWN spatial filtering [4-8] using
PyRiemann (code provided below).
[1] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck,
L. Parkkonen, M. Hämäläinen, MNE software for processing MEG and EEG data,
NeuroImage, Volume 86, 1 February 2014, Pages 446-460, ISSN 1053-8119.
[2] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck,
R. Goj, M. Jas, T. Brooks, L. Parkkonen, M. Hämäläinen, MEG and EEG data
analysis with MNE-Python, Frontiers in Neuroscience, Volume 7, 2013.
[3] https://github.com/alexandrebarachant/pyRiemann.
[4] A. Barachant, M. Congedo ,"A Plug&Play P300 BCI Using Information Geometry"
arXiv:1409.0107. link
[5] M. Congedo, A. Barachant, A. Andreev ,"A New generation of Brain-Computer
Interface Based on Riemannian Geometry", arXiv: 1310.8115.
[6] A. Barachant and S. Bonnet, "Channel selection procedure using riemannian
distance for BCI applications," in 2011 5th International IEEE/EMBS
Conference on Neural Engineering (NER), 2011, 348-351.
[7] A. Barachant, S. Bonnet, M. Congedo and C. Jutten, “Multiclass
Brain-Computer Interface Classification by Riemannian Geometry,” in IEEE
Transactions on Biomedical Engineering, vol. 59, no. 4, p. 920-928, 2012.
[8] A. Barachant, S. Bonnet, M. Congedo and C. Jutten, “Classification of
covariance matrices using a Riemannian-based kernel for BCI applications“,
in NeuroComputing, vol. 112, p. 172-178, 2013.
Portions of this project are works of the United States Government and are not
subject to domestic copyright protection under 17 USC Sec. 105. Those
portions are released world-wide under the terms of the Creative Commons Zero
1.0 (CC0) license.
Other portions of this project are subject to domestic copyright protection
under 17 USC Sec. 105. Those portions are licensed under the Apache 2.0
license. The complete text of the license governing this material is in
the file labeled LICENSE.TXT that is a part of this project's official
distribution.
"""
import numpy as np
# mne imports
import mne
from mne import io
from mne.datasets import sample
# EEGNet-specific imports
from EEGModels import EEGNet
from tensorflow.keras import utils as np_utils
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import backend as K
# PyRiemann imports
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from pyriemann.utils.viz import plot_confusion_matrix
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
# tools for plotting confusion matrices
from matplotlib import pyplot as plt
# while the default tensorflow ordering is 'channels_last' we set it here
# to be explicit in case if the user has changed the default ordering
K.set_image_data_format('channels_last')

##################### Process, filter and epoch the data ######################
data_path = sample.data_path()

# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0., 1  # epoch window: event onset to +1 second
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)

# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True, verbose=False)
raw.filter(2, None, method='iir')  # replace baselining with high-pass
events = mne.read_events(event_fname)

raw.info['bads'] = ['MEG 2443']  # set bad channels
# keep only EEG channels (the stim/EOG/MEG channels are dropped)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                       exclude='bads')

# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
                    picks=picks, baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1]

# extract raw data. scale by 1000 due to scaling sensitivity in deep learning
X = epochs.get_data()*1000 # format is in (trials, channels, samples)
y = labels

kernels, chans, samples = 1, 60, 151

# take 50/25/25 percent of the data to train/validate/test
# NOTE(review): split is by trial order, not shuffled -- fine for this demo
X_train = X[0:144,]
Y_train = y[0:144]
X_validate = X[144:216,]
Y_validate = y[144:216]
X_test = X[216:,]
Y_test = y[216:]
############################# EEGNet portion ##################################

# convert labels to one-hot encodings (labels are 1..4, shift to 0..3 first)
Y_train = np_utils.to_categorical(Y_train-1)
Y_validate = np_utils.to_categorical(Y_validate-1)
Y_test = np_utils.to_categorical(Y_test-1)

# convert data to NHWC (trials, channels, samples, kernels) format. Data
# contains 60 channels and 151 time-points. Set the number of kernels to 1.
X_train = X_train.reshape(X_train.shape[0], chans, samples, kernels)
X_validate = X_validate.reshape(X_validate.shape[0], chans, samples, kernels)
X_test = X_test.reshape(X_test.shape[0], chans, samples, kernels)

print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# configure the EEGNet-8,2,16 model with kernel length of 32 samples (other
# model configurations may do better, but this is a good starting point)
model = EEGNet(nb_classes = 4, Chans = chans, Samples = samples,
               dropoutRate = 0.5, kernLength = 32, F1 = 8, D = 2, F2 = 16,
               dropoutType = 'Dropout')

# compile the model and set the optimizers
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics = ['accuracy'])

# count number of parameters in the model
numParams = model.count_params()

# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(filepath='/tmp/checkpoint.h5', verbose=1,
                               save_best_only=True)

###############################################################################
# if the classification task was imbalanced (significantly more trials in one
# class versus the others) you can assign a weight to each class during
# optimization to balance it out. This data is approximately balanced so we
# don't need to do this, but is shown here for illustration/completeness.
###############################################################################

# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
class_weights = {0:1, 1:1, 2:1, 3:1}

################################################################################
# fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run, but most runs should be comparable to xDAWN +
# Riemannian geometry classification (below)
################################################################################
fittedModel = model.fit(X_train, Y_train, batch_size = 16, epochs = 300,
                        verbose = 2, validation_data=(X_validate, Y_validate),
                        callbacks=[checkpointer], class_weight = class_weights)

# load optimal weights (best validation loss, saved by the checkpointer)
model.load_weights('/tmp/checkpoint.h5')

###############################################################################
# can alternatively used the weights provided in the repo. If so it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################

# WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
# model.load_weights(WEIGHTS_PATH)

###############################################################################
# make prediction on test set.
###############################################################################

probs = model.predict(X_test)
preds = probs.argmax(axis = -1)
acc = np.mean(preds == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc))
############################# PyRiemann Portion ##############################

# code is taken from PyRiemann's ERP sample script, which is decoding in
# the tangent space with a logistic regression
n_components = 2  # pick some components

# set up sklearn pipeline: xDAWN covariances -> tangent space -> logistic reg.
clf = make_pipeline(XdawnCovariances(n_components),
                    TangentSpace(metric='riemann'),
                    LogisticRegression())

preds_rg = np.zeros(len(Y_test))

# reshape back to (trials, channels, samples)
X_train = X_train.reshape(X_train.shape[0], chans, samples)
X_test = X_test.reshape(X_test.shape[0], chans, samples)

# train a classifier with xDAWN spatial filtering + Riemannian Geometry (RG)
# labels need to be back in single-column format
clf.fit(X_train, Y_train.argmax(axis = -1))
preds_rg = clf.predict(X_test)

# Printing the results
acc2 = np.mean(preds_rg == Y_test.argmax(axis = -1))
print("Classification accuracy: %f " % (acc2))

# plot the confusion matrices for both classifiers
names = ['audio left', 'audio right', 'vis left', 'vis right']
plt.figure(0)
plot_confusion_matrix(preds, Y_test.argmax(axis = -1), names, title = 'EEGNet-8,2')
plt.figure(1)
plot_confusion_matrix(preds_rg, Y_test.argmax(axis = -1), names, title = 'xDAWN + RG')
| 10,178 | 40.717213 | 86 | py |
Paddle | Paddle-master/python/paddle/trainer/config_parser.py | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
'''
The following functions are available in the config file:
Bias: define bias. To be used as value of bias argument in Layer().
Data: define data provider.
Input: define input layer for a layer. To be used as element of inputs argument
in Layer().
Conv: define a convolution operation for an input of a layer.
Norm: define a normalization operation for an input of a layer.
Pool: define a pooling operation for an input of a layer.
Layer: define a layer.
Parameter: define a parameter.
Import: import another config file. If the imported config file name is
a relative path, then it will be searched under the directory of the
current config file.
Inputs(layer_names...):
Define the name of the input layers of the NeuralNetwork.
The type of these layers must be "data".
These layers will be provided with the DataBatch obtained
from DataProvider. The data streams from DataProvider must
have the same order.
Outputs(layer_names...):
Define the name of the output layers of the NeuralNetwork.
Usually the output is simply the cost layer.
You can specify other layers as outputs and calculate the
cost (and its derivative) yourself.
default_initial_std(val)
default_initial_mean(val)
default_momentum(val):
default_decay_rate(val): Set the default value for these parameters
get_config_arg(name, type, default): Get the value for a config parameter.
*** customized extension to config_parser ***
The functionality of the config_parser can be extended.
If the config_arg_str for parse_config() contains
extension_module_name=[MODULE_NAME], then config_parser will call
MODULE_NAME.get_config_funcs(g_config)
MODULE_NAME.get_config_funcs() should return a dictionary of name to functions,
those functions will be available in the config file.
See trainer/tests/config_parser_test.py for example
To use this from paddle_trainer, paddle_trainer should be called with
--config_args=extension_module_name=[MODULE_NAME]
'''
import copy
import logging
import os
import sys
import traceback
import math
import shutil
try:
from paddle.proto.DataConfig_pb2 import DataConfig
from paddle.proto.ModelConfig_pb2 import ModelConfig
from paddle.proto.ModelConfig_pb2 import LayerConfig
from paddle.proto.ModelConfig_pb2 import LayerInputConfig
from paddle.proto.ModelConfig_pb2 import ProjectionConfig
from paddle.proto.ModelConfig_pb2 import OperatorConfig
from paddle.proto.ModelConfig_pb2 import GeneratorConfig
from paddle.proto.ModelConfig_pb2 import LinkConfig
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
from paddle.proto.ParameterConfig_pb2 import ParameterUpdaterHookConfig
from paddle.proto.TrainerConfig_pb2 import TrainerConfig
except Exception as e:
traceback.print_exc()
raise
logging.basicConfig(
    format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', )
logger = logging.getLogger('paddle')
logger.setLevel(logging.INFO)
__real_print__ = print
# deliberately shadow print() so config-file output goes through the logger;
# the builtin is preserved as __real_print__
print = logger.info

# from layer type name to layer class
g_layer_type_map = {}
# Initialize global variables. We use this function so that we can
# call parse_config() multiple times
def init_config_environment(
        g_default_momentum=None,
        g_default_decay_rate=None,
        g_default_initial_mean=0.,
        g_default_initial_std=0.01,
        g_default_num_batches_regularization=None,
        g_default_initial_strategy=0,
        g_default_initial_smart=False,
        g_default_gradient_clipping_threshold=None,
        g_default_device=None,
        g_default_update_hooks=None,
        g_default_compact_func=None,
        g_config=TrainerConfig(),
        g_layer_map={},
        g_parameter_map={},
        g_parameter_initializer_map={},
        g_extended_config_funcs={},

        # store command args of paddle_trainer
        g_command_config_args={},

        # Used for PyDataProvider to avoid duplicate module name
        g_py_module_name_list=[],
        g_current_submodel=None,
        g_root_submodel=None,
        g_submodel_map={},
        g_submodel_stack=[],
        g_add_submodel_suffix=False, ):
    """Reset the parser's module-level state to the defaults above.

    Every keyword argument is deep-copied and injected into this module's
    globals(), so the parameter list doubles as the list of global state
    reset between parse_config() runs. The deepcopy shields the mutable
    defaults ({} / []) from mutation across calls.
    """
    # Snapshot first: iterating locals() directly while assigning would
    # introduce k, v into the local scope and break iteration in some envs.
    local_vars = copy.deepcopy(locals())
    # .items() behaves identically on Python 2 and also works on Python 3;
    # the previous .iteritems() call was Python-2-only.
    for k, v in local_vars.items():
        globals()[k] = v
# Because type is widely used as a variable name in this code.
# we need a different function name for the builtin type()
def type_of(x):
    """Return type(x); alias usable where 'type' is shadowed by a variable."""
    return type(x)
# Check a condition derived config file
def config_assert(b, msg):
    """Log *msg* as a fatal error when the condition *b* does not hold."""
    if b:
        return
    logger.fatal(msg)
g_config_funcs = {}


# decorator for indicating a function which can be used in config file
def config_func(func):
    """Register *func* in g_config_funcs under its own name and return it.

    Registered callables become available inside user config files.
    """
    # func.__name__ is portable across Python 2 and 3; the previous
    # func.func_name attribute exists only on Python 2.
    g_config_funcs[func.__name__] = func
    return func
# decorator for indicating a class which can be used in config file
def config_class(cls):
    """Register *cls* in g_config_funcs under its class name and return it."""
    g_config_funcs[cls.__name__] = cls
    return cls
# decorator for indicating a class for a layer type
def config_layer(layer_type):
    """Parameterized decorator: register a layer class both as a config
    function and as the implementation of *layer_type*."""

    def register(cls):
        g_config_funcs[cls.__name__] = cls
        g_layer_type_map[layer_type] = cls
        return cls

    return register
def gen_parameter_name(layer_name, input_index):
    """Canonical weight-parameter name for input #input_index of a layer."""
    return '_{0}.w{1}'.format(layer_name, input_index)
def gen_bias_parameter_name(layer_name):
    """Canonical bias-parameter name for a layer."""
    return '_{0}.wbias'.format(layer_name)
def default(x, default_value):
    """Return *x*, or *default_value* when x is None (falsy values pass
    through unchanged)."""
    if x is None:
        return default_value
    return x
class Cfg(object):
    """Base config object: bulk-assigns a dict of values as attributes."""

    def add_keys(self, locals):
        """Copy every key of *locals* not starting with '_' onto self.

        Typically invoked as ``self.add_keys(locals())`` from a subclass
        __init__ to turn all constructor arguments into attributes.
        """
        # .items() behaves identically on Python 2 and also works on
        # Python 3; the previous .iteritems() call was Python-2-only.
        for k, v in locals.items():
            if not k.startswith('_'):
                self.__setattr__(k, v)
# functions available in config file

# Define the name of the input layers of the NeuralNetwork.
# The type of these layers must be "data".
# These layers will be provided with the DataBatch obtained
# from DataProvider. The data streams from DataProvider must
# have the same order.
@config_func
def Inputs(*args):
    """Declare *args as the input layers of the current (sub)model.

    Forbidden inside a recurrent layer group. Names declared on the root
    submodel are also recorded on the global model config.
    """
    for name in args:
        name = MakeLayerNameInSubmodel(name)
        global g_current_submodel, g_root_submodel
        if g_current_submodel.is_recurrent_layer_group:
            config_assert(False, "Do not set Inputs in recurrent layer group")
        else:
            g_current_submodel.input_layer_names.append(name)

        if g_current_submodel is g_root_submodel:
            g_config.model_config.input_layer_names.append(name)
@config_func
def HasInputsSet():
    """Return True if the current submodel already declared any inputs."""
    return len(g_current_submodel.input_layer_names) != 0
# Define the name of the output layers of the NeuralNetwork.
# Usually the output is simply the cost layer.
# You can specify other layers as outputs and calculate the
# cost (and its derivative) yourself.
@config_func
def Outputs(*args):
    """Declare *args as the output layers of the current (sub)model.

    Forbidden inside a recurrent layer group; mirrors Inputs() above.
    """
    for name in args:
        name = MakeLayerNameInSubmodel(name)
        global g_current_submodel, g_root_submodel
        if g_current_submodel.is_recurrent_layer_group:
            config_assert(False, "Do not set Outputs in recurrent layer group")
        else:
            g_current_submodel.output_layer_names.append(name)

        if g_current_submodel is g_root_submodel:
            g_config.model_config.output_layer_names.append(name)
@config_func
def SubModelBegin(name):
    """Open a new submodel scope named *name*; closed by SubModelEnd().

    The previous current submodel is pushed on g_submodel_stack.
    """
    global g_current_submodel, g_root_submodel, g_submodel_stack
    g_submodel_stack.append(g_current_submodel)

    name = MakeLayerNameInParentSubmodel(name)  #rename in nested submodel

    config_assert(name not in g_submodel_map,
                  'Duplicated submodel name: %s' % name)

    sub_model = g_config.model_config.sub_models.add()
    sub_model.name = name
    g_submodel_map[name] = sub_model
    g_current_submodel = sub_model
@config_func
def SubModelEnd(name=None):
    """Close the current submodel scope; when *name* is given it must match
    the scope opened by the corresponding SubModelBegin()."""
    global g_current_submodel, g_root_submodel, g_submodel_stack
    config_assert(g_current_submodel is not g_root_submodel,
                  "submodel not begin")
    if name is not None:
        config_assert(
            g_current_submodel.name == MakeLayerNameInParentSubmodel(name),
            "submodel name error")

    g_current_submodel = g_submodel_stack.pop()
def MakeLayerNameInParentSubmodel(name):
    """Qualify *name* with the parent submodel's '@suffix' when nested;
    at the top level the name is returned unchanged."""
    if len(g_submodel_stack) > 1:
        return name + "@" + g_submodel_stack[-1].name
    return name
def GetLayerBaseName(name):
    """Strip any '@submodel' qualifier and return the bare layer name."""
    return name.partition('@')[0]
def MakeLayerNameInSubmodel(name, submodel_name=None):
    """Qualify *name* with a submodel '@suffix' when required.

    Plain names are returned unchanged for the ordinary (non-recurrent)
    case unless g_add_submodel_suffix is set.
    """
    global g_current_submodel
    global g_add_submodel_suffix
    if (submodel_name is None and not g_add_submodel_suffix and
            not g_current_submodel.is_recurrent_layer_group):
        return name
    if submodel_name is None:
        submodel_name = g_current_submodel.name
    return name + "@" + submodel_name
# Define a recurrent layer group begin with RecurrentLayerGroupBegin
# and end with RecurrentLayerGroupEnd.
# A recurrent layer group forward/backward one frame after previous frame
# forward/backward through all layers in layer group.
# in_links are names of layer used as input layer in the layer group.
# out_links are names of layer in layer group used as outside layer's input.
#
# If generator is set, the layer group need one or more than one outlinks.
# The first outlink should always be the generated token ids.
# If generator.num_results_per_sample is not set, the output for one sample is
# a ids sequence. Else if num_results_per_sample is more than one,
# the output for one sample is up to #num_results_per_sample generated
# sequences, which are packed in one sequence in output ids vector. Each
# generated sequence has a generation probability. The probabilities for one
# sample are stored in one row of output value matrix.
# Packed generated sequences format, for each i:
#   seq_i_length: one interger, seq_i content length,
#   [seq_i content], length = seq_i_length
#   seq_i_end_mark: one interger, for format check, always -1
# You can use "seq_text_printer" to print the output of the generator.
@config_func
def RecurrentLayerGroupWithoutOutLinksBegin(name,
                                            in_links,
                                            seq_reversed=False,
                                            target_inlinkname=""):
    """Open a recurrent layer group submodel and wire up its in-links.

    Each in-link (a layer name or a link object with .link_name) gets a
    ScatterAgentLayer inside the group mirroring the outside layer.
    Note: the loop below deliberately rebinds *name* to each link name.
    """
    global g_current_submodel
    config_assert(g_config.model_config.type == "recurrent_nn",
                  "RecurrentLayerGroup should be used only in recurrent_nn")
    RecurrentLayerGroup(name=name)  # add to father model
    SubModelBegin(name)
    g_current_submodel.is_recurrent_layer_group = True
    g_current_submodel.reversed = seq_reversed
    in_links_count = 0
    for linkid, link in enumerate(in_links):
        if isinstance(link, basestring):
            name = link
        else:
            name = link.link_name

        in_links_count += 1
        layer_name = MakeLayerNameInParentSubmodel(name)
        layer = g_layer_map[layer_name]
        ScatterAgentLayer(
            name=name, size=layer.size, width=layer.width, height=layer.height)

        pair = g_current_submodel.in_links.add()
        pair.layer_name = layer_name
        pair.link_name = MakeLayerNameInSubmodel(name)
@config_func
def RecurrentLayerGroupSetOutLink(link):
    """Register *link* (layer name or link object) as an out-link of the
    current recurrent layer group."""
    if isinstance(link, basestring):
        name = link
    else:
        name = link.link_name
    layer_name = MakeLayerNameInParentSubmodel(name)
    pair = g_current_submodel.out_links.add()
    pair.layer_name = MakeLayerNameInSubmodel(name)
    pair.link_name = layer_name
def RecurrentLayerGroupSetGenerator(generator=None):
    """Attach a generator config to the current recurrent layer group.

    NOTE(review): the generator=None default would raise AttributeError on
    the first line; callers always pass a generator -- confirm.
    """
    generator.eos_layer_name = MakeLayerNameInSubmodel(generator.eos_layer_name)
    g_current_submodel.generator.CopyFrom(generator)
@config_func
def RecurrentLayerGroupBegin(name,
                             in_links,
                             out_links,
                             generator=None,
                             target_inlinkname="",
                             seq_reversed=False):
    """Open a recurrent layer group with both in-links and out-links.

    With a generator, in_links must be empty and at least one out-link is
    required (the first out-link carries the generated token ids).
    """
    RecurrentLayerGroupWithoutOutLinksBegin(name, in_links, seq_reversed)
    for link in out_links:
        RecurrentLayerGroupSetOutLink(link)

    if generator is not None:
        RecurrentLayerGroupSetGenerator(generator)
        config_assert(
            len(in_links) == 0, "no in_links should be passed to generator")
        config_assert(
            len(out_links) >= 1,
            "one or more than one out_links should be passed to generator")
@config_func
def RecurrentLayerGroupEnd(name):
    """Close a recurrent layer group.

    Validates declared memories, closes the submodel scope, then creates an
    agent (DataLayer for generators, GatherAgentLayer otherwise) in the
    parent model for every out-link.
    """
    global g_current_submodel
    config_assert(g_current_submodel.is_recurrent_layer_group,
                  "RecurrentLayerGroup not begin")
    for pair in g_current_submodel.memories:  #check exist
        layer = g_layer_map[pair.layer_name]
        config_assert(layer is not None,
                      "memory declare wrong name:%s" % pair.layer_name)
        memory_link = g_layer_map[pair.link_name]
        config_assert(layer.size == memory_link.size,
                      "memory declare wrong size:%d" % memory_link.size)

    prev_submodel = g_current_submodel
    SubModelEnd(name)

    for pair in prev_submodel.out_links:
        layer = g_layer_map[pair.layer_name]
        # add out agent to father model
        agent_name = GetLayerBaseName(pair.link_name)
        if prev_submodel.HasField("generator"):
            DataLayer(name=agent_name, size=layer.size)
        else:
            GatherAgentLayer(name=agent_name, size=layer.size)
# Define the model type
# currently, the paddle supports "nn", "recurrent_nn", "recursive_nn" and "multi_nn"
@config_func
def model_type(name):
    """Set the global model type string on the model config."""
    g_config.model_config.type = name
@config_class
class Bias(Cfg):
    """Bias settings for a layer; all constructor arguments become
    attributes via Cfg.add_keys (used as the ``bias`` argument of Layer)."""

    def __init__(self,
                 parameter_name=None,
                 learning_rate=None,
                 momentum=None,
                 decay_rate=None,
                 decay_rate_l1=None,
                 initial_mean=None,
                 initial_std=None,
                 initial_strategy=None,
                 initial_smart=None,
                 num_batches_regularization=None,
                 sparse_remote_update=None,
                 gradient_clipping_threshold=None,
                 is_static=None,
                 is_shared=None,
                 initializer=None):
        self.add_keys(locals())
# Define one input for a layer
@config_class
class Input(Cfg):
    """One input of a layer: the source layer name plus per-input parameter
    and operation settings (all arguments become attributes)."""

    def __init__(
            self,
            input_layer_name,
            parameter_name=None,
            initializer=None,
            learning_rate=None,
            momentum=None,
            decay_rate=None,
            decay_rate_l1=None,
            initial_mean=None,
            initial_std=None,
            initial_strategy=None,
            initial_smart=None,
            num_batches_regularization=None,
            sparse_remote_update=None,
            sparse_update=None,
            gradient_clipping_threshold=None,
            conv=None,
            bilinear_interp=None,
            norm=None,
            pool=None,
            image=None,
            block_expand=None,
            maxout=None,
            spp=None,
            pad=None,
            upsample=None,
            format=None,
            nnz=None,
            is_static=None,
            is_shared=None,
            update_hooks=None,
            input_layer_argument=None,
            make_layer_name_in_submodel=True, ):
        """
        @param make_layer_name_in_submodel True by defalut, you might need to
        set it carefully when adding Input in config_parser.py.
        """
        self.add_keys(locals())
        self.input_layer_name = MakeLayerNameInSubmodel(
            input_layer_name
        ) if make_layer_name_in_submodel else input_layer_name
# Define a projection for a mixed layer
@config_class
class Projection(Input):
    """Base class for projections used inside a mixed layer.

    Subclasses must set ``type`` and implement the parameter size/dims
    hooks below.
    """
    type = None  # subclass should set it correctly

    def __init__(
            self,
            input_layer_name,
            size=0,  # projection output size
            parameter_name=None,
            learning_rate=None,
            momentum=None,
            decay_rate=None,
            decay_rate_l1=None,
            initial_mean=None,
            initial_std=None,
            initial_strategy=None,
            initial_smart=None,
            initializer=None,
            num_batches_regularization=None,
            sparse_remote_update=None,
            sparse_update=None,
            gradient_clipping_threshold=None,
            ptype=None,
            format=None,
            nnz=None,
            is_static=None,
            is_shared=None,
            update_hooks=None,
            input_layer_argument=None, ):
        self.add_keys(locals())
        self.input_layer_name = MakeLayerNameInSubmodel(input_layer_name)

        self.proj_conf = ProjectionConfig()
        if ptype is not None:
            self.proj_conf.type = ptype
        else:
            self.proj_conf.type = self.type

    # calculate the output_size given input_size. return 0
    # to indicate using the size from Layer config
    def calc_output_size(self, input_layer_config):
        return self.size

    def calc_parameter_size(self, input_size, output_size):
        # Fixed typo: 'NotimplementedError' was an undefined name, so
        # hitting this path raised NameError instead of the intended error.
        raise NotImplementedError

    def calc_parameter_dims(self, input_size, output_size):
        raise NotImplementedError
@config_class
class IdentityProjection(Projection):
    """Pass the input through unchanged; holds no parameters."""
    type = 'identity'

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size

    def calc_parameter_size(self, input_size, output_size):
        return 0

    def calc_parameter_dims(self, input_size, output_size):
        return []
# Like IdentityProjection, but the layer size may be smaller than the input
# size; the projection selects dimensions [offset, offset+layer_size) from
# the input
@config_class
class IdentityOffsetProjection(Projection):
    """Identity projection over a contiguous slice of the input."""
    type = 'identity_offset'

    def __init__(self, input_layer_name, offset, **xargs):
        super(IdentityOffsetProjection, self).__init__(input_layer_name,
                                                       **xargs)
        self.proj_conf.offset = offset

    def calc_output_size(self, input_layer_config):
        return 0  # depends on the outside MixedLayer

    def calc_parameter_size(self, input_size, output_size):
        return 0

    def calc_parameter_dims(self, input_size, output_size):
        return []
@config_class
class SliceProjection(Projection):
    """Concatenate channel ranges of a convolution layer's output.

    *slices* is a sequence of (start_channel, end_channel) pairs; each pair
    contributes (end - start) feature maps to the projection output.
    """
    type = 'slice'

    def __init__(self, input_layer_name, slices, **xargs):
        super(SliceProjection, self).__init__(input_layer_name, **xargs)
        input = g_layer_map[input_layer_name]
        if input.type in ["exconv", "cudnn_conv"]:
            # the slice operator is for the channel dimension
            assert input.num_filters is not None
            channels = input.num_filters
            # floor division: identical to '/' for Py2 ints, and avoids a
            # float size under Python 3
            image_size = input.size // channels
            assert slices[-1][1] <= channels
            # iterate the pairs directly instead of indexing via xrange
            # (Py2-only); also avoid shadowing the builtin 'slice'
            for begin, end in slices:
                piece = self.proj_conf.slices.add()
                piece.start = begin * image_size
                piece.end = end * image_size
                self.size += piece.end - piece.start
        else:
            config_assert(False,
                          'Currently the input should be convolution layer')

    def calc_parameter_size(self, input_size, output_size):
        return 0

    def calc_parameter_dims(self, input_size, output_size):
        return []
# DotMulProjection performs element-wise multiplication with weight
@config_class
class DotMulProjection(Projection):
    """Element-wise multiply of the input by a learned weight vector."""
    type = 'dot_mul'

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size

    def calc_parameter_size(self, input_size, output_size):
        # one weight per output element
        return output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [1, output_size]
# ScalingProjection
@config_class
class ScalingProjection(Projection):
    """Multiply the whole input by a single learned scalar."""
    type = 'scaling'

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size

    def calc_parameter_size(self, input_size, output_size):
        return 1

    def calc_parameter_dims(self, input_size, output_size):
        return [1, 1]
@config_class
class TableProjection(Projection):
    """Table projection; parameter matrix is input_size x output_size."""
    type = 'table'

    def calc_parameter_size(self, input_size, output_size):
        return input_size * output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [input_size, output_size]
@config_class
class FullMatrixProjection(Projection):
    """Dense (fully-connected) projection: W is input_size x output_size."""
    type = 'fc'

    def calc_parameter_size(self, input_size, output_size):
        return input_size * output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [input_size, output_size]
@config_class
class TransposedFullMatrixProjection(Projection):
    """Dense projection whose weight is stored transposed
    (output_size x input_size)."""
    type = 'trans_fc'

    def calc_parameter_size(self, input_size, output_size):
        return input_size * output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [output_size, input_size]
@config_class
class ContextProjection(Projection):
    """Concatenate a window of context_length frames starting at
    context_start; padding rows may optionally be trainable parameters."""
    type = 'context'

    def __init__(self, input_layer_name, context_start, context_length,
                 trainable_padding, **xargs):
        super(ContextProjection, self).__init__(input_layer_name, **xargs)
        self.proj_conf.context_start = context_start
        self.proj_conf.context_length = context_length
        self.proj_conf.trainable_padding = trainable_padding
        # rows of padding needed at the beginning plus at the end of a
        # sequence for the chosen window
        self._total_pad = max(0, -self.proj_conf.context_start) \
                          + max(0, self.proj_conf.context_start \
                                + self.proj_conf.context_length - 1)

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size * self.proj_conf.context_length

    def calc_parameter_size(self, input_size, output_size):
        # idiom fix: truthiness test instead of '== False'
        if not self.proj_conf.trainable_padding:
            return 0
        else:
            return input_size * self._total_pad

    def calc_parameter_dims(self, input_size, output_size):
        return [self._total_pad, input_size]

    # class-level default, shadowed by the instance attribute set in __init__
    _total_pad = 0
@config_class
class ConvBaseProjection(Projection):
    """Common base for convolution projections (forward and transposed)."""

    def __init__(self,
                 input_layer_name,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvBaseProjection, self).__init__(input_layer_name, **xargs)

        if num_filters is not None:
            self.proj_conf.num_filters = num_filters

    def calc_output_size(self, input_layer_config):
        return self.proj_conf.output_size

    def calc_parameter_size(self, input_size, output_size):
        """Filter weights: out_ch * in_ch * fh * fw, divided by groups."""
        co = self.proj_conf.num_filters
        ci = self.proj_conf.conv_conf.channels
        fh = self.proj_conf.conv_conf.filter_size
        fw = self.proj_conf.conv_conf.filter_size_y
        gr = self.proj_conf.conv_conf.groups
        # floor division: identical to '/' for Py2 ints, and keeps the
        # result an int under Python 3
        return co * ci * fh * fw // gr

    def calc_bias_size(self):
        # one bias per output filter
        return self.proj_conf.num_filters

    def calc_parameter_dims(self, input_size, output_size):
        return None
@config_class
class ConvProjection(ConvBaseProjection):
    """Forward convolution projection."""
    type = 'conv'

    def __init__(self,
                 input_layer_name,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvProjection, self).__init__(input_layer_name, num_filters,
                                             conv_conf, **xargs)

        parse_conv(conv_conf, self.input_layer_name, self.proj_conf.conv_conf,
                   num_filters)
        # one output_x * output_y feature map per filter
        self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
                                     self.proj_conf.conv_conf.output_y * \
                                     num_filters
@config_class
class ConvTransProjection(ConvBaseProjection):
    """Projection that applies a transposed (de-)convolution."""
    type = 'convt'

    def __init__(self,
                 input_layer_name,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvTransProjection, self).__init__(input_layer_name, num_filters,
                                                  conv_conf, **xargs)
        parse_conv(
            conv_conf,
            self.input_layer_name,
            self.proj_conf.conv_conf,
            num_filters,
            trans=True)
        conf = self.proj_conf.conv_conf
        # For the transposed case the reconstructed image is the output.
        self.proj_conf.output_size = conf.img_size_y * conf.img_size * num_filters
# Define an operator for a mixed layer.
@config_class
class Operator(Cfg):
    """Base class for mixed-layer operators (parameter-free transforms)."""
    type = None  # subclass should set it correctly

    def __init__(self, input_layer_names):
        self.add_keys(locals())
        self.operator_conf = OperatorConfig()
        self.operator_conf.type = self.type

    def check_dims(self):
        """Hook for subclasses to validate input/output dimensions."""
        pass

    def calc_output_size(self, input_sizes):
        return 0
@config_class
class DotMulOperator(Operator):
    """Element-wise product of exactly two inputs, optionally scaled."""
    type = 'dot_mul'

    def __init__(self, input_layer_names, scale=None, **xargs):
        super(DotMulOperator, self).__init__(input_layer_names, **xargs)
        if scale is not None:
            self.operator_conf.dotmul_scale = scale
        config_assert(len(input_layer_names) == 2, "DotMul is binary operator")

    def check_dims(self):
        # Both inputs must match the output size exactly.
        for idx in range(2):
            config_assert(self.operator_conf.input_sizes[idx] ==
                          self.operator_conf.output_size,
                          "DotMul input_size != output_size")

    def calc_output_size(self, input_sizes):
        return input_sizes[0]
@config_class
class ConvOperator(Operator):
    """Convolution expressed as a parameter-free binary operator."""
    type = 'conv'

    def __init__(self,
                 input_layer_names,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvOperator, self).__init__(input_layer_names, **xargs)
        if num_filters is not None:
            self.operator_conf.num_filters = num_filters
        parse_conv(conv_conf,
                   MakeLayerNameInSubmodel(input_layer_names[0]),
                   self.operator_conf.conv_conf, num_filters)
        conf = self.operator_conf.conv_conf
        self.operator_conf.output_size = conf.output_x * conf.output_y * num_filters
        config_assert(len(input_layer_names) == 2, "Conv is binary operator")

    def calc_output_size(self, input_sizes):
        return self.operator_conf.output_size
@config_class
class ConvTransOperator(Operator):
    """Transposed convolution expressed as a binary operator."""
    type = 'convt'

    def __init__(self,
                 input_layer_names,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvTransOperator, self).__init__(input_layer_names, **xargs)
        if num_filters is not None:
            self.operator_conf.num_filters = num_filters
        parse_conv(
            conv_conf,
            MakeLayerNameInSubmodel(input_layer_names[0]),
            self.operator_conf.conv_conf,
            num_filters,
            trans=True)
        conf = self.operator_conf.conv_conf
        # Transposed conv: the reconstructed image is the output.
        self.operator_conf.output_size = \
            conf.img_size * conf.img_size_y * num_filters
        config_assert(len(input_layer_names) == 2, "Conv is binary operator")

    def calc_output_size(self, input_sizes):
        return self.operator_conf.output_size
# please refer to the comments in proto/ModelConfig.proto
@config_class
class Conv(Cfg):
    """2-D convolution hyper-parameters (see proto/ModelConfig.proto)."""

    def __init__(self,
                 filter_size,
                 channels,
                 padding=None,
                 stride=None,
                 groups=None,
                 filter_channels=None,
                 output_x=None,
                 img_size=None,
                 caffe_mode=True,
                 filter_size_y=None,
                 padding_y=None,
                 stride_y=None,
                 dilation=None,
                 dilation_y=None):
        self.add_keys(locals())
        # Unspecified y-direction values inherit the x-direction setting.
        self.filter_size_y = filter_size if filter_size_y is None else filter_size_y
        self.padding_y = padding if padding_y is None else padding_y
        self.dilation_y = dilation if dilation_y is None else dilation_y
        self.stride_y = stride if stride_y is None else stride_y
        if output_x is not None:
            config_assert(output_x <= 0)
# please refer to the comments in proto/ModelConfig.proto
@config_class
class Conv3D(Cfg):
    """3-D convolution hyper-parameters (see proto/ModelConfig.proto)."""

    def __init__(self,
                 filter_size,
                 channels,
                 padding=None,
                 stride=None,
                 groups=None,
                 filter_channels=None,
                 output_x=None,
                 img_size=None,
                 caffe_mode=True,
                 filter_size_y=None,
                 padding_y=None,
                 stride_y=None,
                 filter_size_z=None,
                 padding_z=None,
                 stride_z=None):
        self.add_keys(locals())
        # Unset (falsy) y/z values inherit the x-direction setting;
        # ``a or b`` is exactly ``a if a else b``.
        self.filter_size_y = filter_size_y or filter_size
        self.filter_size_z = filter_size_z or filter_size
        self.padding_y = padding_y or padding
        self.padding_z = padding_z or padding
        self.stride_y = stride_y or stride
        self.stride_z = stride_z or stride
        if output_x is not None:
            config_assert(output_x <= 0)
@config_class
class BilinearInterp(Cfg):
    """Bilinear interpolation (resize) settings: output size and channels."""

    def __init__(self, out_size_x=None, out_size_y=None, channels=None):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class Pool(Cfg):
    """2-D pooling configuration; consumed by parse_pool()."""

    def __init__(
            self,
            pool_type,
            channels,
            size_x,
            size_y=None,
            start=None,  # deprecated; parse_pool asserts it is unset
            stride=None,  # 1 by default in protobuf
            stride_y=None,
            padding=None,  # 0 by default in protobuf
            padding_y=None):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class Pool3d(Cfg):
    """3-D pooling configuration; unset y/z values inherit from x."""

    def __init__(
            self,
            pool_type,
            channels,
            size_x,
            size_y=None,
            size_z=None,
            start=None,
            stride=None,  # 1 by default in protobuf
            stride_y=None,
            stride_z=None,
            padding=None,  # 0 by default in protobuf
            padding_y=None,
            padding_z=None):
        self.add_keys(locals())
        # NOTE(review): the attribute names say "filter_size" although this
        # is a pooling window — kept as-is for compatibility with readers.
        # ``a or b`` is exactly ``a if a else b``.
        self.filter_size_y = size_y or size_x
        self.filter_size_z = size_z or size_x
        self.padding_y = padding_y or padding
        self.padding_z = padding_z or padding
        self.stride_y = stride_y or stride
        self.stride_z = stride_z or stride
@config_class
class SpatialPyramidPool(Cfg):
    """Spatial pyramid pooling settings; consumed by parse_spp()."""

    def __init__(self, pool_type, pyramid_height, channels):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class Pad(Cfg):
    """Padding amounts along channel (pad_c), height (pad_h), width (pad_w)."""

    def __init__(self, channels, pad_c, pad_h, pad_w):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class Upsample(Cfg):
    """Upsample settings: scale factors, output padding and explicit size."""

    def __init__(self, scale, scale_y, pad_out_x, pad_out_y, upsample_size,
                 upsample_size_y):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class Norm(Cfg):
    """Normalization settings (type, window size, scale, power); see parse_norm()."""

    def __init__(self,
                 norm_type,
                 channels,
                 size,
                 scale,
                 pow,
                 output_x=None,
                 img_size=None,
                 blocked=None):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class Image(Cfg):
    """Image input description: channel count and optional square size."""

    def __init__(self, channels, img_size=None):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class BlockExpand(Cfg):
    """Block-expand (im2col-style) settings; consumed by parse_block_expand()."""

    def __init__(self,
                 channels,
                 padding_x=0,
                 padding_y=0,
                 stride_x=0,
                 stride_y=0,
                 block_x=0,
                 block_y=0,
                 img_size_x=0,
                 img_size_y=0,
                 output_x=0,
                 output_y=0):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
@config_class
class MaxOut(Cfg):
    """Maxout settings (channel groups); consumed by parse_maxout()."""

    def __init__(self, channels, groups, img_size_x=0, img_size_y=0):
        # add_keys copies every constructor argument onto the instance.
        self.add_keys(locals())
def create_data_config_proto(async_load_data=False,
                             constant_slots=None,
                             data_ratio=1,
                             is_main_data=True,
                             usage_ratio=None):
    """Build a DataConfig proto with the fields shared by all providers."""
    # default: every sub data provider is treated as "main data";
    # see proto/DataConfig.proto for is_main_data.
    data_config = DataConfig()
    data_config.async_load_data = async_load_data
    if constant_slots:
        data_config.constant_slots.extend(constant_slots)
    data_config.data_ratio = data_ratio
    data_config.is_main_data = is_main_data
    usage_ratio = default(usage_ratio, settings_deprecated["usage_ratio"])
    config_assert(0 <= usage_ratio <= 1,
                  "The range of usage_ratio is [0, 1]")
    data_config.usage_ratio = usage_ratio
    return data_config
@config_func
def SimpleData(files=None,
               feat_dim=None,
               context_len=None,
               buffer_capacity=None,
               **xargs):
    """Configure the 'simple' data provider."""
    conf = create_data_config_proto(**xargs)
    conf.type = 'simple'
    conf.files = files
    conf.feat_dim = feat_dim
    if context_len is not None:
        conf.context_len = context_len
    if buffer_capacity:
        conf.buffer_capacity = buffer_capacity
    return conf
@config_func
def PyData(files=None,
           type=None,
           file_group_queue_capacity=None,
           load_data_module=None,
           load_data_object=None,
           load_data_args="",
           load_file_count=None,
           constant_slots=None,
           load_thread_num=None,
           **xargs):
    """Configure the 'py' (Python module based) data provider.

    Raises ValueError when load_data_module/load_data_object are missing,
    and Exception when the module file cannot be found on disk.
    """
    data_config = create_data_config_proto(**xargs)
    data_config.type = 'py'
    if load_data_module in g_py_module_name_list:

        def get_path(module):
            # Use the argument (not the enclosing variable) so the helper
            # works for any module name it is handed.
            m = __import__(module)
            return os.path.split(os.path.realpath(m.__file__))[0]

        # The Python C-API is not thread safe and a module can only be
        # imported once, so copy the module under a new name whenever it
        # has to be imported again.
        module_new_name = "%s_copy_%d" % (load_data_module,
                                          len(g_py_module_name_list))
        g_py_module_name_list.append(module_new_name)
        module_path = "%s/%s.py" % (get_path(load_data_module),
                                    load_data_module)
        new_module_path = "%s/%s.py" % (get_path(load_data_module),
                                        module_new_name)
        if not os.path.isfile(module_path):
            raise Exception("File %s does not exist." % module_path)
        shutil.copy2(module_path, new_module_path)
        load_data_module = module_new_name
    else:
        g_py_module_name_list.append(load_data_module)
    if load_data_module is not None and load_data_object is not None:
        data_config.load_data_module = load_data_module
        data_config.load_data_object = load_data_object
    else:
        raise ValueError('load_data_module, load_data_object is not defined.')
    data_config.load_data_args = load_data_args
    data_config.files = files or ''
    if file_group_queue_capacity is not None:
        data_config.file_group_conf.queue_capacity = file_group_queue_capacity
    if load_file_count is not None:
        data_config.file_group_conf.load_file_count = load_file_count
    if load_thread_num is not None:
        data_config.file_group_conf.load_thread_num = load_thread_num
    if constant_slots:
        data_config.constant_slots.extend(constant_slots)
    return data_config
# Real data for training is actually provided by the "sub_data" providers.
@config_func
def MultiData(sub_data=None):
    """Configure the 'multi' data provider that wraps sub-providers.

    ``sub_data`` defaults to None (not ``[]``) to avoid the shared
    mutable-default-argument pitfall; omitting it behaves identically.
    """
    data_config = DataConfig()
    data_config.type = 'multi'
    if sub_data:
        data_config.sub_data_configs.extend(sub_data)
    return data_config
@config_func
def Data(type,
         files=None,
         feat_dim=None,
         slot_dims=None,
         context_len=None,
         buffer_capacity=None,
         **xargs):
    """Configure a generic data provider of the given type."""
    conf = create_data_config_proto(**xargs)
    conf.type = type
    conf.files = files
    conf.feat_dim = feat_dim
    conf.slot_dims.extend(slot_dims)
    if context_len is not None:
        conf.context_len = context_len
    conf.buffer_capacity = buffer_capacity
    return conf
@config_func
def TrainData(data_config, async_load_data=None):
    """Install *data_config* as the single training data source."""
    config_assert(not g_config.HasField('data_config'),
                  'Only one TrainData definition is allowed')
    g_config.data_config.CopyFrom(data_config)
    g_config.data_config.for_test = False
    if async_load_data is not None:
        logger.warning("Deprecated: async_load_data should be used inside"
                       " Data definition")
        g_config.data_config.async_load_data = async_load_data
@config_func
def TestData(data_config, async_load_data=None):
    """Install *data_config* as the single test data source."""
    config_assert(not g_config.HasField('test_data_config'),
                  'Only one TestData definition is allowed')
    g_config.test_data_config.CopyFrom(data_config)
    g_config.test_data_config.for_test = True
    if async_load_data is not None:
        logger.warning("Deprecated: async_load_data should be used inside"
                       " Data definition")
        g_config.test_data_config.async_load_data = async_load_data
# caffe_mode: compute the output size using floor instead of ceil,
# which is consistent with Caffe's and CuDNN's convention.
def cnn_output_size(img_size,
                    filter_size,
                    padding,
                    stride,
                    caffe_mode,
                    dilation=1):
    """Return the spatial output size of a conv/pool window.

    caffe_mode selects floor rounding (Caffe/CuDNN) instead of ceil.
    """
    effective_filter = (filter_size - 1) * dilation + 1
    span = (2 * padding + img_size - effective_filter) / float(stride)
    rounding = math.floor if caffe_mode else math.ceil
    return 1 + int(rounding(span))
# Calculate image_size based on output_size for de-convolution
# (ConvTransLayer). It is the inverse of cnn_output_size.
def cnn_image_size(output_size,
                   filter_size,
                   padding,
                   stride,
                   caffe_mode,
                   dilation=1):
    """Recover the input image size that cnn_output_size maps to *output_size*."""
    effective_filter = (filter_size - 1) * dilation + 1
    img_size = (output_size - 1) * stride + effective_filter - 2 * padding
    # Non-caffe (ceil) rounding implies one extra input pixel.
    return img_size if caffe_mode else img_size + 1
def get_img_size(input_layer_name, channels):
    """Return (width, height) of the named layer's image output.

    Falls back to a square layout inferred from size/channels when the
    layer carries no explicit width/height.
    """
    input = g_layer_map[input_layer_name]
    # Floor division keeps the pixel count integral under Python 3
    # (identical to '/' for ints under Python 2).
    img_pixels = input.size // channels
    img_size = input.width if input.width > 0 else int(img_pixels**0.5)
    img_size_y = input.height if input.height > 0 else img_pixels // img_size
    config_assert(
        img_size * img_size_y == img_pixels,
        "Input layer %s: Incorrect input image size %d * %d for input image pixels %d"
        % (input_layer_name, img_size, img_size_y, img_pixels))
    return img_size, img_size_y
def get_img3d_size(input_layer_name, channels):
    """Return (width, height, depth) of the named layer's 3-D image output."""
    input = g_layer_map[input_layer_name]
    # Floor division keeps the pixel count integral under Python 3.
    img_pixels = input.size // channels
    img_size = input.width
    img_size_y = input.height
    img_size_z = input.depth
    config_assert(
        img_size * img_size_y * img_size_z == img_pixels,
        "Input layer %s: Incorrect input image size %d * %d * %d for input image pixels %d"
        % (input_layer_name, img_size, img_size_y, img_size_z, img_pixels))
    return img_size, img_size_y, img_size_z
def parse_bilinear(bilinear, input_layer_name, bilinear_conf):
    """Fill bilinear_conf from a BilinearInterp cfg and the input image."""
    parse_image(bilinear, input_layer_name, bilinear_conf.image_conf)
    bilinear_conf.out_size_y = bilinear.out_size_y
    bilinear_conf.out_size_x = bilinear.out_size_x
def parse_pool(pool, input_layer_name, pool_conf, ceil_mode, exclude_mode):
    """Fill pool_conf (proto) from a Pool cfg; computes output geometry."""
    pool_conf.pool_type = pool.pool_type
    config_assert(pool.pool_type in [
        'max-projection', 'avg-projection', 'max-pool-with-mask', 'cudnn-max-pool', 'cudnn-avg-pool'
    ], "pool-type %s is not in " \
        "['max-projection', 'avg-projection', 'max-pool-with-mask'," \
        "'cudnn-max-pool', 'cudnn-avg-pool']" % pool.pool_type)
    pool_conf.channels = pool.channels
    pool_conf.size_x = pool.size_x
    pool_conf.stride = pool.stride
    # Unset y-direction values inherit the x-direction ones.
    pool_conf.size_y = default(pool.size_y, pool_conf.size_x)
    pool_conf.stride_y = default(pool.stride_y, pool_conf.stride)
    pool_conf.img_size, pool_conf.img_size_y = \
        get_img_size(input_layer_name, pool.channels)
    config_assert(not pool.start, "start is deprecated in pooling.")
    if pool.padding is not None:
        pool_conf.padding = pool.padding
    pool_conf.padding_y = default(pool.padding_y, pool_conf.padding)
    # cnn_output_size's caffe_mode means floor rounding, i.e. NOT ceil.
    pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x,
                                         pool_conf.padding, pool_conf.stride,
                                         not ceil_mode)
    pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y,
                                         pool_conf.padding_y,
                                         pool_conf.stride_y, not ceil_mode)
    # 'is not None' instead of '!= None' (PEP 8 identity comparison).
    if exclude_mode is not None:
        pool_conf.exclude_mode = exclude_mode
def parse_pool3d(pool, input_layer_name, pool_conf, ceil_mode):
    """Fill pool_conf (proto) from a Pool3d cfg; computes 3-D output geometry."""
    pool_conf.pool_type = pool.pool_type
    config_assert(pool.pool_type in ['max-projection', 'avg-projection'],
                  "pool-type %s is not in "
                  "['max-projection', 'avg-projection']" % pool.pool_type)
    pool_conf.channels = pool.channels
    pool_conf.size_x = pool.size_x
    pool_conf.stride = pool.stride
    # NOTE(review): unconditional assignment — would fail if pool.padding is
    # None; the conditional re-assignment below looks redundant. Confirm.
    pool_conf.padding = pool.padding
    # Unset y/z values inherit the x-direction settings.
    pool_conf.size_y = default(pool.size_y, pool_conf.size_x)
    pool_conf.size_z = default(pool.size_z, pool_conf.size_x)
    pool_conf.stride_y = default(pool.stride_y, pool_conf.stride)
    pool_conf.stride_z = default(pool.stride_z, pool_conf.stride)
    pool_conf.padding_y = default(pool.padding_y, pool_conf.padding)
    pool_conf.padding_z = default(pool.padding_z, pool_conf.padding)
    pool_conf.img_size, pool_conf.img_size_y, pool_conf.img_size_z = \
        get_img3d_size(input_layer_name, pool.channels)
    config_assert(not pool.start, "start is deprecated in pooling.")
    if pool.padding is not None:
        pool_conf.padding = pool.padding
        # NOTE(review): padding_y/padding_z were already defaulted above;
        # these repeats are no-ops unless pool.padding changed pool_conf.padding.
        pool_conf.padding_y = default(pool.padding_y, pool_conf.padding)
        pool_conf.padding_z = default(pool.padding_z, pool_conf.padding)
    # cnn_output_size's caffe_mode means floor rounding, i.e. NOT ceil.
    pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x,
                                         pool_conf.padding, pool_conf.stride,
                                         not ceil_mode)
    pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y,
                                         pool_conf.padding_y,
                                         pool_conf.stride_y, not ceil_mode)
    pool_conf.output_z = cnn_output_size(pool_conf.img_size_z, pool_conf.size_z,
                                         pool_conf.padding_z,
                                         pool_conf.stride_z, not ceil_mode)
def parse_spp(spp, input_layer_name, spp_conf):
    """Fill spp_conf from a SpatialPyramidPool cfg."""
    config_assert(spp.pool_type in ['max-projection', 'avg-projection'],
                  "pool-type %s is not in "
                  "['max-projection', 'avg-projection']" % spp.pool_type)
    parse_image(spp, input_layer_name, spp_conf.image_conf)
    spp_conf.pool_type = spp.pool_type
    spp_conf.pyramid_height = spp.pyramid_height
def parse_image(image, input_layer_name, image_conf):
    """Fill image_conf with channels and the 2-D geometry of the input."""
    image_conf.channels = image.channels
    (image_conf.img_size,
     image_conf.img_size_y) = get_img_size(input_layer_name,
                                           image_conf.channels)
def parse_image3d(image, input_layer_name, image_conf):
    """Fill image_conf with channels and the 3-D geometry of the input."""
    image_conf.channels = image.channels
    (image_conf.img_size,
     image_conf.img_size_y,
     image_conf.img_size_z) = get_img3d_size(input_layer_name,
                                             image_conf.channels)
def parse_norm(norm, input_layer_name, norm_conf):
    """Fill norm_conf (proto) from a Norm cfg."""
    norm_conf.norm_type = norm.norm_type
    config_assert(
        norm.norm_type in
        ['rnorm', 'cmrnorm-projection', 'cross-channel-norm'],
        "norm-type %s is not in [rnorm, cmrnorm-projection, cross-channel-norm]"
        % norm.norm_type)
    norm_conf.channels = norm.channels
    norm_conf.size = norm.size
    norm_conf.scale = norm.scale
    norm_conf.pow = norm.pow
    norm_conf.blocked = norm.blocked
    norm_conf.img_size, norm_conf.img_size_y = \
        get_img_size(input_layer_name, norm.channels)
    # Normalization keeps the spatial size unchanged.
    norm_conf.output_x = norm_conf.img_size
    norm_conf.output_y = norm_conf.img_size_y
    # Pre-divide the scale by the effective window size.
    if norm.norm_type == 'cmrnorm-projection':
        norm_conf.scale /= norm.size
    else:
        norm_conf.scale /= norm.size**2
# caffe_mode: compute the output size using floor instead of ceil,
# which is consistent with Caffe's and CuDNN's convention.
def parse_conv(conv, input_layer_name, conv_conf, num_filters, trans=False):
    """Fill conv_conf (proto) from a Conv cfg.

    With trans=True (transposed conv) the roles of image and output size
    are swapped and the image size is reconstructed via cnn_image_size().
    """
    conv_conf.filter_size = conv.filter_size
    conv_conf.filter_size_y = conv.filter_size_y
    conv_conf.channels = conv.channels
    conv_conf.padding = conv.padding
    conv_conf.padding_y = conv.padding_y
    conv_conf.stride = conv.stride
    conv_conf.stride_y = conv.stride_y
    conv_conf.groups = conv.groups
    conv_conf.caffe_mode = conv.caffe_mode
    if not conv.dilation:
        # NOTE(review): only the local cfg is patched here; conv_conf keeps
        # its protobuf default for dilation — confirm the proto default is 1.
        conv.dilation = 1
        conv.dilation_y = 1
    else:
        conv_conf.dilation = conv.dilation
        conv_conf.dilation_y = conv.dilation_y
    if not trans:
        # Floor division keeps filter_channels integral under Python 3
        # (identical to '/' for ints under Python 2).
        conv_conf.filter_channels = conv.channels // conv.groups
        conv_conf.img_size, conv_conf.img_size_y = \
            get_img_size(input_layer_name, conv.channels)
        conv_conf.output_x = cnn_output_size(
            conv_conf.img_size, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode, conv.dilation)
        conv_conf.output_y = cnn_output_size(
            conv_conf.img_size_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode, conv.dilation_y)
    else:
        conv_conf.filter_channels = num_filters // conv.groups
        conv_conf.output_x, conv_conf.output_y = \
            get_img_size(input_layer_name, conv.channels)
        conv_conf.img_size = cnn_image_size(
            conv_conf.output_x, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode, conv.dilation)
        conv_conf.img_size_y = cnn_image_size(
            conv_conf.output_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode, conv.dilation_y)
# caffe_mode: compute the output size using floor instead of ceil,
# which is consistent with Caffe's and CuDNN's convention.
def parse_conv3d(conv, input_layer_name, conv_conf, num_filters, trans=False):
    """Fill conv_conf (proto) from a Conv3D cfg (x/y/z dimensions)."""
    conv_conf.filter_size = conv.filter_size
    conv_conf.filter_size_y = conv.filter_size_y
    conv_conf.filter_size_z = conv.filter_size_z
    conv_conf.channels = conv.channels
    conv_conf.padding = conv.padding
    conv_conf.padding_y = conv.padding_y
    conv_conf.padding_z = conv.padding_z
    conv_conf.stride = conv.stride
    conv_conf.stride_y = conv.stride_y
    conv_conf.stride_z = conv.stride_z
    conv_conf.groups = conv.groups
    conv_conf.caffe_mode = conv.caffe_mode
    if not trans:
        # Floor division keeps filter_channels integral under Python 3
        # (identical to '/' for ints under Python 2).
        conv_conf.filter_channels = conv.channels // conv.groups
        conv_conf.img_size, conv_conf.img_size_y, conv_conf.img_size_z = \
            get_img3d_size(input_layer_name, conv.channels)
        conv_conf.output_x = cnn_output_size(
            conv_conf.img_size, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.output_y = cnn_output_size(
            conv_conf.img_size_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
        conv_conf.output_z = cnn_output_size(
            conv_conf.img_size_z, conv_conf.filter_size_z, conv_conf.padding_z,
            conv_conf.stride_z, conv_conf.caffe_mode)
    else:
        conv_conf.filter_channels = num_filters // conv.groups
        conv_conf.output_x, conv_conf.output_y, conv_conf.output_z = \
            get_img3d_size(input_layer_name, conv.channels)
        conv_conf.img_size = cnn_image_size(
            conv_conf.output_x, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.img_size_y = cnn_image_size(
            conv_conf.output_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
        conv_conf.img_size_z = cnn_image_size(
            conv_conf.output_z, conv_conf.filter_size_z, conv_conf.padding_z,
            conv_conf.stride_z, conv_conf.caffe_mode)
def parse_block_expand(block_expand, input_layer_name, block_expand_conf):
    """Fill block_expand_conf; output sizes are 0 when the image size is 0."""
    conf = block_expand_conf
    conf.channels = block_expand.channels
    conf.stride_x = block_expand.stride_x
    conf.stride_y = block_expand.stride_y
    conf.padding_x = block_expand.padding_x
    conf.padding_y = block_expand.padding_y
    conf.block_x = block_expand.block_x
    conf.block_y = block_expand.block_y
    conf.img_size_x = block_expand.img_size_x
    conf.img_size_y = block_expand.img_size_y
    # An unknown (zero) image size yields a zero output size; otherwise
    # use ceil rounding (caffe_mode=False).
    if conf.img_size_x == 0:
        conf.output_x = 0
    else:
        conf.output_x = cnn_output_size(
            block_expand.img_size_x, block_expand.block_x,
            block_expand.padding_x, block_expand.stride_x, False)
    if conf.img_size_y == 0:
        conf.output_y = 0
    else:
        conf.output_y = cnn_output_size(
            block_expand.img_size_y, block_expand.block_y,
            block_expand.padding_y, block_expand.stride_y, False)
def parse_maxout(maxout, input_layer_name, maxout_conf):
    """Fill maxout_conf: the maxout group count plus the image geometry."""
    maxout_conf.groups = maxout.groups
    parse_image(maxout, input_layer_name, maxout_conf.image_conf)
# Define an evaluator.
@config_func
def Evaluator(name,
              type,
              inputs,
              chunk_scheme=None,
              num_chunk_types=None,
              classification_threshold=None,
              positive_label=None,
              dict_file=None,
              result_file=None,
              num_results=None,
              top_k=None,
              delimited=None,
              excluded_chunk_types=None,
              overlap_threshold=None,
              background_id=None,
              evaluate_difficult=None,
              ap_type=None):
    """Register an evaluator in the model config.

    Only arguments that are not None are written into the proto, so the
    protobuf defaults apply to everything left unset.
    """
    evaluator = g_config.model_config.evaluators.add()
    evaluator.type = type
    evaluator.name = MakeLayerNameInSubmodel(name)
    if type_of(inputs) == str:
        inputs = [inputs]
    evaluator.input_layers.extend(
        [MakeLayerNameInSubmodel(name) for name in inputs])
    if chunk_scheme is not None:
        evaluator.chunk_scheme = chunk_scheme
        evaluator.num_chunk_types = num_chunk_types
    g_current_submodel.evaluator_names.append(evaluator.name)
    # Copy the remaining optional scalar settings in one sweep.
    optional_fields = (
        ('classification_threshold', classification_threshold),
        ('positive_label', positive_label),
        ('dict_file', dict_file),
        ('result_file', result_file),
        ('num_results', num_results),
        ('top_k', top_k),
        ('delimited', delimited),
        ('overlap_threshold', overlap_threshold),
        ('background_id', background_id),
        ('evaluate_difficult', evaluate_difficult),
        ('ap_type', ap_type),
    )
    for field_name, value in optional_fields:
        if value is not None:
            setattr(evaluator, field_name, value)
    if excluded_chunk_types:
        evaluator.excluded_chunk_types.extend(excluded_chunk_types)
class LayerBase(object):
    """Base class for all layer config helpers.

    Constructing an instance appends a LayerConfig proto to the global
    model config, normalizes/resolves its inputs, and registers the layer
    in g_layer_map and the current submodel.
    """

    def __init__(
            self,
            name,
            type,
            size,  # size can be 0. In this case, subclass should set it.
            inputs,
            device=None,
            active_type="",
            drop_rate=0.,
            coeff=None,
            error_clipping_threshold=None):
        config_assert('@' not in name,
                      "layer name: %s contain special character @" % name)
        global g_current_submodel
        # Qualify the name with the current submodel prefix.
        name = MakeLayerNameInSubmodel(name)
        config_assert(name not in g_layer_map,
                      'Duplicated layer name: %s' % name)
        # Deep-copy so later normalization cannot mutate the caller's cfg.
        self.inputs = copy.deepcopy(inputs)
        self.operators = []
        # Normalize inputs to a list.
        if self.inputs is None:
            self.inputs = []
        elif type_of(self.inputs) != list:
            self.inputs = [self.inputs]
        self.config = g_config.model_config.layers.add()
        assert isinstance(self.config, LayerConfig)
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        mkldnn_acts = ['relu', 'tanh', 'softmax']
        # Route supported activations to their MKLDNN implementations.
        if use_mkldnn and active_type in mkldnn_acts:
            active_type = "mkldnn_" + active_type
        self.config.name = name
        self.config.type = type
        self.config.active_type = active_type
        if coeff is not None:
            self.config.coeff = float(coeff)
        if size != 0:
            self.config.size = size
        if drop_rate != 0:
            self.config.drop_rate = drop_rate
        if device is not None:
            self.config.device = device
        elif g_default_device is not None:
            self.config.device = g_default_device
        if error_clipping_threshold is not None:
            self.config.error_clipping_threshold = error_clipping_threshold
        # Convert every input (layer-name string / Input / Operator) to an
        # Input config and mirror it into the proto.
        for input_index in xrange(len(self.inputs)):
            input = self.inputs[input_index]
            input_config = None
            input_layer_name = ''
            if type_of(input) == str:
                input_layer_name = input
                input_config = Input(
                    input_layer_name=input,
                    parameter_name=gen_parameter_name(name, input_index))
                input_layer_name = input_config.input_layer_name
            elif isinstance(input, Input):
                input_layer_name = input.input_layer_name
                input_config = input
                if input_config.parameter_name is None:
                    input_config.parameter_name = \
                        gen_parameter_name(name, input_index)
            elif isinstance(input, Operator):
                self.operators.append(input)
                input.operator_conf.input_indices.append(input_index)
                # The operator's first input layer stands in for this slot.
                input_config = Input(input.input_layer_names[0])
                input_layer_name = input_config.input_layer_name
            else:
                raise ValueError('Wrong type for inputs: %s' % type_of(input))
            config_assert(input_layer_name in g_layer_map,
                          "Unknown input layer '%s' for layer %s" %
                          (input_layer_name, name))
            self.inputs[input_index] = input_config
            layer_input = self.config.inputs.add()
            layer_input.input_layer_name = input_config.input_layer_name
            if input_config.input_layer_argument is not None:
                layer_input.input_layer_argument = \
                    input_config.input_layer_argument
        g_layer_map[name] = self.config
        g_current_submodel.layer_names.append(self.config.name)

    def get_input_layer(self, input_index):
        # Look up the LayerConfig proto of the input at the given slot.
        return g_layer_map[self.config.inputs[input_index].input_layer_name]

    # will return the bias created if not *for_self*
    def create_bias_parameter(
            self,
            bias,  # True/False or BiasCfg
            size,
            dims=None,
            for_self=True,  # whether create bias for layer self
    ):
        if size == 0:
            return
        if dims is None:
            dims = [1, size]
        config_assert(
            type_of(bias) == bool or type_of(bias) == Bias,
            'Incorrect type for bias: %s' % type_of(bias))
        # bias=True means "use a default-configured Bias".
        if type_of(bias) == bool:
            if bias:
                bias = Bias()
        if type_of(bias) == Bias:
            if bias.parameter_name is None:
                bias.parameter_name = gen_bias_parameter_name(self.config.name)
            # Only create the Parameter once; shared names are re-used.
            if bias.parameter_name not in g_parameter_map:
                assert isinstance(self.config, LayerConfig)
                Parameter(
                    bias.parameter_name,
                    size,
                    self.config.device
                    if self.config.HasField('device') else None,
                    dims,
                    bias.learning_rate,
                    bias.momentum,
                    decay_rate=bias.decay_rate,
                    decay_rate_l1=bias.decay_rate_l1,
                    initial_mean=bias.initial_mean,
                    initial_std=bias.initial_std,
                    initial_strategy=bias.initial_strategy,
                    initial_smart=bias.initial_smart,
                    num_batches_regularization=bias.num_batches_regularization,
                    sparse_remote_update=bias.sparse_remote_update,
                    gradient_clipping_threshold=bias.
                    gradient_clipping_threshold,
                    is_static=bias.is_static,
                    is_shared=bias.is_shared,
                    initializer=bias.initializer)
            if for_self:
                self.config.bias_parameter_name = bias.parameter_name
            else:
                return bias.parameter_name

    def create_input_parameter(self,
                               input_index,
                               size,
                               dims=None,
                               sparse=None,
                               format=None):
        if dims is None:
            # TODO(yuyang18): print warning and callstack here!
            dims = list()
        if size == 0:
            return
        input_config = self.inputs[input_index]
        self.config.inputs[input_index].input_parameter_name = \
            input_config.parameter_name
        # Re-use an already-registered (shared) parameter after checking
        # that the geometry matches.
        if input_config.parameter_name in g_parameter_map:
            para = g_parameter_map[input_config.parameter_name]
            config_assert(size == para.size, (
                'Shared parameter "%s" does not ' + 'have same size: %s vs. %s')
                          % (input_config.parameter_name, para.size, size))
            config_assert(dims == para.dims, (
                'Shared parameter "%s" does not ' + 'have same dims: %s vs. %s')
                          % (input_config.parameter_name, para.dims, dims))
            return
        Parameter(
            input_config.parameter_name,
            size,
            self.config.device if self.config.HasField("device") else None,
            dims,
            input_config.learning_rate,
            input_config.momentum,
            decay_rate=input_config.decay_rate,
            decay_rate_l1=input_config.decay_rate_l1,
            initial_mean=input_config.initial_mean,
            initial_std=input_config.initial_std,
            initial_strategy=input_config.initial_strategy,
            initial_smart=input_config.initial_smart,
            num_batches_regularization=input_config.num_batches_regularization,
            sparse_remote_update=input_config.sparse_remote_update,
            sparse_update=input_config.sparse_update,
            gradient_clipping_threshold=input_config.
            gradient_clipping_threshold,
            sparse=sparse,
            format=format,
            is_static=input_config.is_static,
            is_shared=input_config.is_shared,
            update_hooks=input_config.update_hooks,
            initializer=input_config.initializer)

    def set_layer_size(self, size):
        if self.config.size == 0:
            self.config.size = size
        else:
            # NOTE(review): the two message fragments concatenate without a
            # separating space in the rendered assertion message.
            config_assert(self.config.size == size,
                          'Different inputs result in' +
                          'different layer size at layer %s' % self.config.name)

    def set_layer_height_width(self, height, width):
        self.config.height = height
        self.config.width = width

    def set_layer_depth(self, depth):
        self.config.depth = depth

    def set_cnn_layer(self,
                      input_layer_name,
                      height,
                      width,
                      channels,
                      is_print=True):
        # Derive the flattened layer size from the CNN output geometry.
        size = height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        if is_print:
            print("output for %s: c = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, height, width, size))
@config_layer('multi_class_cross_entropy_with_selfnorm')
class MultiClassCrossEntropySelfNormCostLayer(LayerBase):
    """Multi-class cross-entropy cost with a self-normalization term."""

    def __init__(self, name, inputs, softmax_selfnorm_alpha=0.1, **xargs):
        super(MultiClassCrossEntropySelfNormCostLayer, self).__init__(
            name, 'multi_class_cross_entropy_with_selfnorm', 0, inputs,
            **xargs)
        # Weight of the self-normalization penalty.
        self.config.softmax_selfnorm_alpha = softmax_selfnorm_alpha
@config_layer('cross_entropy_over_beam')
class CrossEntropyOverBeamLayer(LayerBase):
    """Cross-entropy over beam-search candidates.

    Inputs come in triples; the first element of each triple holds scores
    over all candidate paths and must have size 1.
    """

    def __init__(self, name, inputs, **xargs):
        config_assert(len(inputs) % 3 == 0, "Error input number.")
        super(CrossEntropyOverBeamLayer, self).__init__(
            name, 'cross_entropy_over_beam', 0, inputs, **xargs)
        # Floor division keeps input_num an int under Python 3 as well.
        input_num = len(inputs) // 3
        for i in range(input_num):
            input_layer = self.get_input_layer(i * 3)
            config_assert(input_layer.size == 1, (
                "Inputs for this layer are made up of "
                "several triples, in which the first one is scores over "
                "all candidate paths, whose size should be equal to 1."))
@config_layer('fc')
class FCLayer(LayerBase):
    """Fully connected layer; switches itself to 'mkldnn_fc' when enabled."""
    layer_type = 'fc'

    def __init__(self,
                 name,
                 size,
                 inputs,
                 bias=True,
                 error_clipping_threshold=None,
                 **xargs):
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        use_mkldnn_wgt = bool(
            int(g_command_config_args.get("use_mkldnn_wgt", 0)))
        if use_mkldnn:
            self.layer_type = 'mkldnn_fc'
            config_assert(
                len(inputs) == 1,
                "MKLDNNFCLayer support one and only one input!")
        super(FCLayer, self).__init__(
            name, self.layer_type, size, inputs=inputs, **xargs)
        # Create one weight parameter per input.
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            psize = self.config.size * input_layer.size
            dims = [input_layer.size, self.config.size]
            format = self.inputs[input_index].format
            sparse = format == "csr" or format == "csc"
            if use_mkldnn:
                config_assert(not sparse,
                              "MKLDNNFCLayer do not support sparse format yet")
                if use_mkldnn_wgt:
                    # MKLDNN stores the weight matrix transposed.
                    dims = [self.config.size, input_layer.size]
            if sparse:
                # Sparse weights: parameter size equals the nnz count.
                psize = self.inputs[input_index].nnz
            else:
                # Dense: pass sparse=None so no sparse format is recorded.
                sparse = None
            self.create_input_parameter(input_index, psize, dims, sparse,
                                        format)
        self.create_bias_parameter(bias, self.config.size)
        if error_clipping_threshold is not None:
            self.config.error_clipping_threshold = error_clipping_threshold
@config_layer('mkldnn_fc')
class MKLDNNFcLayer(FCLayer):
    # Registered alias of FCLayer: FCLayer.__init__ switches its own
    # layer_type to 'mkldnn_fc' when use_mkldnn is set.
    layer_type = 'mkldnn_fc'
@config_layer('selective_fc')
class SelectiveFCLayer(LayerBase):
    """Fully connected layer evaluated only on selected output columns.

    Parameter matrices saved by this layer are stored transposed (bias is
    not). If selective_fc is used only at test time with weights trained
    by a plain fc setup, the training config must use
    TransposedFullMatrixProjection to avoid a manual transpose.
    """

    def __init__(self,
                 name,
                 size,
                 inputs,
                 bias=True,
                 selective_fc_pass_generation=False,
                 has_selected_colums=True,
                 selective_fc_full_mul_ratio=0.02,
                 selective_fc_parallel_plain_mul_thread_num=None,
                 **xargs):
        super(SelectiveFCLayer, self).__init__(
            name, 'selective_fc', size, inputs=inputs, **xargs)
        self.config.selective_fc_pass_generation = selective_fc_pass_generation
        self.config.has_selected_colums = has_selected_colums
        self.config.selective_fc_full_mul_ratio = selective_fc_full_mul_ratio
        if selective_fc_parallel_plain_mul_thread_num is not None:
            self.config.selective_fc_parallel_plain_mul_thread_num = selective_fc_parallel_plain_mul_thread_num
        weight_input_num = len(self.inputs)
        if has_selected_colums:
            config_assert(weight_input_num >= 2,
                          ("if indices of selected columns are not specified, "
                           "selective_fc Layer has at least two inputs"))
            # The last input carries the selected-column indices, not weights.
            weight_input_num -= 1
        for idx in xrange(weight_input_num):
            in_layer = self.get_input_layer(idx)
            psize = self.config.size * in_layer.size
            # Stored transposed relative to a plain fc weight matrix.
            dims = [self.config.size, in_layer.size]
            fmt = self.inputs[idx].format
            is_sparse = fmt == "csr" or fmt == "csc"
            if is_sparse:
                psize = self.inputs[idx].nnz
            self.create_input_parameter(idx, psize, dims, is_sparse, fmt)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('print')
class PrintLayer(LayerBase):
    """Pass-through layer that prints its inputs (for debugging).

    If no format string is given, a default "layer=<name> %s" line is
    generated for every input.
    """

    def __init__(self, name, inputs, format=None):
        super(PrintLayer, self).__init__(name, 'print', 0, inputs)
        if format is None:
            lines = []
            for inp in self.inputs:
                lines.append("layer=" + inp.input_layer_name + " %s")
            format = "\n".join(lines)
        self.config.user_arg = format
@config_layer('priorbox')
class PriorBoxLayer(LayerBase):
    """Prior-box layer: records min/max sizes, aspect ratios and variances
    in the first input's priorbox_conf.

    The second input must be a data layer with width/height set (it
    supplies the reference image geometry).
    """

    def __init__(self, name, inputs, size, min_size, max_size, aspect_ratio,
                 variance):
        super(PriorBoxLayer, self).__init__(name, 'priorbox', 0, inputs)
        config_assert(len(inputs) == 2, 'PriorBoxLayer must have 2 inputs')
        input_layer = self.get_input_layer(1)
        config_assert(
            input_layer.type == 'data',
            'Expecting the second input layer of an priorbox layer to be '
            'a data layer')
        config_assert(input_layer.width > 0, 'The data layer must set width')
        config_assert(input_layer.height > 0, 'The data layer must set height')
        config_assert(len(variance) == 4, 'The variance must have 4 inputs')
        self.config.inputs[0].priorbox_conf.min_size.extend(min_size)
        self.config.inputs[0].priorbox_conf.max_size.extend(max_size)
        self.config.inputs[0].priorbox_conf.aspect_ratio.extend(aspect_ratio)
        self.config.inputs[0].priorbox_conf.variance.extend(variance)
        self.config.size = size
@config_layer('multibox_loss')
class MultiBoxLossLayer(LayerBase):
    """Multibox detection loss; expects input_num * 2 + 2 inputs and emits
    a scalar cost (size 1). All hyper-parameters are stored on the first
    input's multibox_loss_conf."""

    def __init__(self, name, inputs, input_num, num_classes, overlap_threshold,
                 neg_pos_ratio, neg_overlap, background_id, **xargs):
        super(MultiBoxLossLayer, self).__init__(name, 'multibox_loss', 0,
                                                inputs)
        config_assert(
            len(inputs) == (input_num * 2 + 2),
            'MultiBoxLossLayer does not have enough inputs')
        # The background class ID must be a valid class index.
        config_assert(num_classes > background_id,
                      'Classes number must greater than background ID')
        self.config.inputs[0].multibox_loss_conf.num_classes = num_classes
        self.config.inputs[
            0].multibox_loss_conf.overlap_threshold = overlap_threshold
        self.config.inputs[0].multibox_loss_conf.neg_pos_ratio = neg_pos_ratio
        self.config.inputs[0].multibox_loss_conf.neg_overlap = neg_overlap
        self.config.inputs[0].multibox_loss_conf.background_id = background_id
        self.config.inputs[0].multibox_loss_conf.input_num = input_num
        # A cost layer: the output is a single scalar.
        self.config.size = 1
@config_layer('detection_output')
class DetectionOutputLayer(LayerBase):
    """Detection output layer (NMS post-processing); expects
    input_num * 2 + 1 inputs. All hyper-parameters are stored on the first
    input's detection_output_conf."""

    def __init__(self, name, inputs, size, input_num, num_classes,
                 nms_threshold, nms_top_k, keep_top_k, confidence_threshold,
                 background_id, **xargs):
        super(DetectionOutputLayer, self).__init__(name, 'detection_output', 0,
                                                   inputs)
        config_assert(
            len(inputs) == (input_num * 2 + 1),
            'DetectionOutputLayer does not have enough inputs')
        # The background class ID must be a valid class index.
        config_assert(num_classes > background_id,
                      'Classes number must greater than background ID')
        self.config.inputs[0].detection_output_conf.num_classes = num_classes
        self.config.inputs[
            0].detection_output_conf.nms_threshold = nms_threshold
        self.config.inputs[0].detection_output_conf.nms_top_k = nms_top_k
        self.config.inputs[0].detection_output_conf.keep_top_k = keep_top_k
        self.config.inputs[
            0].detection_output_conf.confidence_threshold = confidence_threshold
        self.config.inputs[
            0].detection_output_conf.background_id = background_id
        self.config.inputs[0].detection_output_conf.input_num = input_num
        self.config.size = size
@config_layer('roi_pool')
class ROIPoolLayer(LayerBase):
    """ROI pooling: pools the first input over regions given by the second
    input, producing a fixed pooled_height x pooled_width output per ROI."""

    def __init__(self, name, inputs, pooled_width, pooled_height, spatial_scale,
                 num_channels, **xargs):
        super(ROIPoolLayer, self).__init__(name, 'roi_pool', 0, inputs)
        config_assert(len(inputs) == 2, 'ROIPoolLayer must have 2 inputs')
        self.config.inputs[0].roi_pool_conf.pooled_width = pooled_width
        self.config.inputs[0].roi_pool_conf.pooled_height = pooled_height
        # spatial_scale maps ROI coordinates onto the feature map.
        self.config.inputs[0].roi_pool_conf.spatial_scale = spatial_scale
        self.set_cnn_layer(name, pooled_height, pooled_width, num_channels)
@config_layer('data')
class DataLayer(LayerBase):
    """Source layer that feeds raw input data of the given size.

    Optional depth/height/width record image geometry on the layer; each
    dimension is only stored when explicitly provided (non-zero).
    """

    def __init__(self,
                 name,
                 size,
                 depth=None,
                 height=None,
                 width=None,
                 device=None):
        super(DataLayer, self).__init__(
            name, 'data', size, inputs=[], device=device)
        has_plane_geometry = bool(height) and bool(width)
        if has_plane_geometry:
            self.set_layer_height_width(height, width)
        if depth:
            self.set_layer_depth(depth)
'''
DataNormLayer: A layer for data normalization
Input: One and only one input layer is accepted. The input layer must
be DataLayer with dense data type
Output: The normalization of the input data
Reference:
LA Shalabi, Z Shaaban, B Kasasbeh. Data mining: A preprocessing engine
Example:
Layer(
name = "norm_input_layer",
type = "data_norm",
inputs = [Input("input_layer",
parameter_name = "_slot0.stats")],
data_norm_strategy = "z-score",
)
Note:
(1) The parameter has been calculated in the preprocessing stage,
and should be initialized by --init_model_path when training.
    (2) Three data normalization methods are considered
z-score: y = (x-mean)/std
min-max: y = (x-min)/(max-min)
decimal-scaling: y = x/10^j, where j is the smallest integer such that max(|y|)<1
'''
@config_layer('data_norm')
class DataNormLayer(LayerBase):
    """Normalizes its single input with pre-computed statistics; see the
    module comment above for the supported strategies and caveats."""

    def __init__(self, name, inputs, data_norm_strategy="z-score", device=None):
        super(DataNormLayer, self).__init__(
            name, 'data_norm', 0, inputs=inputs, device=device)
        self.config.data_norm_strategy = data_norm_strategy
        config_assert(len(inputs) == 1, 'DataNormLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)
        # Five statistics rows per input dimension (computed in the
        # preprocessing stage, per the module comment above).
        para_size = 5 * input_layer.size
        para_dims = [5, input_layer.size]
        # The statistics parameter is fixed; it is never updated in training.
        self.inputs[0].is_static = True
        self.create_input_parameter(0, para_size, para_dims)
@config_layer('prelu')
class ParameterReluLayer(LayerBase):
    """Parametric ReLU: learns one slope parameter per group of
    partial_sum input elements (partial_sum=1 means one slope each)."""
    layer_type = 'prelu'

    def __init__(self, name, inputs, partial_sum=1, **args):
        super(ParameterReluLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **args)
        input_layer = self.get_input_layer(0)
        config_assert(len(self.inputs) == 1, "prelu layer has only one input.")
        config_assert(input_layer.size % partial_sum == 0,
                      "a wrong setting for partial_sum")
        # Floor division: divisibility is asserted above, so the result is
        # exact; '//' keeps it an int under Python 3 / future division too.
        dims = [1, input_layer.size // partial_sum]
        self.set_layer_size(input_layer.size)
        self.config.partial_sum = partial_sum
        self.create_input_parameter(0, input_layer.size // partial_sum, dims)
        self.set_layer_height_width(self.get_input_layer(0).height, \
                                    self.get_input_layer(0).width)
        self.set_layer_depth(self.get_input_layer(0).depth)
@config_layer('conv')
class ConvLayerBase(LayerBase):
    """Base for 2-D convolution layers; chooses the concrete backend
    (exconv / cudnn_conv / mkldnn_conv) from the runtime flags unless the
    user requested one explicitly via the layer type."""
    layer_type = 'conv'

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=False,
                 **xargs):
        # NOTE(review): `inputs=[]` is a mutable default argument; it is
        # assumed not to be mutated downstream — verify against LayerBase.
        super(ConvLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        if num_filters is not None:
            self.config.num_filters = num_filters
        use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
        use_gpu = int(g_command_config_args.get("use_gpu", 0))
        parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
        # Automatically select cudnn_type for GPU, exconv for CPU
        # and mkldnn_conv for MKLDNN
        # if set type=conv, but still reserve the way user specify
        # exconv, mkldnn_conv or cudnn_conv manually.
        if self.layer_type == "cudnn_conv":
            config_assert(use_gpu, "cudnn_conv only support GPU")
        if self.layer_type == "mkldnn_conv":
            config_assert(use_mkldnn, "mkldnn_conv only support MKLDNN")
        if (use_gpu == 1 and self.layer_type != "exconv" and
                self.layer_type != "mkldnn_conv" and
            (parallel_nn == 0 or self.config.device > -1)):
            self.layer_type = "cudnn_conv"
        else:
            self.layer_type = "mkldnn_conv" if use_mkldnn else "exconv"
        # need to specify layer in config
        self.config.type = self.layer_type
        if shared_biases is not None:
            self.config.shared_biases = shared_biases
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            conv_conf = self.config.inputs[input_index].conv_conf
            parse_conv(self.inputs[input_index].conv, input_layer.name,
                       conv_conf, num_filters)
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            self.set_cnn_layer(name, conv_conf.output_y, conv_conf.output_x,
                               self.config.num_filters)
        psize = self.config.size
        if shared_biases:
            # Shared biases: one bias per output channel instead of one
            # per output element.
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        # Weight count: num_filters x filter_channels x (kx * ky).
        return self.config.num_filters * conv_conf.filter_channels \
                    * (conv_conf.filter_size * conv_conf.filter_size_y)
@config_layer('exconv')
class ConvLayer(ConvLayerBase):
    """CPU (exconv) convolution registration."""
    layer_type = 'exconv'
@config_layer('mkldnn_conv')
class ConvLayer(ConvLayerBase):
    """MKLDNN convolution registration.
    NOTE(review): reuses the class name ConvLayer, shadowing the 'exconv'
    class at module level — presumably only the @config_layer registration
    matters; verify."""
    layer_type = 'mkldnn_conv'
@config_layer('cudnn_conv')
class ConvLayer(ConvLayerBase):
    """cuDNN convolution registration (same module-level name shadowing as
    the mkldnn_conv registration above)."""
    layer_type = 'cudnn_conv'
@config_layer('convt')
class ConvTransLayerBase(LayerBase):
    """Base for 2-D transposed-convolution layers; chooses exconvt or
    cudnn_convt from the runtime flags unless the user specified one."""
    layer_type = 'convt'

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=False,
                 **xargs):
        super(ConvTransLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        if num_filters is not None:
            self.config.num_filters = num_filters
        use_gpu = int(g_command_config_args.get("use_gpu", 0))
        parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
        # Automatically select cudnn_type for GPU and exconvt for CPU
        # if set type=exconvt, but still reserve the way user specify
        # exconvt or cudnn_convt manually.
        if self.layer_type == "cudnn_convt":
            config_assert(use_gpu, "cudnn_convt only support GPU")
        if (use_gpu == 1 and self.layer_type != "exconvt" and
            (parallel_nn == 0 or self.config.device > -1)):
            self.layer_type = "cudnn_convt"
        else:
            self.layer_type = "exconvt"
        # need to specify layer in config
        self.config.type = self.layer_type
        if shared_biases is not None:
            self.config.shared_biases = shared_biases
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            parse_conv(
                self.inputs[input_index].conv,
                input_layer.name,
                self.config.inputs[input_index].conv_conf,
                num_filters,
                trans=True)
            conv_conf = self.config.inputs[input_index].conv_conf
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            # For a transposed conv the output geometry is the image size.
            self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
                               self.config.num_filters)
        psize = self.config.size
        if shared_biases:
            # Shared biases: one bias per output channel.
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        # Weight count: channels x filter_channels x (kx * ky).
        return conv_conf.channels * conv_conf.filter_channels \
                    * (conv_conf.filter_size * conv_conf.filter_size_y)
@config_layer('exconvt')
class ConvTransLayer(ConvTransLayerBase):
    """CPU (exconvt) transposed-convolution registration."""
    layer_type = 'exconvt'
@config_layer('cudnn_convt')
class ConvTransLayer(ConvTransLayerBase):
    """cuDNN transposed-convolution registration (shadows the module-level
    name of the exconvt class above; only the registration matters)."""
    layer_type = 'cudnn_convt'
@config_layer('conv_3d')
class Conv3DLayerBase(LayerBase):
    """Base for 3-D (de)convolution layers.

    NOTE: self.layer_type is supplied by the subclasses ('conv3d' /
    'deconv3d'); the base class defines none itself.
    """

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=True,
                 **xargs):
        super(Conv3DLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        if num_filters is not None:
            self.config.num_filters = num_filters
        # need to specify layer in config
        self.config.type = self.layer_type
        # Deconvolution parses the conv spec in transposed mode.
        trans = False
        if self.config.type == "deconv3d":
            trans = True
        if shared_biases is not None:
            self.config.shared_biases = shared_biases
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            conv_conf = self.config.inputs[input_index].conv_conf
            parse_conv3d(
                self.inputs[input_index].conv,
                input_layer.name,
                conv_conf,
                num_filters,
                trans=trans
            )  # for z-axis pad:0, strid:1, filter_size:1, img_size:1
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            if trans:
                # Deconv: the output geometry is the (reconstructed) image.
                self.set_cnn_layer(name, conv_conf.img_size_z,
                                   conv_conf.img_size_y, conv_conf.img_size,
                                   self.config.num_filters)
            else:
                self.set_cnn_layer(name, conv_conf.output_z, conv_conf.output_y,
                                   conv_conf.output_x, self.config.num_filters)
        psize = self.config.size
        if shared_biases:
            # Shared biases: one bias per output channel.
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        # Weight count: num_filters x filter_channels x (kx * ky * kz).
        return self.config.num_filters * conv_conf.filter_channels \
                    * (conv_conf.filter_size * conv_conf.filter_size_y \
                        * conv_conf.filter_size_z)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth,
                      height,
                      width,
                      channels,
                      is_print=True):
        # 3-D override: also records depth on the layer.
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))
@config_layer('conv3d')
class Conv3DLayer(Conv3DLayerBase):
    """3-D convolution registration."""
    layer_type = 'conv3d'
@config_layer('deconv3d')
class Conv3DLayer(Conv3DLayerBase):
    """3-D deconvolution registration.
    NOTE(review): reuses the name Conv3DLayer, shadowing the 'conv3d'
    class at module level — presumably only the registration matters."""
    layer_type = 'deconv3d'
@config_layer('norm')
class NormLayer(LayerBase):
    """Response-normalization layer; switches to the MKLDNN LRN backend
    when use_mkldnn is set and the norm type is 'cmrnorm-projection'."""

    def __init__(self, name, inputs, **xargs):
        super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs)
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        # MKLDNN only implements the cmrnorm-projection variant.
        use_mkldnn = True if use_mkldnn and self.inputs[
            0].norm.norm_type == 'cmrnorm-projection' else False
        self.config.type = 'mkldnn_lrn' if use_mkldnn else self.config.type
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            norm_conf = self.config.inputs[input_index].norm_conf
            parse_norm(self.inputs[input_index].norm, input_layer.name,
                       norm_conf)
            # MKLDNN takes the raw user scale instead of the parsed one.
            norm_conf.scale = self.inputs[
                input_index].norm.scale if use_mkldnn else norm_conf.scale
            self.set_cnn_layer(name, norm_conf.output_y, norm_conf.output_x,
                               norm_conf.channels, False)
            # NOTE(review): this runs inside the loop but always creates the
            # parameter for input 0 — presumably single-input in practice.
            if norm_conf.norm_type == "cross-channel-norm":
                self.create_input_parameter(0, norm_conf.channels,
                                            [norm_conf.channels, 1])
@config_layer('pool')
class PoolLayer(LayerBase):
    """2-D pooling layer; switches to the MKLDNN backend when use_mkldnn
    is set."""
    layer_type = 'pool'

    def __init__(self, name, inputs, ceil_mode=True, exclude_mode=None,
                 **xargs):
        use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
        if self.layer_type == "mkldnn_pool":
            config_assert(use_mkldnn, "mkldnn_pool only support MKLDNN")
        self.layer_type = 'mkldnn_pool' if use_mkldnn else 'pool'
        super(PoolLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            pool_conf = self.config.inputs[input_index].pool_conf
            parse_pool(self.inputs[input_index].pool, input_layer.name,
                       pool_conf, ceil_mode, exclude_mode)
            self.set_cnn_layer(name, pool_conf.output_y, pool_conf.output_x,
                               pool_conf.channels)
@config_layer('mkldnn_pool')
class MKLDNNPoolLayer(PoolLayer):
    """Explicitly MKLDNN-backed pooling; PoolLayer.__init__ already handles
    the backend switch and assertion."""
    layer_type = 'mkldnn_pool'
@config_layer('pool3d')
class Pool3DLayer(LayerBase):
    """3-D pooling layer; overrides set_cnn_layer to record depth too."""

    def __init__(self, name, inputs, ceil_mode=True, **xargs):
        super(Pool3DLayer, self).__init__(
            name, 'pool3d', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            pool_conf = self.config.inputs[input_index].pool_conf
            parse_pool3d(self.inputs[input_index].pool, input_layer.name,
                         pool_conf, ceil_mode)
            self.set_cnn_layer(name, pool_conf.output_z, pool_conf.output_y,
                               pool_conf.output_x, pool_conf.channels)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth,
                      height,
                      width,
                      channels,
                      is_print=True):
        # 3-D override: size includes depth, which is stored on the layer.
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))
@config_layer('spp')
class SpatialPyramidPoolLayer(LayerBase):
    """Spatial pyramid pooling: the output width is the total number of
    pyramid bins, 1 + 4 + ... + 4^(pyramid_height - 1)."""

    def __init__(self, name, inputs, **xargs):
        super(SpatialPyramidPoolLayer, self).__init__(
            name, 'spp', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            spp_conf = self.config.inputs[input_index].spp_conf
            parse_spp(self.inputs[input_index].spp, input_layer.name, spp_conf)
            # Geometric-series sum of bins per level; floor division keeps
            # the count an int (identical to '/' for ints in Python 2, and
            # avoids a float under Python 3 / future division).
            output_x = (pow(4, spp_conf.pyramid_height) - 1) // (4 - 1)
            self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels)
@config_layer('upsample')
class UpsampleLayer(LayerBase):
    """Upsamples the first input, either by a scale factor or to an
    explicit target size; the resulting geometry is recorded on the layer.

    When both `scale` and `upsample_size` are given, the explicit size
    wins for the output geometry.
    """

    def __init__(self, name, inputs, **xargs):
        super(UpsampleLayer, self).__init__(
            name, 'upsample', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].upsample_conf.image_conf
        image_conf.img_size = input_layer.width
        image_conf.img_size_y = input_layer.height
        # Floor division keeps the channel count an int (same result as
        # '/' for ints in Python 2; avoids a float under Python 3).
        image_conf.channels = input_layer.size // (input_layer.width *
                                                   input_layer.height)
        upsample = self.inputs[0].upsample
        output_x = 0
        output_y = 0
        output_size = 0
        if upsample.scale:
            self.config.inputs[0].upsample_conf.scale = upsample.scale
            self.config.inputs[0].upsample_conf.scale_y = upsample.scale_y
            output_x = input_layer.width * upsample.scale
            output_y = input_layer.height * upsample.scale_y
            self.config.inputs[0].upsample_conf.pad_out_x = upsample.pad_out_x
            self.config.inputs[0].upsample_conf.pad_out_y = upsample.pad_out_y
        if upsample.upsample_size:
            self.config.inputs[
                0].upsample_conf.upsample_size = upsample.upsample_size
            self.config.inputs[
                0].upsample_conf.upsample_size_y = upsample.upsample_size_y
            output_x = upsample.upsample_size
            output_y = upsample.upsample_size_y
        output_size = image_conf.channels * output_x * output_y
        self.set_layer_height_width(output_y, output_x)
        self.set_layer_depth(input_layer.depth)
        self.set_layer_size(output_size)
@config_layer('pad')
class PadLayer(LayerBase):
    """Pads the single input image; pad_c/pad_h/pad_w each hold
    [begin, end] padding amounts for channel/height/width."""

    def __init__(self, name, inputs, **xargs):
        super(PadLayer, self).__init__(name, 'pad', 0, inputs=inputs, **xargs)
        pad = self.inputs[0].pad
        self.config.inputs[0].pad_conf.pad_c.extend(pad.pad_c)
        self.config.inputs[0].pad_conf.pad_h.extend(pad.pad_h)
        self.config.inputs[0].pad_conf.pad_w.extend(pad.pad_w)
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].pad_conf.image_conf
        parse_image(pad, input_layer.name, image_conf)
        # Output geometry: input geometry plus begin+end padding per axis.
        out_ch = pad.channels + pad.pad_c[0] + pad.pad_c[1]
        out_h = image_conf.img_size_y + pad.pad_h[0] + pad.pad_h[1]
        out_w = image_conf.img_size + pad.pad_w[0] + pad.pad_w[1]
        self.set_cnn_layer(name, out_h, out_w, out_ch)
        self.config.size = out_ch * out_h * out_w
@config_layer('crop')
class CropLayer(LayerBase):
    """Crops the first input; the crop geometry comes either from a second
    reference input (when present) or from the explicit `shape` argument.
    Only 4-D NCHW inputs are supported."""

    def __init__(self, name, inputs, axis, offset, shape, **xargs):
        super(CropLayer, self).__init__(name, 'crop', 0, inputs=inputs, **xargs)
        self.config.axis = axis
        self.config.offset.extend(offset)
        self.config.shape.extend(shape)
        # get channel, width and height from input_0 layer
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].image_conf
        image_conf.img_size = input_layer.width
        image_conf.img_size_y = input_layer.height
        # Floor division keeps the channel count an int (identical to '/'
        # for ints in Python 2; avoids a float under Python 3).
        image_conf.channels = input_layer.size // (input_layer.width *
                                                   input_layer.height)
        # only support for 4-dims inputs and NCHW order
        if (len(self.config.inputs) == 2):
            self.set_layer_height_width(
                self.get_input_layer(1).height, self.get_input_layer(1).width)
            self.set_layer_size(self.get_input_layer(1).size)
        else:
            self.set_layer_height_width(shape[-2], shape[-1])
            self.set_layer_size(reduce(lambda x, y: x * y, shape[1:]))
@config_layer('batch_norm')
class BatchNormLayer(LayerBase):
    """Batch normalization layer.

    Appends two static inputs (moving mean and moving variance) to the
    user's single input, then picks the backend (cudnn / mkldnn / plain)
    from the runtime flags and the requested batch_norm_type.
    """
    layer_type = 'batch_norm'

    def __init__(self,
                 name,
                 inputs,
                 bias=True,
                 img3D=False,
                 use_global_stats=True,
                 epsilon=1e-5,
                 moving_average_fraction=0.9,
                 batch_norm_type=None,
                 mean_var_names=None,
                 **xargs):
        # Normalize `inputs` to a one-element list.
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, list):
            inputs = [inputs]
        config_assert(
            len(inputs) == 1, "BatchNormLayer must have one and only one input")
        # Create Input for moving mean and std,
        # in batch normalization layer.
        # These paras no need to update, so set is_static is true.
        # If not use is_static, even set learning_rate = 0, decay_rate = 0,
        # these paras will change if set average_window in configure.
        use_gpu = bool(int(g_command_config_args.get("use_gpu", 0)))
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        is_shared = True if not use_gpu else False
        for i in xrange(2):
            inputs.append(
                Input(
                    inputs[0].input_layer_name,
                    initial_std=0.0,
                    initial_mean=0.0,
                    is_static=True,
                    is_shared=is_shared,
                    make_layer_name_in_submodel=False, ))
        parallel_nn = bool(int(g_command_config_args.get("parallel_nn", 0)))
        # NOTE(review): cudnn_version is read but not used in the visible
        # selection logic below — verify whether it is still needed.
        cudnn_version = int(g_command_config_args.get("cudnn_version", 0))
        # Automatically select cudnn_batch_norm for GPU, batch_norm for CPU
        # and mkldnn_batch_norm for MKLDNN. Also based on cudnn version.
        if batch_norm_type == "mkldnn_batch_norm":
            config_assert(use_mkldnn, "mkldnn_batch_norm only support MKLDNN")
        use_cudnn = use_gpu and batch_norm_type != "batch_norm" and \
            not use_mkldnn and batch_norm_type != "mkldnn_batch_norm" and \
            ((not parallel_nn) or self.config.device > -1)
        if use_cudnn:
            self.layer_type = "cudnn_batch_norm"
        else:
            self.layer_type = "mkldnn_batch_norm" if use_mkldnn else "batch_norm"
        super(BatchNormLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        if use_global_stats is not None:
            self.config.use_global_stats = use_global_stats
        if moving_average_fraction is not None:
            self.config.moving_average_fraction = moving_average_fraction
        if epsilon is not None:
            assert epsilon >= 1e-5, "epsilon must be no less than 1e-5."
            self.config.epsilon = epsilon
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].image_conf
        if img3D:
            parse_image3d(self.inputs[0].image, input_layer.name, image_conf)
            # Only pass the width and height of input to batch_norm layer
            # when either of it is non-zero.
            if input_layer.width != 0 or input_layer.height != 0:
                self.set_cnn_layer(
                    input_layer_name=name,
                    depth=image_conf.img_size_z,
                    height=image_conf.img_size_y,
                    width=image_conf.img_size,
                    channels=image_conf.channels,
                    is_print=True)
            else:
                self.set_layer_size(input_layer.size)
        else:
            parse_image(self.inputs[0].image, input_layer.name, image_conf)
            # Only pass the width and height of input to batch_norm layer
            # when either of it is non-zero.
            if input_layer.width != 0 or input_layer.height != 0:
                self.set_cnn_layer(
                    input_layer_name=name,
                    height=image_conf.img_size_y,
                    width=image_conf.img_size,
                    channels=image_conf.channels,
                    is_print=True)
            else:
                self.set_layer_size(input_layer.size)
        # One scale value per channel; mean/var parameters have the same size.
        psize = self.calc_parameter_size(image_conf)
        dims = [1, psize]
        if mean_var_names is not None:
            assert len(mean_var_names) == 2
            self.inputs[1].parameter_name = mean_var_names[0]
            self.inputs[2].parameter_name = mean_var_names[1]
        self.create_input_parameter(0, psize)
        self.create_input_parameter(1, psize, dims)
        self.create_input_parameter(2, psize, dims)
        self.create_bias_parameter(bias, psize)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth=None,
                      height=None,
                      width=None,
                      channels=None,
                      is_print=True):
        # Override that tolerates a missing depth (2-D case): depth
        # defaults to 1 and is omitted from the printed summary.
        depthIsNone = False
        if depth is None:
            depth = 1
            depthIsNone = True
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print and depthIsNone:
            print("output for %s: c = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, height, width, size))
        elif is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))

    def calc_parameter_size(self, image_conf):
        # Parameters are per-channel.
        return image_conf.channels
@config_layer('trans')
class TransLayer(LayerBase):
    """Matrix-transpose layer; output size equals the input size."""

    def __init__(self, name, inputs, **xargs):
        super(TransLayer, self).__init__(
            name, 'trans', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'TransLayer must have one and only one input')
        only_input = self.get_input_layer(0)
        self.set_layer_size(only_input.size)
@config_layer('resize')
class ResizeLayer(LayerBase):
    """Reshapes its single input to the given size."""

    def __init__(self, name, size, inputs, **xargs):
        super(ResizeLayer, self).__init__(
            name, 'resize', size=size, inputs=inputs, **xargs)
        input_count = len(self.inputs)
        config_assert(
            input_count == 1,
            'ResizeLayer must have one and only one input')
@config_layer('rotate')
class RotateLayer(LayerBase):
    """Rotates its single input; the given height/width describe the
    output geometry, and the size is carried over from the input."""

    def __init__(self, name, inputs, height, width, device=None):
        super(RotateLayer, self).__init__(
            name, 'rotate', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1,
            'RotateLayer must have one and only one input')
        self.set_layer_height_width(height, width)
        only_input = self.get_input_layer(0)
        self.set_layer_size(only_input.size)
@config_layer('blockexpand')
class BlockExpandLayer(LayerBase):
    """Expands each input image into blocks; the output size per step is
    block_x * block_y * channels."""

    def __init__(self, name, inputs, **xargs):
        super(BlockExpandLayer, self).__init__(
            name, 'blockexpand', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            parse_block_expand(
                self.inputs[input_index].block_expand, input_layer.name,
                self.config.inputs[input_index].block_expand_conf)
            block_expand_conf = self.config.inputs[
                input_index].block_expand_conf
            self.set_layer_size(block_expand_conf.block_x *
                                block_expand_conf.block_y *
                                block_expand_conf.channels)
@config_layer('maxout')
class MaxOutLayer(LayerBase):
    """Maxout layer: reduces the channel count by a factor of `groups`
    (out_channels = channels // groups); spatial size is unchanged."""

    def __init__(self, name, inputs, **xargs):
        super(MaxOutLayer, self).__init__(
            name, 'maxout', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        maxout_conf = self.config.inputs[0].maxout_conf
        parse_maxout(self.inputs[0].maxout, input_layer.name, maxout_conf)
        # Floor division keeps the channel count an int (identical to '/'
        # for ints in Python 2; avoids a float under Python 3).
        out_channels = maxout_conf.image_conf.channels // maxout_conf.groups
        self.set_cnn_layer(name, maxout_conf.image_conf.img_size_y,
                           maxout_conf.image_conf.img_size, out_channels)
@config_layer('row_conv')
class RowConvLayer(LayerBase):
    """Row convolution over its single input: one shared
    (context_length x size) filter; output size equals input size."""

    def __init__(self, name, inputs, context_length, **xargs):
        super(RowConvLayer, self).__init__(
            name, 'row_conv', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'row convolution layer must have one and only one input.')
        input_layer = self.get_input_layer(0)
        row_conv_conf = self.config.inputs[0].row_conv_conf
        row_conv_conf.context_length = context_length
        self.set_layer_size(input_layer.size)
        # One filter row per context step.
        filter_dims = [context_length, input_layer.size]
        self.create_input_parameter(0, context_length * input_layer.size,
                                    filter_dims)
@config_layer('clip')
class ClipLayer(LayerBase):
    """Clips every element of its single input into [min, max]."""

    def __init__(self, name, inputs, min, max, **xargs):
        super(ClipLayer, self).__init__(name, 'clip', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'ClipLayer must have one and only one input.')
        config_assert(min < max, 'min must be less than max.')
        self.set_layer_size(self.get_input_layer(0).size)
        clip_conf = self.config.inputs[0].clip_conf
        clip_conf.min = min
        clip_conf.max = max
@config_layer('scale_shift')
class ScaleShiftLayer(LayerBase):
    """Applies a single learned scale (and optional shift) to its input."""

    def __init__(self, name, inputs, bias=True, **xargs):
        super(ScaleShiftLayer, self).__init__(
            name, 'scale_shift', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'ScaleShiftLayer must have one and only one input.')
        only_input = self.get_input_layer(0)
        self.set_layer_size(only_input.size)
        # Scale is a single scalar parameter; shift is the (scalar) bias.
        self.create_input_parameter(0, 1, [1, 1])
        self.create_bias_parameter(bias, 1)
# key: cost type
# value: cost class
g_cost_map = {}


# define a cost layer without any parameters
def define_cost(class_name, cost_type):
    # Dynamically creates a LayerBase subclass for a parameterless cost
    # layer and registers it in g_cost_map under cost_type.
    def init(cls, name, inputs, device=None, coeff=1.):
        # NOTE(review): 'cls' is really the instance (self). super(type(cls),
        # cls) resolves to LayerBase only as long as the generated class is
        # never subclassed further; subclassing would recurse infinitely.
        super(type(cls), cls).__init__(
            name, cost_type, 1, inputs, device=device, coeff=coeff)

    cls = type(class_name, (LayerBase, ), dict(__init__=init))
    global g_cost_map
    g_cost_map[cost_type] = cls
# Register all simple cost layers that need no extra configuration fields.
define_cost('MultiClassCrossEntropy', 'multi-class-cross-entropy')
define_cost('CrossEntropyOverBeamCostLayer', 'cross_entropy_over_beam')
define_cost('RankingCost', 'rank-cost')
define_cost('AucValidation', 'auc-validation')
define_cost('PnpairValidation', 'pnpair-validation')
define_cost('SumOfSquaresCostLayer', 'square_error')
define_cost('MultiBinaryLabelCrossEntropy', 'multi_binary_label_cross_entropy')
define_cost('SoftBinaryClassCrossEntropy', 'soft_binary_class_cross_entropy')
define_cost('HuberTwoClassification', 'huber_classification')
define_cost('SumCost', 'sum_cost')
define_cost('SmoothL1Cost', 'smooth_l1')
@config_layer('hsigmoid')
class HierarchicalSigmoidLayer(LayerBase):
    """Hierarchical sigmoid output over num_classes classes.

    Every input except the last gets a (num_classes - 1) x input_size
    weight matrix; the final input is presumably the label layer (it is
    not parameterized here) — verify against callers.
    """

    def __init__(self, name, num_classes, inputs, device=None, bias=True):
        super(HierarchicalSigmoidLayer, self).__init__(
            name, 'hsigmoid', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) >= 2,
            'HierarchicalSigmoidLayer must have at least 2 inputs')
        self.config.num_classes = num_classes
        for input_index in xrange(len(self.inputs) - 1):
            input_layer = self.get_input_layer(input_index)
            # num_classes - 1 rows per weight matrix (and bias entries).
            psize = (num_classes - 1) * input_layer.size
            dims = [num_classes - 1, input_layer.size]
            self.create_input_parameter(input_index, psize, dims)
        self.create_bias_parameter(bias, num_classes - 1)
'''
lambdaCost for lambdaRank LTR approach
Usage:
Example: Layer(name = "cost", type = "lambda_cost", NDCG_num = 8,
max_sort_size = -1, inputs = ["output", "score"])
Input data: Samples of the same query should be loaded as a sequence,
by PyDataProvider etc.. User should provide
scores for each sample. The score slot should be the 2nd
input of lambdaRank layer.
NDCG_num = the size of NDCG, e.g., 5 for NDCG@5.
Note: NDCG_num must be less than or equal to the minimum
size of lists.
max_sort_size = the size of partial sorting in calculating gradient.
Note: If max_sort_size = -1, then for each list, the algorithm will
sort the entire list to get gradient.
In other cases, max_sort_size must be greater than or equal
to NDCG_num.
max_sort_size can be greater than the size of a list, in which
case the algorithm will sort the entire list to get gradient.
'''
@config_layer('lambda_cost')
class LambdaCost(LayerBase):
    """LambdaRank cost. Input 0 is the model output, input 1 the scores;
    see the usage comment above for NDCG_num / max_sort_size semantics."""

    def __init__(self, name, inputs, NDCG_num=5, max_sort_size=-1, device=None):
        super(LambdaCost, self).__init__(
            name, 'lambda_cost', 1, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 2, 'lambdaCost must have 2 inputs')
        self.config.NDCG_num = NDCG_num
        # max_sort_size == -1 means "sort the whole list"; any other value
        # must be large enough to cover NDCG_num.
        partial_sort_requested = max_sort_size != -1
        if partial_sort_requested:
            config_assert(
                NDCG_num <= max_sort_size,
                'NDCG_num must be less than or equal to max_sort_size')
        self.config.max_sort_size = max_sort_size
@config_layer('huber_regression')
class HuberRegressionLoss(LayerBase):
    """Huber regression cost over (prediction, target) input pairs."""

    def __init__(self, name, inputs, delta=1., coeff=1., device=None):
        super(HuberRegressionLoss, self).__init__(
            name, 'huber_regression', 1, inputs=inputs, device=device)
        input_count = len(self.inputs)
        config_assert(
            input_count == 2, 'HuberRegression must have 2 inputs')
        self.config.delta = delta
        self.config.coeff = coeff
@config_layer('nce')
class NCELayer(LayerBase):
    """Noise-contrastive estimation output layer.

    The last input must be a data layer (presumably the label); an extra
    trailing size-1 data input, when present, is treated as a per-sample
    weight and gets no parameter.
    """

    def __init__(self,
                 name,
                 num_classes,
                 inputs,
                 num_neg_samples=10,
                 neg_sampling_dist=None,
                 bias=True,
                 **xargs):
        super(NCELayer, self).__init__(name, 'nce', 1, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) >= 2, 'NCELayer must have at least 2 inputs')
        self.config.num_classes = num_classes
        if neg_sampling_dist is not None:
            config_assert(
                len(neg_sampling_dist) == num_classes,
                'len(neg_sampling_dist)(%s) is not same as num_classes (%s)' %
                (len(neg_sampling_dist), num_classes))
            # The sampling distribution must be a valid probability vector.
            s = sum(neg_sampling_dist)
            config_assert(
                abs(s - 1) < 1e-5,
                'The sum of neg_sampling_dist (%s) is not 1' % s)
            self.config.neg_sampling_dist.extend(neg_sampling_dist)
        self.config.num_neg_samples = num_neg_samples
        num_real_inputs = len(self.inputs) - 1
        input_layer = self.get_input_layer(num_real_inputs)
        config_assert(input_layer.type == 'data',
                      'Expecting the last input layer of an nce layer to be '
                      'a data layer')
        if (num_real_inputs > 1 and input_layer.size == 1 and
                self.get_input_layer(num_real_inputs - 1).type == 'data'):
            # This input layer is assumed to be a sample weight layer
            num_real_inputs -= 1
        for input_index in xrange(num_real_inputs):
            input_layer = self.get_input_layer(input_index)
            # One (num_classes x input_size) weight matrix per real input.
            psize = num_classes * input_layer.size
            dims = [num_classes, input_layer.size]
            self.create_input_parameter(input_index, psize, dims)
        self.create_bias_parameter(bias, num_classes)
@config_layer('addto')
class AddToLayer(LayerBase):
    """Element-wise sum of all inputs, optionally plus a bias.

    All inputs must have the same size.  Height/width/depth are copied
    from the last input that carries a full (height, width, depth) shape,
    so spatial layout survives the addition.  With use_mkldnn the layer
    type silently switches to 'mkldnn_addto'.
    """
    layer_type = 'addto'

    def __init__(self, name, inputs, bias=True, **xargs):
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        if self.layer_type == "mkldnn_addto":
            config_assert(use_mkldnn, "mkldnn_addto only support MKLDNN")
        self.layer_type = 'mkldnn_addto' if use_mkldnn else 'addto'
        super(AddToLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer')

        layer_size = self.get_input_layer(0).size
        # To preserve height, width and depth.
        layer_with_hwc = self.get_input_layer(0)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            assert layer_size == input_layer.size
            # BUGFIX: the original condition tested `input_layer.height`
            # three times, so an input with a height but no width/depth
            # could be chosen as the shape source.  Require all three.
            if input_layer.height and input_layer.width and input_layer.depth:
                layer_with_hwc = input_layer

        self.set_layer_size(layer_with_hwc.size)
        self.set_layer_height_width(layer_with_hwc.height, layer_with_hwc.width)
        self.set_layer_depth(layer_with_hwc.depth)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('mkldnn_addto')
class MKLDNNAddtoLayer(AddToLayer):
    """MKL-DNN variant of AddToLayer; AddToLayer.__init__ asserts that
    use_mkldnn is enabled when this layer_type is requested."""
    layer_type = 'mkldnn_addto'
@config_layer('agent')
class AgentLayer(LayerBase):
    """Input-less placeholder layer of a fixed size; used inside recurrent
    layer groups (see Memory below) to stand in for another layer's
    output."""

    def __init__(self, name, size, device=None):
        super(AgentLayer, self).__init__(
            name, 'agent', size, inputs=[], device=device)
@config_layer('gather_agent')
class GatherAgentLayer(LayerBase):
    """Input-less agent that gathers outputs (counterpart of
    ScatterAgentLayer); the actual wiring happens elsewhere in the
    recurrent-group machinery."""

    def __init__(self, name, size, device=None):
        super(GatherAgentLayer, self).__init__(
            name, 'gather_agent', size, inputs=[], device=device)
@config_layer('scatter_agent')
class ScatterAgentLayer(LayerBase):
    """Input-less agent that scatters a layer's output into a recurrent
    group; optionally records a (height, width) shape when both are
    given."""

    def __init__(self, name, size, width=None, height=None, device=None):
        super(ScatterAgentLayer, self).__init__(
            name, 'scatter_agent', size, inputs=[], device=device)
        if height and width:
            self.set_layer_height_width(height, width)
@config_layer('multiplex')
class MultiplexLayer(LayerBase):
    """Per-sample selection among the data inputs (input 0 is presumably
    the selector index -- confirm against the C++ implementation); every
    data input must match the layer's size."""

    def __init__(self, name, inputs, size, device=None):
        super(MultiplexLayer, self).__init__(
            name, 'multiplex', size, inputs=inputs, device=device)
        config_assert(
            len(inputs) > 2, 'MultiplexLayer should have more than 2 inputs.')
        # Every input except the selector must agree with `size`.
        for idx in range(1, len(inputs)):
            config_assert(
                self.get_input_layer(idx).size == size,
                "All the input layers except the first one should"
                "have the same size as the MultiplexLayer.")
@config_func
def Link(name, has_subseq=False):
    """Build a LinkConfig referring to layer *name*.

    ``has_subseq`` is accepted but ignored; it is kept only for backward
    compatibility with old configuration files.
    """
    cfg = LinkConfig()
    cfg.link_name = name
    return cfg
# memory for recurrent layer group.
# *name* and *size* are actual layer's name and size.
# If *name* is None, need to provide *memory_name* and need to use
# SetMemoryInput() later to specify the layer which this memory remembers.
#
# return the name of the memory,
# use this name if you assign the memory as other layer's input
#
# boot frame of memory is zeroed by default,
# or initialize by boot layer output if *boot_layer* set,
# or initialize by trainable bias if *boot_bias* set,
# or initialize by a constant id if *boot_with_const_id* set
#
# Memory can be a sequence if *is_sequence* is set; this type of memory
# can only be initialized by a *boot_layer* which is a sequence.
#
@config_func
def Memory(name,
           size,
           is_sequence=False,
           boot_layer=None,
           boot_bias=False,
           boot_bias_active_type="",
           boot_with_const_id=None,
           memory_name=None):
    """Declare a memory (delayed output) inside a recurrent layer group.

    Returns the name of the agent layer standing in for the remembered
    output; use that name as another layer's input.  If *name* is None,
    *memory_name* must be given and SetMemoryInput() must be called later
    to bind the remembered layer.  At most one of boot_layer / boot_bias /
    boot_with_const_id may be set.  *is_sequence* is accepted for backward
    compatibility and not used here.
    """
    if not memory_name:
        # BUGFIX: original message was garbled ("name needs cannot be None").
        config_assert(name is not None, "name cannot be None")
        memory_name = name + "+delay1"
    agent_name = memory_name
    # The agent layer is the stand-in that other layers connect to.
    agent_layer = AgentLayer(agent_name, size)
    config_assert(g_current_submodel.is_recurrent_layer_group,
                  'Memory should be used in recurrent layer group only')
    memory = g_current_submodel.memories.add()
    if name is not None:
        memory.layer_name = MakeLayerNameInSubmodel(name)
    memory.link_name = MakeLayerNameInSubmodel(agent_name)
    # Count how many boot options the caller supplied; they are exclusive.
    options = sum((boot_layer is not None, bool(boot_bias),
                   boot_with_const_id is not None))
    config_assert(
        options <= 1,
        'take one option at most from boot_layer, boot_bias, or boot_with_const_id'
    )
    if boot_layer is not None:
        boot_layer = MakeLayerNameInParentSubmodel(boot_layer)
        config_assert(boot_layer in g_layer_map,
                      'boot_layer "%s" does not correspond to a layer name' %
                      boot_layer)
        memory.boot_layer_name = boot_layer
    elif boot_bias:
        # Boot frame comes from a trainable bias owned by the agent layer.
        memory.boot_bias_parameter_name = agent_layer.create_bias_parameter(
            boot_bias, size, for_self=False)
        memory.boot_bias_active_type = boot_bias_active_type
    elif boot_with_const_id is not None:
        memory.boot_with_const_id = boot_with_const_id
    return agent_name
@config_func
def SetMemoryInput(memory_name, layer_name):
    """Bind an already-declared memory (by its link name) to the layer it
    should remember; fatal error if no such memory exists."""
    memory_name = MakeLayerNameInSubmodel(memory_name)
    layer_name = MakeLayerNameInSubmodel(layer_name)
    for memory in g_current_submodel.memories:
        if memory.link_name != memory_name:
            continue
        memory.layer_name = layer_name
        return
    logger.fatal("Nonexistent memory name: " + memory_name)
# Generator for recurrent layer group, to use it:
# 1. define a id layer as output of layer group
# 2. define a memory of this id layer, and assign a boot id(begin of sequence)
# 3. define a eos check layer and fill its name in generator's *eos_layer_name*
# Sequence generation will stop when eos check return 1 or *max_num_frames* reached.
# If *beam_size* is greater than one, generator will use beam search.
# In beam search, if *num_results_per_sample* is set, one sample sequence can
# output multiple results, each with a probability.
@config_func
def Generator(
        max_num_frames,
        eos_layer_name="eos_check",
        num_results_per_sample=1,
        beam_size=1,
        log_prob=None, ):
    """Build the GeneratorConfig for a sequence-generating recurrent
    layer group; ``log_prob`` is only written when explicitly given."""
    conf = GeneratorConfig()
    conf.beam_size = beam_size
    conf.eos_layer_name = eos_layer_name
    conf.max_num_frames = max_num_frames
    conf.num_results_per_sample = num_results_per_sample
    if log_prob is not None:
        conf.log_prob = log_prob
    return conf
@config_layer('expand')
class ExpandLayer(LayerBase):
    """Expands input 0 following the (sub-)sequence structure of input 1
    (presumably -- the semantics live in the C++ layer; confirm there).
    Output width equals the first input's width; ``trans_type`` selects
    'seq' vs 'non-seq' expansion."""

    def __init__(self, name, inputs, trans_type='non-seq', bias=False, **xargs):
        super(ExpandLayer, self).__init__(
            name, 'expand', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs')
        self.config.trans_type = trans_type
        # CLEANUP: the original looped over both inputs fetching each layer
        # into an unused local; only the first input's size matters here.
        self.set_layer_size(self.get_input_layer(0).size)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('featmap_expand')
class FeatMapExpandLayer(LayerBase):
    """Replicates each feature map ``num_filters`` times; output size is
    input_size * num_filters.  ``as_row_vector=False`` switches the C++
    layer into column-vector mode via the user_arg field."""

    def __init__(self,
                 name,
                 inputs,
                 num_filters=None,
                 as_row_vector=True,
                 bias=False,
                 **xargs):
        super(FeatMapExpandLayer, self).__init__(
            name, 'featmap_expand', 0, inputs=inputs, **xargs)
        # BUGFIX: the assert message used to name the wrong layer
        # ('ExpandLayer takes 1 and only 1 inputs').
        config_assert(
            len(self.inputs) == 1,
            'FeatMapExpandLayer takes 1 and only 1 input')
        if num_filters is not None:
            self.config.num_filters = num_filters
        else:
            logger.fatal("FeatMapExpandLayer must specify num_filters.")
        if not as_row_vector:
            self.config.user_arg = "as_col_vec"
        self.set_layer_size(self.get_input_layer(0).size * num_filters)
@config_layer('max')
class MaxLayer(LayerBase):
    """Max pooling over a sequence (or sub-sequence when
    ``trans_type='seq'``); ``stride`` enables windowed pooling, which is
    not supported in 'seq' mode.  ``output_max_index`` makes the layer
    emit argmax indices instead of values (per the config field name --
    confirm in the C++ layer)."""

    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 output_max_index=None,
                 stride=-1,
                 **xargs):
        super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.seq_pool_stride = stride
        self.config.trans_type = trans_type
        for idx in xrange(len(self.inputs)):
            self.set_layer_size(self.get_input_layer(idx).size)
        self.create_bias_parameter(bias, self.config.size)
        if output_max_index is not None:
            self.config.output_max_index = output_max_index
@config_layer('maxid')
class MaxIdLayer(LayerBase):
    """Outputs the id(s) of the maximal input activation.  When
    ``beam_size`` is omitted, it is inherited from the enclosing
    submodel's generator config (if one exists)."""

    def __init__(self, name, inputs, beam_size=None, device=None):
        super(MaxIdLayer, self).__init__(
            name, 'maxid', 0, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 1, 'MaxIdLayer must have 1 input')
        for idx in xrange(len(self.inputs)):
            self.set_layer_size(self.get_input_layer(idx).size)

        if beam_size is not None:
            self.config.beam_size = beam_size
        else:
            global g_current_submodel
            if g_current_submodel.HasField("generator"):
                self.config.beam_size = g_current_submodel.generator.beam_size
@config_layer('eos_id')
class EosIdLayer(LayerBase):
    """End-of-sequence check: compares the input id against ``eos_id``
    and emits a 2-wide boolean output."""

    def __init__(self, name, inputs, eos_id, device=None):
        super(EosIdLayer, self).__init__(
            name, 'eos_id', 0, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 1, 'EosIdLayer must have 1 input')
        self.config.eos_id = eos_id
        self.set_layer_size(2)  # boolean output
@config_layer('seqlastins')
class SequenceLastInstanceLayer(LayerBase):
    """Selects the last instance of each (sub-)sequence; ``stride``
    enables windowed selection, which 'seq' mode does not support.
    Output width equals the input width."""

    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(SequenceLastInstanceLayer, self).__init__(
            name, 'seqlastins', 0, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.seq_pool_stride = stride
        self.config.trans_type = trans_type
        self.set_layer_size(self.get_input_layer(0).size)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('seqfirstins')
class SequenceFirstInstanceLayer(SequenceLastInstanceLayer):
    """Same as SequenceLastInstanceLayer but selects the FIRST instance
    of each (sub-)sequence, by flipping the select_first flag."""

    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(SequenceFirstInstanceLayer, self).__init__(
            name,
            inputs=inputs,
            trans_type=trans_type,
            bias=bias,
            stride=stride,
            **xargs)
        # Only difference from the parent: pick the first element.
        self.config.select_first = True
@config_layer('seqconcat')
class SequenceConcatLayer(LayerBase):
    """Concatenates two sequences along the time axis; the layer size is
    taken from the inputs (set once per input, so effectively the last
    input's size)."""

    def __init__(self, name, inputs, bias=False, **xargs):
        super(SequenceConcatLayer, self).__init__(
            name, 'seqconcat', 0, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs')
        for idx in xrange(len(self.inputs)):
            self.set_layer_size(self.get_input_layer(idx).size)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('seqreshape')
class SequenceReshapeLayer(LayerBase):
    """Reshapes a sequence so each output instance has width ``size``
    (presumably preserving the total number of elements -- confirm in
    the C++ layer)."""

    def __init__(self, name, size, inputs, bias=False, **xargs):
        super(SequenceReshapeLayer, self).__init__(
            name, 'seqreshape', size, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 1, 'SequenceReshapeLayer must have 1 inputs')
        self.set_layer_size(size)
        self.create_bias_parameter(bias, size)
@config_layer('subseq')
class SubSequenceLayer(LayerBase):
    """Extracts sub-sequences from input 0; inputs 1 and 2 presumably
    supply offsets and lengths (confirm in the C++ layer).  Output width
    equals input 0's width."""

    def __init__(self, name, inputs, bias=False, **xargs):
        super(SubSequenceLayer, self).__init__(
            name, 'subseq', 0, inputs=inputs, **xargs)
        config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs')
        data_size = self.get_input_layer(0).size
        self.set_layer_size(data_size)
        self.create_bias_parameter(bias, data_size)
@config_layer('seq_slice')
class SeqSliceLayer(LayerBase):
    """Slices sub-sequences out of the first input.

    Arguments ``starts`` and ``ends`` may each be a layer, a one-element
    list of layers, or None; at least one of them must be given.  They
    are appended to the input list before calling the base constructor.
    Output width equals the input sequence's width.
    """

    def __init__(self, name, inputs, starts, ends, bias=False, **xargs):
        if isinstance(inputs, list):
            assert len(inputs) == 1, ('the first input of sequence slice layer '
                                      'is a single sequence input.')
        else:
            # Normalize the single sequence input into a list.
            inputs = [inputs]

        if starts is not None:
            if isinstance(starts, list):
                assert len(starts) == 1, (
                    'the start indices for sequence slice layer cannot '
                    'be a list having more than one element.')
                starts = starts[0]
            inputs.append(starts)

        if ends is not None:
            if isinstance(ends, list):
                assert len(ends) == 1, (
                    'the end indices for sequence slice layer cannot '
                    'be a list having more than one element.')
                ends = ends[0]
            inputs.append(ends)
        assert len(inputs) >= 2, (
            'the sequence slice layer has at least two inputs.')

        super(SeqSliceLayer, self).__init__(
            name, 'seq_slice', 0, inputs=inputs, **xargs)

        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)

        if len(inputs) == 3:
            # Both starts and ends given: their widths must agree.
            assert (
                self.get_input_layer(1).size == self.get_input_layer(2).size), (
                    'If start and end indices are both given to'
                    'sequence slice layer, they should have the same width.')
        elif len(inputs) == 2:
            # Only one index input: record whether it holds starts or ends.
            self.config.select_first = (starts is not None)
@config_layer('sub_nested_seq')
class SubNestedSequenceLayer(LayerBase):
    """Selects sub-sequences from a nested sequence according to an
    index layer.  Both arguments may be given bare or as one-element
    lists; output width equals the nested sequence's width."""

    def __init__(self, name, inputs, selected_indices, bias=False, **xargs):
        if isinstance(inputs, list):
            assert len(inputs) == 1, ('the first input of sub_nested_seq '
                                      'layer is a single nested sequence.')
            inputs = inputs[0]
        if isinstance(selected_indices, list):
            assert len(selected_indices) == 1, (
                'the second input of '
                'sub_nested_seq layer is a single layer which is a '
                'set of selected indices.')
            selected_indices = selected_indices[0]

        super(SubNestedSequenceLayer, self).__init__(
            name,
            'sub_nested_seq',
            0,
            inputs=[inputs, selected_indices],
            **xargs)
        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)
@config_layer('dot_prod')
class DotProdLayer(LayerBase):
    """Dot product of two equally-sized inputs; scalar (size-1) output."""

    def __init__(self, name, inputs, device=None):
        super(DotProdLayer, self).__init__(
            name, 'dot_prod', 0, inputs, device=device)
        config_assert(len(inputs) == 2, 'DotProdLayer must have 2 inputs.')
        first = self.get_input_layer(0)
        second = self.get_input_layer(1)
        config_assert(first.size == second.size,
                      "Two inputs should have the same size.")
        self.set_layer_size(1)
@config_layer('out_prod')
class OuterProdLayer(LayerBase):
    """Outer product of two inputs; output size is the product of the
    two input sizes."""

    def __init__(self, name, inputs, device=None):
        super(OuterProdLayer, self).__init__(
            name, 'out_prod', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'OuterProdLayer must have 2 inputs')
        self.set_layer_size(
            self.get_input_layer(0).size * self.get_input_layer(1).size)
@config_layer('power')
class PowerLayer(LayerBase):
    """Raises input 1 (element-wise) to the power given by input 0, a
    size-1 exponent.  Output size equals input 1's size."""

    def __init__(self, name, inputs, device=None):
        super(PowerLayer, self).__init__(
            name, 'power', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'PowerLayer must have 2 inputs')
        exponent = self.get_input_layer(0)
        base = self.get_input_layer(1)
        config_assert(1 == exponent.size,
                      'The left input is the exponent and should be of size 1')
        self.set_layer_size(base.size)
@config_layer('slope_intercept')
class SlopeInterceptLayer(LayerBase):
    """Affine transform y = slope * x + intercept applied element-wise;
    output size equals the single input's size."""

    def __init__(self, name, inputs, slope=1.0, intercept=0.0, device=None):
        super(SlopeInterceptLayer, self).__init__(
            name, 'slope_intercept', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 1, 'SlopeInterceptLayer must have 1 input')
        self.config.intercept = intercept
        self.config.slope = slope
        self.set_layer_size(self.get_input_layer(0).size)
@config_layer('scaling')
class ScalingLayer(LayerBase):
    """Scales input 1 by the size-1 factor in input 0; output size
    equals input 1's size."""

    def __init__(self, name, inputs, device=None):
        super(ScalingLayer, self).__init__(
            name, 'scaling', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'ScalingLayer must have 2 inputs')
        factor = self.get_input_layer(0)
        data = self.get_input_layer(1)
        config_assert(1 == factor.size,
                      'The left input should be of size 1')
        self.set_layer_size(data.size)
@config_layer('conv_shift')
class ConvShiftLayer(LayerBase):
    """Circular-convolution shift of input 0 by input 1 (presumably, as
    used in NTM-style addressing -- confirm in the C++ layer); output
    size equals input 0's size."""

    def __init__(self, name, inputs, device=None):
        super(ConvShiftLayer, self).__init__(
            name, 'conv_shift', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'ConvShiftLayer must have 2 inputs')
        self.set_layer_size(self.get_input_layer(0).size)
@config_layer('convex_comb')
class ConvexCombinationLayer(LayerBase):
    """Weighted combination: input 0 holds the weights, input 1 holds
    `weight_count` concatenated vectors of width ``size``."""

    def __init__(self, name, size, inputs, device=None):
        super(ConvexCombinationLayer, self).__init__(
            name, 'convex_comb', size, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2, 'ConvexCombinationLayer must have 2 inputs')
        weights = self.get_input_layer(0)
        vectors = self.get_input_layer(1)
        config_assert(size * weights.size == vectors.size,
                      'Wrong input size for ConvexCombinationLayer')
        self.set_layer_size(size)
@config_layer('interpolation')
class InterpolationLayer(LayerBase):
    """Linear interpolation between inputs 1 and 2, mixed by the size-1
    weight in input 0; output size matches the vector inputs."""

    def __init__(self, name, inputs, device=None):
        super(InterpolationLayer, self).__init__(
            name, 'interpolation', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 3, 'InterpolationLayer must have 3 inputs')
        weight = self.get_input_layer(0)
        vec_a = self.get_input_layer(1)
        vec_b = self.get_input_layer(2)
        config_assert(weight.size == 1, 'weight should be of size 1')
        config_assert(vec_a.size == vec_b.size,
                      'the two vector inputs should be of the same size')
        self.set_layer_size(vec_a.size)
@config_layer('bilinear_interp')
class BilinearInterpLayer(LayerBase):
    """Bilinear interpolation (spatial resize) of a single image input;
    output height/width come from the parsed bilinear_interp config and
    channels from the input's image config."""

    def __init__(self, name, inputs, **xargs):
        super(BilinearInterpLayer, self).__init__(
            name, 'bilinear_interp', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        # parse_bilinear fills `conf` (out_size_x/out_size_y/image_conf)
        # from the user-supplied bilinear_interp settings.
        conf = self.config.inputs[0].bilinear_interp_conf
        parse_bilinear(self.inputs[0].bilinear_interp, input_layer.name, conf)
        self.set_cnn_layer(name, conf.out_size_y, conf.out_size_x,
                           conf.image_conf.channels)
@config_layer('sum_to_one_norm')
class SumToOneNormLayer(LayerBase):
    """Normalizes its single input so the row sums to one (per the layer
    type name -- the arithmetic lives in the C++ layer)."""

    def __init__(self, name, inputs, device=None):
        super(SumToOneNormLayer, self).__init__(
            name, 'sum_to_one_norm', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1, 'SumToOneNormLayer must have 1 input')
        self.set_layer_size(self.get_input_layer(0).size)
@config_layer('row_l2_norm')
class RowL2NormLayer(LayerBase):
    """L2-normalizes each row of its single input; size is unchanged."""

    def __init__(self, name, inputs, **xargs):
        super(RowL2NormLayer, self).__init__(
            name, 'row_l2_norm', 0, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'RowL2NormLayer must have 1 input')
        self.set_layer_size(self.get_input_layer(0).size)
@config_layer('cos')
class CosSimLayer(LayerBase):
    """Cosine similarity of two equally-sized inputs, scaled by
    ``cos_scale``; scalar (size-1) output."""

    def __init__(self, name, inputs, cos_scale=1, device=None):
        super(CosSimLayer, self).__init__(
            name, 'cos', 1, inputs=inputs, device=device)
        self.config.cos_scale = cos_scale
        config_assert(
            len(self.inputs) == 2,
            'The CosSimLayer expects two and only two inputs.')
        config_assert(
            self.get_input_layer(0).size == self.get_input_layer(1).size,
            'The two inputs of CosSimLayer must have the same dimensionality.')
@config_layer('cos_vm')
class CosSimVecMatLayer(LayerBase):
    """Cosine similarity between a vector (input 0) and each of ``size``
    row vectors packed into input 1; scaled by ``cos_scale``."""

    def __init__(self, name, size, inputs, cos_scale=1.0, device=None):
        super(CosSimVecMatLayer, self).__init__(
            name, 'cos_vm', size, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2, 'The CosSimVecMatLayer must have 2 inputs.')
        config_assert(
            size * self.get_input_layer(0).size == self.get_input_layer(1).size,
            'Wrong input size for CosSimVecMatLayer.')
        self.config.cos_scale = cos_scale
@config_layer('l2_distance')
class L2DistanceLayer(LayerBase):
    """Euclidean distance between two equally-sized inputs; scalar
    (size-1) output."""

    def __init__(self, name, inputs, device=None):
        super(L2DistanceLayer, self).__init__(
            name, 'l2_distance', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2,
            'The L2DistanceLayer must have and only have 2 inputs.')
        left = self.get_input_layer(0)
        right = self.get_input_layer(1)
        config_assert(
            left.size == right.size,
            'Two inputs of the L2DistanceLayer must have '
            'the same dimensionality.')
@config_layer('sampling_id')
class SamplingIdLayer(LayerBase):
    """Samples an id from the input distribution (per the layer type
    name; the sampling itself happens in the C++ layer)."""

    def __init__(self, name, inputs, device=None):
        super(SamplingIdLayer, self).__init__(
            name, 'sampling_id', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1, 'SamplingIdLayer must have 1 input')
        for idx in xrange(len(self.inputs)):
            self.set_layer_size(self.get_input_layer(idx).size)
# AverageLayer: "average" for each sample within a sequence.
# average_strategy: set to one of the following:
# 'average': plain average.
# 'sum': sum each sample instead of average (which is divide by sample_num).
# 'squarerootn': sum each sample, but divide by sqrt(sample_num).
@config_layer('average')
class AverageLayer(LayerBase):
    """Sequence pooling with a configurable strategy ('average', 'sum',
    or 'squarerootn' -- see the comment block above this class);
    ``stride`` windows are not supported in 'seq' mode."""

    def __init__(self,
                 name,
                 inputs,
                 average_strategy='average',
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(AverageLayer, self).__init__(
            name, 'average', 0, inputs=inputs, **xargs)
        config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.average_strategy = average_strategy
        self.config.seq_pool_stride = stride
        self.config.trans_type = trans_type
        for idx in xrange(len(self.inputs)):
            self.set_layer_size(self.get_input_layer(idx).size)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('tensor')
class TensorLayer(LayerBase):
    """Bilinear tensor product of two inputs.  A single parameter of
    shape (in0, in1, size) is attached to input 0; input 1 must not
    carry its own parameter."""

    def __init__(self, name, size, inputs, bias=True, **xargs):
        super(TensorLayer, self).__init__(
            name, 'tensor', size, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs')
        config_assert(size > 0, 'size must be positive')
        config_assert(inputs[1].parameter_name == None,
                      'second parameter should be None.')
        left = self.get_input_layer(0)
        right = self.get_input_layer(1)
        param_size = size * left.size * right.size
        self.create_input_parameter(0, param_size,
                                    [left.size, right.size, size])
        self.create_bias_parameter(bias, size)
@config_layer('mixed')
class MixedLayer(LayerBase):
    """Combines Projection inputs and Operator inputs into one layer.

    Operators may reference extra layers by name; those are appended to
    the input list here.  The layer size is either given explicitly or
    inferred from the first projection/operator that reports a non-zero
    output size; all others must agree with it.
    """

    def __init__(self, name, inputs, size=0, bias=True, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        super(MixedLayer, self).__init__(
            name, 'mixed', size, inputs=inputs, **xargs)
        operator_input_index = []
        for operator in self.operators:
            operator_conf = operator.operator_conf
            # Append the operator's extra input layers (all but the first,
            # which is already among self.inputs) to the layer's inputs.
            for i in xrange(1, len(operator.input_layer_names)):
                input_index = len(self.config.inputs)
                operator_conf.input_indices.append(input_index)
                input_config = Input(operator.input_layer_names[i])
                self.inputs.append(input_config)
                layer_input = self.config.inputs.add()
                layer_input.input_layer_name = input_config.input_layer_name
            for input_index in operator_conf.input_indices:
                input_layer = self.get_input_layer(input_index)
                operator_conf.input_sizes.append(input_layer.size)
                operator_input_index.append(input_index)
            if self.config.size == 0:
                # Infer the layer size from the operator's output size.
                size = operator.calc_output_size(operator_conf.input_sizes)
                if size != 0:
                    self.set_layer_size(size)
            else:
                sz = operator.calc_output_size(operator_conf.input_sizes)
                if sz != 0:
                    config_assert(
                        sz == self.config.size,
                        "different inputs have different size: %s vs. %s" %
                        (sz, self.config.size))
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            # Anything not consumed by an operator must be a Projection.
            if input_index not in operator_input_index:
                config_assert(
                    isinstance(input, Projection),
                    "input should be projection or operation")
            if self.config.size == 0 and isinstance(input, Projection):
                size = input.calc_output_size(input_layer)
                if size != 0:
                    self.set_layer_size(size)
            elif isinstance(input, Projection):
                sz = input.calc_output_size(input_layer)
                if sz != 0:
                    config_assert(
                        sz == self.config.size,
                        "different inputs have different size: %s vs. %s" %
                        (sz, self.config.size))
        # NOTE(review): this checks the *local* `size`, which may still hold
        # a value inferred in an earlier iteration rather than config.size.
        config_assert(size != 0, "size is not set")
        # Finalize each projection's conf and create its parameter.
        for input_index in xrange(len(self.inputs)):
            input = self.inputs[input_index]
            if isinstance(input, Projection):
                input_layer = self.get_input_layer(input_index)
                input.proj_conf.input_size = input_layer.size
                input.proj_conf.output_size = size
                input_config = self.config.inputs[input_index]
                input_config.proj_conf.CopyFrom(input.proj_conf)
                input_config.proj_conf.name = gen_parameter_name(name,
                                                                 input_index)
                psize = input.calc_parameter_size(input_layer.size, size)
                dims = input.calc_parameter_dims(input_layer.size, size)
                self.create_input_parameter(input_index, psize, dims)
        # Record finalized operator configs now that the size is known.
        for operator in self.operators:
            operator_conf = operator.operator_conf
            operator_conf.output_size = self.config.size
            operator.check_dims()
            record_operator_conf = self.config.operator_confs.add()
            record_operator_conf.CopyFrom(operator_conf)
        psize = self.config.size
        if isinstance(self.inputs[0], ConvProjection):
            # Conv projections share biases; bias size is summed per input.
            self.config.shared_biases = True
            psize = 0
            for input in self.inputs:
                psize += input.calc_bias_size()
        if bias:
            self.config.bias_size = psize
            self.create_bias_parameter(bias, psize)
# like MixedLayer, but no bias parameter
@config_func
def ExpressionLayer(name, inputs, **xargs):
    """Convenience wrapper: a MixedLayer with the bias forced off."""
    MixedLayer(name, inputs, bias=False, **xargs)
@config_layer('concat')
class ConcatenateLayer(LayerBase):
    """Concatenates all inputs along the feature dimension.

    All inputs must share height/width/depth, which are copied from the
    first input; bias is not supported.  With use_mkldnn the layer type
    silently switches to 'mkldnn_concat'.
    """
    layer_type = 'concat'

    def __init__(self, name, inputs, bias=False, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        config_assert(not bias, 'ConcatenateLayer cannot support bias.')
        use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
        if self.layer_type == "mkldnn_concat":
            config_assert(use_mkldnn, "mkldnn_concat only support MKLDNN")
        self.layer_type = 'mkldnn_concat' if use_mkldnn else 'concat'
        super(ConcatenateLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        size = 0
        for input_index in xrange(len(self.inputs)):
            assert self.get_input_layer(0).height == self.get_input_layer(
                input_index).height
            assert self.get_input_layer(0).width == self.get_input_layer(
                input_index).width
            assert self.get_input_layer(0).depth == self.get_input_layer(
                input_index).depth
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            # NOTE(review): super() was called with size 0, so this guard
            # looks always-true here; sizes are summed only in that case.
            if self.config.size == 0:
                size += input_layer.size

        self.set_layer_height_width(self.get_input_layer(0).height, \
                                    self.get_input_layer(0).width)
        self.set_layer_depth(self.get_input_layer(0).depth)
        self.set_layer_size(size)
@config_layer('mkldnn_concat')
class MKLDNNConcatLayer(ConcatenateLayer):
    """MKL-DNN variant of ConcatenateLayer; ConcatenateLayer.__init__
    asserts use_mkldnn is enabled when this layer_type is requested."""
    layer_type = 'mkldnn_concat'
# like concat layer, but each input layer was processed by a Projection.
@config_layer('concat2')
class ConcatenateLayer2(LayerBase):
    """Concatenation where every input goes through its own Projection;
    output size is the sum of the projections' output sizes.  If the
    first input is a ConvProjection, all must be, and biases are shared.
    """

    def __init__(self, name, inputs, bias=False, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        super(ConcatenateLayer2, self).__init__(
            name, 'concat2', 0, inputs=inputs, **xargs)

        if isinstance(self.inputs[0], ConvProjection):
            for input_index in xrange(len(self.inputs) - 1):
                input = self.inputs[input_index + 1]
                config_assert(
                    isinstance(input, ConvProjection),
                    "The first input of ConcatenateLayer2 is ConvProjection, "
                    "the other inputs should also be ConvProjection.")

        size = 0
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            output_size = input.calc_output_size(input_layer)
            config_assert(output_size != 0, "proj output size is not set")
            size += output_size

        self.set_layer_size(size)

        # Finalize each projection config and create its parameter.
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            input.proj_conf.input_size = input_layer.size
            input.proj_conf.output_size = input.calc_output_size(input_layer)

            input_config = self.config.inputs[input_index]
            input_config.proj_conf.CopyFrom(input.proj_conf)
            input_config.proj_conf.name = gen_parameter_name(name, input_index)
            psize = input.calc_parameter_size(input.proj_conf.input_size,
                                              input.proj_conf.output_size)
            dims = input.calc_parameter_dims(input.proj_conf.input_size,
                                             input.proj_conf.output_size)
            self.create_input_parameter(input_index, psize, dims)

        psize = self.config.size
        if isinstance(self.inputs[0], ConvProjection):
            # Conv projections share biases; bias size is summed per input.
            self.config.shared_biases = True
            psize = 0
            for input in self.inputs:
                psize += input.calc_bias_size()

        if bias:
            self.config.bias_size = psize
            self.create_bias_parameter(bias, psize)
@config_layer('recurrent')
class RecurrentLayer(LayerBase):
    """Simple (vanilla) recurrent layer with a single square recurrent
    weight matrix.  With use_mkl_packed the layer type silently switches
    to 'mkl_packed_recurrent'."""
    layer_type = 'recurrent'

    def __init__(self, name, inputs, reversed=False, bias=True, **xargs):
        use_mkl_packed = bool(
            int(g_command_config_args.get("use_mkl_packed", 0)))
        self.layer_type = 'mkl_packed_recurrent' if use_mkl_packed else 'recurrent'
        super(RecurrentLayer, self).__init__(name, self.layer_type, 0, inputs,
                                             **xargs)
        config_assert(len(self.inputs) == 1, 'RecurrentLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        size = input_layer.size
        self.set_layer_size(size)
        self.config.reversed = reversed
        # One size x size recurrent weight matrix.
        dims = [size, size]
        self.create_input_parameter(0, size * size, dims)
        self.create_bias_parameter(bias, self.config.size)
@config_layer('lstmemory')
class LstmLayer(LayerBase):
    """LSTM layer; the input must already contain the 4 gate
    pre-activations, so its size is 4x the hidden size."""

    def __init__(self,
                 name,
                 inputs,
                 reversed=False,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(LstmLayer, self).__init__(name, 'lstmemory', 0, inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'LstmLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        #check input_layer.size is divided by 4
        config_assert(input_layer.size % 4 == 0, "size % 4 should be 0!")
        # Python 2 integer division (this file uses xrange, i.e. py2).
        size = input_layer.size / 4
        self.set_layer_size(size)
        self.config.reversed = reversed
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        self.create_input_parameter(0, size * size * 4, [size, size, 4])
        #bias includes 3 kinds of peephole, 4 + 3 = 7
        self.create_bias_parameter(bias, size * 7)
@config_layer('lstm_step')
class LstmStepLayer(LayerBase):
    """One LSTM time step: input 0 carries the 4 gate pre-activations
    (4 * size wide), input 1 the previous cell state (size wide).  The
    size-3 bias presumably holds the peephole weights -- confirm."""

    def __init__(self,
                 name,
                 size,
                 inputs,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(LstmStepLayer, self).__init__(name, 'lstm_step', size, inputs,
                                            **xargs)
        config_assert(len(inputs) == 2, 'LstmStepLayer must have 2 inputs')
        gates = self.get_input_layer(0)
        prev_state = self.get_input_layer(1)
        config_assert(gates.size == 4 * size,
                      'input_layer0.size != 4 * layer.size')
        config_assert(prev_state.size == size,
                      'input_layer1.size != layer.size')
        self.config.active_state_type = active_state_type
        self.config.active_gate_type = active_gate_type
        self.create_bias_parameter(bias, size * 3)
# get the specific output from the input layer.
@config_layer('get_output')
class GetOutputLayer(LayerBase):
    """Fetches a named auxiliary output of its single input layer; the
    output name comes from the input's input_layer_argument."""

    def __init__(self, name, size, inputs):
        super(GetOutputLayer, self).__init__(name, 'get_output', size, inputs)
        config_assert(
            len(self.inputs) == 1, 'GetOutputLayer must have 1 inputs')
        first_input = self.inputs[0]
        config_assert(first_input.input_layer_argument,
                      'input_layer_argument cannot be empty')
@config_layer('mdlstmemory')
class MDLstmLayer(LayerBase):
    """Multi-dimensional LSTM; ``directions`` is a sequence of booleans,
    one per dimension, and the input width must be (3 + dims) times the
    hidden size."""

    def __init__(self,
                 name,
                 inputs,
                 directions=True,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs,
                                          **xargs)
        config_assert(len(self.inputs) == 1, 'MDLstmLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        # NOTE(review): the default directions=True is unusable here --
        # len(True) raises; callers must pass a list/tuple of booleans.
        dim_num = len(directions)
        #check input_layer.size is divided by (3+dim_num)
        config_assert(input_layer.size % (3 + dim_num) == 0,
                      "size % (dim_num) should be 0!")
        # Python 2 integer division (this file uses xrange, i.e. py2).
        size = input_layer.size / (3 + dim_num)
        self.set_layer_size(size)
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        for i in xrange(len(directions)):
            self.config.directions.append(int(directions[i]))
        self.create_input_parameter(0, size * size * (3 + dim_num),
                                    [size, size, 3 + dim_num])
        #bias includes 3 kinds of peephole, 3+dim_num+2+dim_num
        self.create_bias_parameter(bias, size * (5 + 2 * dim_num))
@config_layer('gated_recurrent')
class GatedRecurrentLayer(LayerBase):
    """GRU layer; the input must already contain the 3 gate
    pre-activations, so its size is 3x the hidden size."""

    def __init__(self,
                 name,
                 inputs,
                 reversed=False,
                 active_gate_type="sigmoid",
                 bias=True,
                 **xargs):
        super(GatedRecurrentLayer, self).__init__(name, 'gated_recurrent', 0,
                                                  inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'GatedRecurrentLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        #check input_layer.size is divided by 3
        config_assert(input_layer.size % 3 == 0, "size % 3 should be 0!")
        # Python 2 integer division (this file uses xrange, i.e. py2).
        size = input_layer.size / 3
        self.set_layer_size(size)
        self.config.reversed = reversed
        self.config.active_gate_type = active_gate_type
        # Recurrent weights laid out as (size, size * 3).
        self.create_input_parameter(0, size * size * 3, [size, size * 3])
        self.create_bias_parameter(bias, size * 3)
@config_layer('gru_step')
class GruStepLayer(LayerBase):
    """One GRU time step: input 0 carries the 3 gate pre-activations
    (3 * size wide), input 1 the previous hidden state (size wide)."""

    def __init__(self,
                 name,
                 size,
                 inputs,
                 active_gate_type="sigmoid",
                 bias=True,
                 **xargs):
        super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs,
                                           **xargs)
        config_assert(len(self.inputs) == 2, 'GruStepLayer must have 2 input')
        gates = self.get_input_layer(0)
        prev_state = self.get_input_layer(1)
        config_assert(gates.size == 3 * size,
                      'input_layer0.size != 3 * layer.size')
        config_assert(prev_state.size == size,
                      'input_layer1.size != layer.size')
        self.config.active_gate_type = active_gate_type
        self.create_input_parameter(0, size * size * 3, [size, size * 3])
        self.create_bias_parameter(bias, size * 3)
'''
A layer for calculating the cost of sequential conditional random field model.
Example: CRFLayer(name="crf_cost", size=label_num,
inputs=["output", "label", "weight"])
where "weight" is optional, one weight for each sequence
@param coeff: weight of the layer
'''
@config_layer('crf')
class CRFLayer(LayerBase):
    """Sequential CRF cost layer.

    Inputs: (emission output, label[, per-sequence weight]). *coeff* scales
    the layer's contribution to the total cost.
    """

    def __init__(self, name, size, inputs, coeff=1.0, device=None):
        super(CRFLayer, self).__init__(name, 'crf', size, inputs, device=device)
        config_assert(2 <= len(self.inputs) <= 3,
                      'CRFLayer must have 2 or 3 inputs')
        # transition parameter: (size + 2) x size — size tags plus the extra
        # start/end rows
        self.create_input_parameter(0, size * (size + 2), [size + 2, size])
        self.config.coeff = coeff
'''
A layer for calculating the decoding sequence of sequential conditional
random field model.
The decoding sequence is stored in output_.ids
If a second input is provided, it is treated as the ground-truth label, and
this layer will also calculate error, output_.value[i] is 1 for incorrect
decoding or 0 for correct decoding
'''
@config_layer('crf_decoding')
class CRFDecodingLayer(LayerBase):
    """CRF decoding layer (Viterbi); decoded ids go to output_.ids.

    An optional second input is treated as the ground-truth label, in which
    case output_.value marks per-sequence decoding errors (1 = incorrect).
    """

    def __init__(self, name, size, inputs, device=None):
        super(CRFDecodingLayer, self).__init__(
            name, 'crf_decoding', size, inputs, device=device)
        config_assert(
            len(self.inputs) <= 2,
            'CRFDecodingLayer cannot have more than 2 inputs')
        # same (size + 2) x size transition layout as CRFLayer
        self.create_input_parameter(0, size * (size + 2), [size + 2, size])
@config_layer('ctc')
class CTCLayer(LayerBase):
    """CTC cost layer over (probability sequence, label sequence) inputs."""

    def __init__(self, name, size, inputs, norm_by_times=False, device=None):
        super(CTCLayer, self).__init__(name, 'ctc', size, inputs, device=device)
        # whether to normalize gradients by sequence length
        self.config.norm_by_times = norm_by_times
        config_assert(len(self.inputs) == 2, 'CTCLayer must have 2 inputs')
@config_layer('kmax_seq_score')
class KmaxSeqScoreLayer(LayerBase):
    """Select the top *beam_size* sequence scores from its single input."""

    def __init__(self, name, inputs, beam_size, **xargs):
        super(KmaxSeqScoreLayer, self).__init__(
            name, 'kmax_seq_score', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'KmaxSeqScoreLayer has only one input.')
        self.config.beam_size = beam_size
@config_layer('warp_ctc')
class WarpCTCLayer(LayerBase):
    """warp-ctc cost layer.

    Requires two inputs (activations, labels); the first input must use a
    linear (or unset) activation because warp-ctc applies softmax internally.
    """

    def __init__(self,
                 name,
                 size,
                 inputs,
                 blank=0,
                 norm_by_times=False,
                 device=None):
        super(WarpCTCLayer, self).__init__(
            name, 'warp_ctc', size=size, inputs=inputs, device=device)
        self.config.blank = blank
        self.config.norm_by_times = norm_by_times
        config_assert(len(self.inputs) == 2, 'WarpCTCLayer must have 2 inputs')
        act_type = self.get_input_layer(0).active_type
        config_assert(
            act_type in ('', 'linear'),
            "Expecting the active_type of input layer to be linear or null")
@config_layer('recurrent_layer_group')
class RecurrentLayerGroup(LayerBase):
    """Zero-size marker layer that opens a recurrent layer group (no inputs)."""

    def __init__(self, name, device=None):
        super(RecurrentLayerGroup, self).__init__(
            name, 'recurrent_layer_group', 0, inputs=[], device=device)
@config_layer('switch_order')
class SwitchOrderLayer(LayerBase):
    """Permute input tensor axes (e.g. NCHW -> NHWC).

    *reshape* is a dict with 'height' and 'width' axis-index lists, or None
    to pass the input through with its size unchanged.
    """

    def __init__(self, name, inputs, reshape, **xargs):
        super(SwitchOrderLayer, self).__init__(
            name, 'switch_order', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        if reshape is None:
            self.set_layer_size(input_layer.size)
        else:
            # Bug fix: the axis lists were previously read *before* the
            # `reshape is None` check, so passing reshape=None raised a
            # TypeError instead of taking the pass-through branch.
            self.config.reshape_conf.height_axis.extend(reshape['height'])
            self.config.reshape_conf.width_axis.extend(reshape['width'])
            in_h = input_layer.height
            in_w = input_layer.width
            out_dims = None
            if input_layer.has_depth():
                in_d = input_layer.depth
                # Python 2 integer division; size assumed divisible by h*w*d.
                in_c = input_layer.size / in_h / in_w / in_d
                # batch_size, depth, height, width, channel
                out_dims = [0, in_d, in_h, in_w, in_c]
            else:
                in_c = input_layer.size / in_h / in_w
                # batch_size, height, width, channel
                out_dims = [0, in_h, in_w, in_c]
            # Because (reshape['width'][0] > 0) always be true.
            # So out_dims[0] won't be used.
            size = reduce(lambda x, y: x * y, out_dims[reshape['width'][0]:])
            self.set_layer_size(size)
@config_layer('scale_sub_region')
class ScaleSubRegionLayer(LayerBase):
    """Multiply a sub-region of the input feature map by the constant *value*."""

    def __init__(self, name, inputs, value, **xargs):
        super(ScaleSubRegionLayer, self).__init__(
            name, 'scale_sub_region', 0, inputs=inputs, **xargs)
        scale_sub_region_conf = self.config.inputs[0].scale_sub_region_conf
        scale_sub_region_conf.value = value
        # get channel, width and height from input_0 layer
        input_layer = self.get_input_layer(0)
        image_conf = scale_sub_region_conf.image_conf
        image_conf.img_size = input_layer.width
        image_conf.img_size_y = input_layer.height
        # Python 2 integer division; size assumed divisible by width * height
        image_conf.channels = input_layer.size / (input_layer.width *
                                                  input_layer.height)
        self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size,
                           image_conf.channels)
@config_layer('factorization_machine')
class FactorizationMachineLayer(LayerBase):
    """Second-order factorization machine over a single input vector.

    Learns one latent factor row of width *factor_size* per input dimension;
    the layer output is a scalar (size=1).
    """

    def __init__(self, name, inputs, factor_size, **xargs):
        super(FactorizationMachineLayer, self).__init__(
            name, 'factorization_machine', size=1, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'factorization machine layer must have one and only one input.')
        self.config.factor_size = factor_size
        input_size = self.get_input_layer(0).size
        param_size = input_size * factor_size
        self.create_input_parameter(0, param_size, [input_size, factor_size])
# Deprecated, use a new layer specific class instead
@config_func
def Layer(name, type, **xargs):
    """Generic layer factory: dispatch on the *type* string.

    Layer-type entries override cost-map entries with the same key, matching
    the original update order.
    """
    registry = {}
    registry.update(g_cost_map)
    registry.update(g_layer_type_map)
    maker = registry.get(type)
    config_assert(maker, "layer type '%s' not supported." % type)
    return maker(name, **xargs)
@config_func
def ParameterHook(type, **kwargs):
    """Build a ParameterUpdaterHookConfig for 'pruning'/'dpruning' hooks.

    Returns None for any unrecognized hook type. For 'pruning' an optional
    'sparsity_ratio' keyword is copied onto the config when provided.
    """
    if type == 'pruning':
        hook = ParameterUpdaterHookConfig()
        hook.type = type
        sparsity_ratio = kwargs.get('sparsity_ratio', None)
        if sparsity_ratio is not None:
            hook.sparsity_ratio = sparsity_ratio
        return hook
    if type == 'dpruning':
        hook = ParameterUpdaterHookConfig()
        hook.type = type
        return hook
    return None
@config_func
def Parameter(name,
              size,
              device,
              dims,
              learning_rate=None,
              momentum=None,
              decay_rate=None,
              decay_rate_l1=None,
              initial_mean=None,
              initial_std=None,
              initial_strategy=None,
              initial_smart=None,
              num_batches_regularization=None,
              sparse_remote_update=None,
              sparse_update=None,
              gradient_clipping_threshold=None,
              sparse=None,
              format=None,
              need_compact=None,
              is_static=None,
              is_shared=None,
              update_hooks=None,
              initializer=None):
    """Register one parameter proto in the global model config.

    Unset keyword arguments fall back to the module-level ``g_default_*``
    values installed by the ``default_*`` config functions. The created proto
    is recorded in ``g_parameter_map`` under *name*; an optional callable
    *initializer* is recorded in ``g_parameter_initializer_map``.
    (Python 2 module.)
    """
    config_assert(name not in g_parameter_map,
                  'Duplicated parameter name: ' + name)
    para = g_config.model_config.parameters.add()
    para.name = name
    para.size = size
    if device is not None:
        para.device = int(device)
    para.dims.extend(dims)
    if learning_rate is not None:
        para.learning_rate = float(learning_rate)
    momentum = default(momentum, g_default_momentum)
    if momentum is not None:
        para.momentum = float(momentum)
    config_assert(not momentum or not decay_rate_l1,
                  "momentum and decay_rate_l1 cannot both be non-zero")
    decay_rate = default(decay_rate, g_default_decay_rate)
    if decay_rate is not None:
        para.decay_rate = decay_rate
    if decay_rate_l1 is not None:
        para.decay_rate_l1 = decay_rate_l1
    para.initial_std = default(initial_std, g_default_initial_std)
    para.initial_mean = default(initial_mean, g_default_initial_mean)
    num_batches_regularization = default(num_batches_regularization,
                                         g_default_num_batches_regularization)
    if num_batches_regularization is not None:
        para.num_batches_regularization = int(num_batches_regularization)
    if sparse_remote_update is not None:
        para.sparse_remote_update = sparse_remote_update
        if sparse_remote_update:
            g_config.opt_config.use_sparse_remote_updater = True
    if sparse_update is not None:
        para.sparse_update = sparse_update
    gradient_clipping_threshold = default(gradient_clipping_threshold,
                                          g_default_gradient_clipping_threshold)
    if gradient_clipping_threshold is not None:
        para.gradient_clipping_threshold = gradient_clipping_threshold
    para.initial_strategy = default(initial_strategy,
                                    g_default_initial_strategy)
    para.initial_smart = default(initial_smart, g_default_initial_smart)
    if para.initial_smart:
        # "smart" init: zero mean, std scaled by 1/sqrt(fan-in) when the
        # first dim is known; otherwise fall back to 1/sqrt(size).
        para.initial_mean = 0.
        if len(para.dims) != 0:
            para.initial_std = 1. / math.sqrt(para.dims[0])
        else:
            print(
                "Use initial_smart, but dims not set. Initial_smart may not be used in this layer"
            )
            traceback.print_exc()
            para.initial_std = 1. / math.sqrt(para.size)
    if g_default_compact_func is not None:
        # the compact hook may override sparse/format/need_compact per name
        sparse, format, need_compact = g_default_compact_func(para.name)
    if sparse is not None:
        para.is_sparse = sparse
    if format is not None:
        para.format = format
    if need_compact is not None:
        para.need_compact = need_compact
    if is_static is not None:
        para.is_static = is_static
    config_assert(not para.sparse_remote_update or not para.is_static,
                  "sparse_remote_update and is_static cannot both be true")
    if is_shared is not None:
        para.is_shared = is_shared
    update_hooks = default(update_hooks, g_default_update_hooks)
    if update_hooks is not None:
        # a callable produces the hook(s); a list adds each hook in order
        if hasattr(update_hooks, '__call__'):
            update_hooks = update_hooks()
        if isinstance(update_hooks, list):
            for hook in update_hooks:
                para.update_hooks.extend([hook])
        else:
            para.update_hooks.extend([update_hooks])
    g_parameter_map[name] = para
    if initializer is not None:
        config_assert(
            callable(initializer),
            "parameter initializer should be a callable object")
        g_parameter_initializer_map[name] = initializer
# Config-file helpers that install global fallbacks consumed by Parameter().
@config_func
def default_initial_std(val):
    global g_default_initial_std
    g_default_initial_std = val


@config_func
def default_initial_mean(val):
    global g_default_initial_mean
    g_default_initial_mean = val


@config_func
def default_initial_strategy(val):
    global g_default_initial_strategy
    g_default_initial_strategy = val


@config_func
def default_initial_smart(val):
    global g_default_initial_smart
    g_default_initial_smart = val


@config_func
def default_momentum(val):
    global g_default_momentum
    g_default_momentum = val


@config_func
def default_decay_rate(val):
    global g_default_decay_rate
    g_default_decay_rate = val


@config_func
def default_num_batches_regularization(val):
    global g_default_num_batches_regularization
    g_default_num_batches_regularization = val


@config_func
def default_gradient_clipping_threshold(val):
    global g_default_gradient_clipping_threshold
    g_default_gradient_clipping_threshold = val


@config_func
def default_device(val):
    # Default device id for layers/parameters that do not specify one.
    global g_default_device
    g_default_device = val


@config_func
def default_update_hooks(val):
    global g_default_update_hooks
    g_default_update_hooks = val


@config_func
def default_compact_func(val):
    global g_default_compact_func
    g_default_compact_func = val
def make_importer(config_dir, config_args):
    """Build the Import() helper exposed to config scripts.

    Relative paths are resolved against *config_dir*; each imported file is
    recorded in g_config.config_files and executed (Python 2 ``execfile``)
    inside a fresh config environment.
    """
    def Import(config_file, local_args={}):
        # NOTE(review): mutable default arg is shared across calls — appears
        # to be used only as a scratch locals namespace here; confirm.
        if not config_file.startswith('/'):
            config_file = config_dir + '/' + config_file
        g_config.config_files.append(config_file)
        execfile(config_file,
                 make_config_environment(config_file, config_args), local_args)
    return Import
# Defaults for the optimizer-related Settings() keys; None entries are
# skipped when copied onto the OptimizationConfig proto (see update_g_config).
DEFAULT_SETTING = dict(
    batch_size=None,
    mini_batch_size=None,
    algorithm='async_sgd',
    async_lagged_grad_discard_ratio=1.5,
    learning_method='momentum',
    gradient_clipping_threshold=None,
    num_batches_per_send_parameter=None,
    num_batches_per_get_parameter=None,
    center_parameter_update_method=None,
    learning_rate=1.,
    learning_rate_decay_a=0.,
    learning_rate_decay_b=0.,
    learning_rate_schedule='poly',
    learning_rate_args='',
    l1weight=0.1,
    l2weight=0.,
    l2weight_zero_iter=0,
    c1=0.0001,
    backoff=0.5,
    owlqn_steps=10,
    max_backoff=5,
    average_window=0,
    do_average_in_cpu=False,
    max_average_window=None,
    ada_epsilon=1e-6,
    ada_rou=0.95,
    delta_add_rate=1.0,
    shrink_parameter_value=0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8, )

# Live settings table, mutated in place by Settings(**kwargs).
settings = copy.deepcopy(DEFAULT_SETTING)

# Keys kept only for backward compatibility (forwarded to DataConfig).
settings_deprecated = dict(usage_ratio=1., )

# Trainer-level (non-optimizer) settings copied onto the TrainerConfig proto.
trainer_settings = dict(
    save_dir="./output/model",
    init_model_path=None,
    start_pass=0, )
@config_func
def Settings(**args):
    """Route keyword settings into the optimizer/trainer setting tables.

    The deprecated 'usage_ratio' key is forwarded to DataConfig when one
    exists; unknown keys abort via logger.fatal. (Python 2: iteritems.)
    """
    for k, v in args.iteritems():
        if k == "usage_ratio":
            logger.warning(
                "Deprecated: define usage_ratio in DataConfig instead")
            if g_config.HasField("data_config"):
                g_config.data_config.__setattr__(k, v)
            settings_deprecated[k] = v
            continue
        elif k in settings:
            settings[k] = v
        elif k in trainer_settings:
            trainer_settings[k] = v
        else:
            logger.fatal('Unkown setting: %s' % k)
@config_func
def cluster_config(**args):
    # Accepted for config-file compatibility; cluster options are handled
    # elsewhere, so this is deliberately a no-op.
    pass
@config_func
def EnableSubmodelSuffix(flag=True):
    """
    If enabled, the layer and evaluator names in submodel will be automatically
    appended with @submodel_name
    """
    # flips the module-level switch consulted when naming submodel members
    global g_add_submodel_suffix
    g_add_submodel_suffix = flag
def make_config_environment(config_file, config_args):
    """Assemble the global namespace handed to a config script.

    Bare assignments to known setting names become fatal-error setters (they
    must go through Settings()); registered config funcs, Import and
    get_config_arg are exposed directly. (Python 2: iterkeys.)
    """
    def make_setter(k):
        def setter(v):
            logger.fatal("Obsolete: use Settings(%s=%s, ...) instead" % (k, v))
        return setter
    funcs = {}
    funcs.update(g_config_funcs)
    for k in settings.iterkeys():
        funcs[k] = make_setter(k)
    for k in settings_deprecated.iterkeys():
        funcs[k] = make_setter(k)
    config_dir = os.path.dirname(config_file)
    if not config_dir:
        config_dir = '.'
    funcs.update(
        Import=make_importer(config_dir, config_args),
        get_config_arg=make_get_config_arg(config_args), )
    # extension-module functions may override anything installed above
    funcs.update(g_extended_config_funcs)
    return funcs
def make_get_config_arg(config_args):
    """Build the get_config_arg(name, type, default) accessor over *config_args*.

    Booleans are parsed from the strings 'True'/'1'/'true' and
    'False'/'0'/'false' (missing/empty -> *default*, anything else raises
    ValueError); every other type is applied as a plain converter.
    """
    def get_config_arg(name, type, default=None):
        if type != bool:
            return type(config_args.get(name, default))
        raw = config_args.get(name)
        if not raw:
            return default
        if raw in ('True', '1', 'true'):
            return True
        if raw in ('False', '0', 'false'):
            return False
        raise ValueError('Value of config_arg %s is not boolean' % name)
    return get_config_arg
def importlib(name):
    # Import *name* by dotted path and return the leaf module itself.
    # (NB: this helper shadows the stdlib `importlib` module in this file.)
    __import__(name)
    return sys.modules[name]
def find_caller():
    """Return (filename, lineno, funcname) of the nearest caller frame that
    is not inside config_parser.py; a placeholder tuple if none is found.

    Installed as logger.findCaller by begin_parse().
    """
    for frame in traceback.extract_stack()[-4::-1]:
        filename, lineno, funcname = frame[0], frame[1], frame[2]
        if not filename.endswith('config_parser.py'):
            return filename, lineno, funcname
    return "(unknown file)", 0, "(unknown function)"
def my_fatal(s):
    # Replacement for logger.fatal (installed in begin_parse): log critically,
    # then raise so config errors abort parsing instead of exiting the process.
    logger.critical(s)
    raise Exception()
# Hooks invoked at the start of every parse (see begin_parse()).
_parse_config_hooks = set()


def register_parse_config_hook(f):
    """
    Register a hook function for parse_config. parse_config will invoke the hook
    at the beginning of parse. This makes it possible to reset global state
    before constructing the model.
    """
    _parse_config_hooks.add(f)
def update_g_config():
    '''
    Update g_config after execute config_file or config_functions.

    Copies the accumulated setting tables onto the protos (skipping unset
    None values) and validates that declared input/output layer names refer
    to real layers. Returns g_config. (Python 2: iteritems.)
    '''
    for k, v in settings.iteritems():
        if v is None:
            continue
        g_config.opt_config.__setattr__(k, v)
    for k, v in trainer_settings.iteritems():
        if v is None:
            continue
        g_config.__setattr__(k, v)
    for name in g_config.model_config.input_layer_names:
        assert name in g_layer_map, \
            'input name "%s" does not correspond to a layer name' % name
        assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \
            'The type of input layer "%s" is not "data"' % name
    for name in g_config.model_config.output_layer_names:
        assert name in g_layer_map, \
            'input name "%s" does not correspond to a layer name' % name
    return g_config
def begin_parse():
    """Reset parser globals and create the root submodel before a parse run."""
    init_config_environment()
    for hook in _parse_config_hooks:
        hook()
    # Route logging's caller lookup through our stack walker and make
    # logger.fatal raise (my_fatal) instead of exiting the process.
    logger.findCaller = find_caller
    logger.fatal = my_fatal
    g_config.model_config.type = "nn"
    global g_current_submodel, g_root_submodel
    g_root_submodel = g_config.model_config.sub_models.add()
    g_root_submodel.name = 'root'
    g_root_submodel.is_recurrent_layer_group = False
    g_current_submodel = g_root_submodel
def parse_config(trainer_config, config_arg_str):
    '''
    Parse *trainer_config* (a config-file path, or a callable executed with
    the config environment as its globals) and return the populated g_config.

    @param config_arg_str: a string of the form var1=val1,var2=val2. It will be
    passed to config script as a dictionary CONFIG_ARGS

    (Python 2 module: uses func_globals and execfile.)
    '''
    begin_parse()
    config_args = {}
    if config_arg_str:
        config_args = dict([f.split('=') for f in config_arg_str.split(',')])
    global g_command_config_args
    g_command_config_args.update(config_args)
    # an optional extension module may contribute extra config functions
    extension_module_name = config_args.get('extension_module_name')
    if extension_module_name:
        global g_extended_config_funcs
        extension_module = importlib(extension_module_name)
        g_extended_config_funcs = extension_module.get_config_funcs(g_config)
    if hasattr(trainer_config, '__call__'):
        trainer_config.func_globals.update(
            make_config_environment("", config_args))
        trainer_config()
    else:
        execfile(trainer_config,
                 make_config_environment(trainer_config, config_args))
    return update_g_config()
def parse_config_and_serialize(trainer_config, config_arg_str):
    """Parse the config and return it as a serialized proto string.

    On any failure the traceback is printed before the exception propagates.
    """
    try:
        config = parse_config(trainer_config, config_arg_str)
        #logger.info(config)
        return config.SerializeToString()
    except:
        traceback.print_exc()
        raise
if __name__ == '__main__':
    # Smoke-test entry point: parse the config file named on the command line
    # and pretty-print it. __real_print__ is presumably the saved builtin
    # print (rebound elsewhere in this module) — TODO confirm; it is defined
    # outside this excerpt.
    try:
        config = parse_config(sys.argv[1], '')
        config.SerializeToString()
        __real_print__(str(config))
    except:
        traceback.print_exc()
        raise
| 166,008 | 36.322167 | 111 | py |
Paddle | Paddle-master/python/paddle/utils/predefined_net.py | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
from paddle.trainer.config_parser import *
from paddle.utils.preprocess_img import \
ImageClassificationDatasetCreater
from paddle.trainer_config_helpers import *
def image_data(data_dir,
               processed_image_size,
               overwrite=False,
               color=True,
               train_list="batches/train.list",
               test_list="batches/test.list",
               meta_file="batches/batches.meta",
               use_jpeg=1):
    """
    Predefined image data provider for image classification.
    train_list: a text file containing a list of training batches.
    test_list: a text file containing a list of test batches.
    processed_image_size: all the input images will be resized into this size.
       If the image is not square. Then the shorter edge will be resized into
       this size, and the aspect ratio is kept the same.
    color: whether the images are color or gray.
    meta_path: the path of the meta file that stores the mean image file and
       other dataset information, such as the size of images,
       the size of the mean image, the number of classes.
    async_load_data: whether to load image data asynchronuously.

    Returns a dict with image_size / num_classes / is_color, to be passed to
    the network wrappers below. (NOTE(review): *overwrite* is accepted but
    unused in this body — confirm against the dataset creator.)
    """
    data_creator = ImageClassificationDatasetCreater(
        data_dir, processed_image_size, color)
    batch_data_dir = data_dir
    train_list = os.path.join(batch_data_dir, train_list)
    test_list = os.path.join(batch_data_dir, test_list)
    meta_path = os.path.join(batch_data_dir, meta_file)
    image_size = processed_image_size
    # the meta file is an np-loadable archive describing the dataset
    conf = np.load(meta_path)
    mean_image_size = conf["mean_image_size"]
    is_color = conf["color"]
    num_classes = conf["num_classes"]
    color_string = "color" if is_color else "gray"
    args = {
        'meta': meta_path,
        'mean_img_size': mean_image_size,
        'img_size': image_size,
        'num_classes': num_classes,
        'use_jpeg': use_jpeg != 0,
        'color': color_string
    }
    define_py_data_sources2(
        train_list,
        test_list,
        module='image_provider',
        obj='processData',
        args=args)
    return {
        "image_size": image_size,
        "num_classes": num_classes,
        "is_color": is_color
    }
def get_extra_layer_attr(drop_rate):
    """Wrap a nonzero *drop_rate* in an ExtraLayerAttribute; None disables it."""
    return None if drop_rate == 0 else ExtraLayerAttribute(drop_rate=drop_rate)
def image_data_layers(image_size, num_classes, is_color=False,
                      is_predict=False):
    """
    Data layers for image classification.

    image_size: image size.
    num_classes: num of classes.
    is_color: whether the input images are color.
    is_predict: whether the network is used for prediction.

    Returns (data_input, label_input_or_None, num_image_channels); the label
    layer is only created for training networks.
    """
    num_image_channels = 3 if is_color else 1
    data_input = data_layer("input",
                            image_size * image_size * num_image_channels)
    label_input = None if is_predict else data_layer("label", 1)
    return data_input, label_input, num_image_channels
def simple_conv_net(data_conf, is_color=False):
    """
    A Wrapper for a simple network for MNIST digit recognition.
    It contains two convolutional layers, one fully connected layer, and
    one softmax layer.
    data_conf is a dictionary with the following keys:
        image_size: image size.
        num_classes: num of classes.
        is_color: whether the input images are color.
    """
    # Inject the dataset description (image_size, num_classes, is_predict, ...)
    # into module globals, mirroring small_vgg. (Python 2: iteritems.)
    for k, v in data_conf.iteritems():
        globals()[k] = v
    data_input, label_input, num_image_channels = \
        image_data_layers(image_size, num_classes, is_color, is_predict)
    filter_sizes = [5, 5]
    num_channels = [32, 64]
    strides = [1, 1]
    fc_dims = [500]
    conv_bn_pool1 = img_conv_bn_pool(
        name="g1",
        input=data_input,
        filter_size=filter_sizes[0],
        num_channel=num_image_channels,
        num_filters=num_channels[0],
        conv_stride=1,
        conv_padding=0,
        pool_size=3,
        pool_stride=2,
        act=ReluActivation())
    conv_bn_pool2 = img_conv_bn_pool(
        name="g2",
        input=conv_bn_pool1,
        filter_size=filter_sizes[1],
        num_channel=num_channels[0],
        num_filters=num_channels[1],
        conv_stride=1,
        conv_padding=0,
        pool_size=3,
        pool_stride=2,
        act=ReluActivation())
    # fc_layer takes `size=` (made consistent with vgg_conv_net in this file,
    # which uses size=/act=; the old `dim=` keyword does not match that API).
    fc3 = fc_layer(
        name="fc3", input=conv_bn_pool2, size=fc_dims[0], act=ReluActivation())
    fc3_dropped = dropout_layer(name="fc3_dropped", input=fc3, dropout_rate=0.5)
    # Bug fix: the softmax output layer must have num_classes units; it was
    # previously sized fc_dims[0] (= 500).
    output = fc_layer(
        name="output",
        input=fc3_dropped,
        size=num_classes,
        act=SoftmaxActivation())
    if is_predict:
        # outputs()/classification_cost() match the helpers used by
        # vgg_conv_net below (classify/end_of_network are not used elsewhere
        # in this file).
        outputs(output)
    else:
        cost = classification_cost(name="cost", input=output, label=label_input)
        outputs(cost)
def conv_layer_group(prefix_num,
                     num_layers,
                     input,
                     input_channels,
                     output_channels,
                     drop_rates=[],
                     strides=[],
                     with_bn=[]):
    """
    A set of convolution layers, and batch normalization layers,
    followed by one pooling layer.
    It is utilized in VGG network for image classifcation.
    prefix_num: the prefix number of the layer names.
                For example, if prefix_num = 1, the first convolutioal layer's
                name will be conv_1_1.
    num_layers: number of the convolutional layers.
    input: the name of the input layer.
    input_channels: the number of channels of the input feature map.
    output_channels: the number of channels of the output feature map.
    drop_rates: the drop rates of the BN layers. It will be all zero by default.
    strides: the stride of the convolution for the layers.
             It will be all 1 by default.
    with_bn: whether to use Batch Normalization for Conv layers.
             By default, it is all false.

    Returns the trailing 2x2/stride-2 pooling layer.
    """
    # empty option lists expand to per-layer defaults
    if len(drop_rates) == 0: drop_rates = [0] * num_layers
    if len(strides) == 0: strides = [1] * num_layers
    if len(with_bn) == 0: with_bn = [False] * num_layers
    assert (len(drop_rates) == num_layers)
    assert (len(strides) == num_layers)
    for i in range(1, num_layers + 1):
        # chain: layer i consumes the group input or the previous layer output
        if i == 1:
            i_conv_in = input
        else:
            i_conv_in = group_output
        i_channels_conv = input_channels if i == 1 else output_channels
        # with BN, ReLU is applied after the BN layer, not the conv itself
        conv_act = LinearActivation() if with_bn[i - 1] else ReluActivation()
        conv_output = img_conv_layer(
            name="conv%d_%d" % (prefix_num, i),
            input=i_conv_in,
            filter_size=3,
            num_channels=i_channels_conv,
            num_filters=output_channels,
            stride=strides[i - 1],
            padding=1,
            act=conv_act)
        if with_bn[i - 1]:
            bn = batch_norm_layer(
                name="conv%d_%d_bn" % (prefix_num, i),
                input=conv_output,
                num_channels=output_channels,
                act=ReluActivation(),
                layer_attr=get_extra_layer_attr(drop_rate=drop_rates[i - 1]))
            group_output = bn
        else:
            group_output = conv_output
    pool = img_pool_layer(
        name="pool%d" % prefix_num,
        input=group_output,
        pool_size=2,
        num_channels=output_channels,
        stride=2)
    return pool
def vgg_conv_net(image_size,
                 num_classes,
                 num_layers,
                 channels,
                 strides,
                 with_bn,
                 fc_dims,
                 drop_rates,
                 drop_rates_fc=[],
                 is_color=True,
                 is_predict=False):
    """
    A Wrapper for a VGG network for image classification.
    It is a set of convolutional groups followed by several fully
    connected layers, and a cross-entropy classifiation loss.
    The detailed architecture of the paper can be found here:
    Very Deep Convolutional Networks for Large-Scale Visual Recognition
    http://www.robots.ox.ac.uk/~vgg/research/very_deep/
    image_size: image size.
    num_classes: num of classes.
    num_layers: the number of layers for all the convolution groups.
    channels: the number of output filters for all the convolution groups.
    with_bn: whether each layer of a convolution group is followed by a
             batch normalization.
    drop_rates: the dropout rates for all the convolutional layers.
    fc_dims: the dimension for all the fully connected layers.
    is_color: whether the input images are color.
    """
    data_input, label_input, num_image_channels = \
        image_data_layers(image_size, num_classes, is_color, is_predict)
    assert (len(num_layers) == len(channels))
    assert (len(num_layers) == len(strides))
    assert (len(num_layers) == len(with_bn))
    num_fc_layers = len(fc_dims)
    assert (num_fc_layers + 1 == len(drop_rates_fc))
    for i in range(len(num_layers)):
        input_layer = data_input if i == 0 else group_output
        # Bug fix: the first group's input channel count was hard-coded to 3,
        # which is wrong for grayscale input (is_color=False gives 1 channel,
        # as computed by image_data_layers above).
        input_channels = num_image_channels if i == 0 else channels[i - 1]
        group_output = conv_layer_group(
            prefix_num=i + 1,
            num_layers=num_layers[i],
            input=input_layer,
            input_channels=input_channels,
            output_channels=channels[i],
            drop_rates=drop_rates[i],
            strides=strides[i],
            with_bn=with_bn[i])
    conv_output_name = group_output
    # optional dropout between the conv stack and the first FC layer
    if drop_rates_fc[0] != 0.0:
        dropped_pool_name = "pool_dropped"
        conv_output_name = dropout_layer(
            name=dropped_pool_name,
            input=conv_output_name,
            dropout_rate=drop_rates_fc[0])
    for i in range(len(fc_dims)):
        input_layer_name = conv_output_name if i == 0 else fc_output
        # the last FC layer is linear (its ReLU comes from the BN layer below)
        active_type = LinearActivation() if i == len(
            fc_dims) - 1 else ReluActivation()
        drop_rate = 0.0 if i == len(fc_dims) - 1 else drop_rates_fc[i + 1]
        fc_output = fc_layer(
            name="fc%d" % (i + 1),
            input=input_layer_name,
            size=fc_dims[i],
            act=active_type,
            layer_attr=get_extra_layer_attr(drop_rate))
    bn = batch_norm_layer(
        name="fc_bn",
        input=fc_output,
        num_channels=fc_dims[len(fc_dims) - 1],
        act=ReluActivation(),
        layer_attr=get_extra_layer_attr(drop_rate=drop_rates_fc[-1]))
    output = fc_layer(
        name="output", input=bn, size=num_classes, act=SoftmaxActivation())
    if is_predict:
        outputs(output)
    else:
        cost = classification_cost(name="cost", input=output, label=label_input)
        outputs(cost)
def vgg16_conv_net(image_size, num_classes, is_color=True, is_predict=False):
    """
    A Wrapper for a 16 layers VGG network for image classification.
    The detailed architecture of the paper can be found here:
    Very Deep Convolutional Networks for Large-Scale Visual Recognition
    http://www.robots.ox.ac.uk/~vgg/research/very_deep/
    image_size: image size.
    num_classes: num of classes.
    is_color: whether the input images are color.
    """
    vgg_conv_net(image_size, num_classes,
                 num_layers=[2, 2, 3, 3, 3],
                 channels=[64, 128, 256, 512, 512],
                 strides=[[], [], [], [], []],
                 with_bn=[[False, True], [False, True], [False, False, True],
                          [False, False, True], [False, False, True]],
                 drop_rates=[[]] * 5,
                 drop_rates_fc=[0.0, 0.5, 0.5],
                 fc_dims=[4096, 4096],
                 # Bug fix: is_color was accepted but never forwarded, so the
                 # underlying network silently defaulted to color input.
                 is_color=is_color,
                 is_predict=is_predict)
def small_vgg(data_conf, is_predict=False):
    """
    A Wrapper for a small VGG network for CIFAR-10 image classification.
    The detailed architecture of the paper can be found here:
    92.45% on CIFAR-10 in Torch
    http://torch.ch/blog/2015/07/30/cifar.html
    Due to the constraints of CuDNN, it only has four convolutional groups
    rather than five.
    Thus, it only achieves 91.2% test accuracy and 98.1% training accuracy.
    data_conf is a dictionary with the following keys:
        image_size: image size.
        num_classes: num of classes.
        is_color: whether the input images are color.
    """
    # Inject the dataset description into module globals (Python 2: iteritems);
    # image_size / num_classes / is_color below resolve to those globals.
    for k, v in data_conf.iteritems():
        globals()[k] = v
    vgg_conv_net(image_size, num_classes,
                 num_layers=[2, 2, 3, 3],
                 channels=[64, 128, 256, 512],
                 strides=[[], [], [], []],
                 with_bn=[[True, True], [True, True], [True, True, True],
                          [True, True, True]],
                 drop_rates=[[0.3, 0.0], [0.4, 0.0],
                             [0.4, 0.4, 0.0], [0.4, 0.4, 0.0]],
                 drop_rates_fc=[0.5, 0.5],
                 fc_dims=[512],
                 # Bug fix: is_color from data_conf was never forwarded, so
                 # grayscale datasets were treated as 3-channel color input.
                 is_color=is_color,
                 is_predict=is_predict)
def training_settings(learning_rate=0.1,
                      batch_size=128,
                      algorithm="sgd",
                      momentum=0.9,
                      decay_rate=0.001):
    """
    Training settings.
    learning_rate: learning rate of the training.
    batch_size: the size of each training batch.
    algorithm: training algorithm, can be
       - sgd
       - adagrad
       - adadelta
       - rmsprop
    momentum: momentum of the training algorithm.
    decay_rate: weight decay rate.
    """
    # learning rate / decay rate are rescaled by batch size before being
    # handed to the global Settings machinery
    Settings(
        algorithm=algorithm,
        batch_size=batch_size,
        learning_rate=learning_rate / float(batch_size))
    default_momentum(momentum)
    default_decay_rate(decay_rate * batch_size)
| 14,269 | 36.454068 | 80 | py |
Paddle | Paddle-master/python/paddle/utils/torch2paddle.py | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert torch parameter file to paddle model files.
Note: must have torchfile installed in order to use this tool.
Usage: python torch2paddle.py -i torchfile.t7 -l layers.txt -o path/to/paddle_model
"""
import os
import sys
import struct
import numpy as np
import torchfile
import cPickle as pickle
import argparse
# save parameters
def save_layer_parameters(outfile, feats):
    """Serialize a list of numpy arrays into one raw Paddle parameter file.

    Layout: int32 version (0), uint32 per-value byte size (4 = float32),
    native-order uint64 value count, then the raw array payload.
    """
    version = 0
    value_size = 4
    # b"".join avoids the quadratic str += loop and is Python 3 safe;
    # tobytes() replaces the long-deprecated numpy tostring().
    payload = b"".join(feat.tobytes() for feat in feats)
    size = len(payload) // value_size
    with open(outfile, 'wb') as fo:  # guarantees the handle closes on error
        fo.write(struct.pack('iIQ', version, value_size, size))
        fo.write(payload)
def save_net_parameters(layers, params, output_path):
    """Write one (_name.w0, _name.wbias) file pair per layer name.

    *params* is the flat torchfile list [w0, b0, w1, b1, ...].
    (Python 2 module: print statement below.)
    """
    for i in range(len(layers)):
        weight = params[i * 2]
        biases = params[i * 2 + 1]
        weight_file = os.path.join(output_path, '_%s.w0' % layers[i])
        biases_file = os.path.join(output_path, '_%s.wbias' % layers[i])
        print "Saving for layer %s." % layers[i]
        # NOTE(review): weight is wrapped in a list but biases is passed bare;
        # save_layer_parameters iterates its argument, so biases is presumably
        # already a sequence of arrays — confirm against the torchfile layout.
        save_layer_parameters(weight_file, [weight])
        save_layer_parameters(biases_file, biases)
def load_layer_parameters(filename):
    """Read back one parameter file written by save_layer_parameters.

    Returns the flat numpy value array; the dtype is derived from the
    recorded per-value byte size (4 -> float32, otherwise float64).
    """
    # with-statement fixes the original leak: the handle was never closed
    with open(filename, 'rb') as fn:
        version, = struct.unpack('i', fn.read(4))
        value_length, = struct.unpack("I", fn.read(4))
        dtype = 'float32' if value_length == 4 else 'float64'
        # value count; reading it also positions the stream at the payload
        param_size, = struct.unpack("L", fn.read(8))
        value = np.fromfile(fn, dtype)
    return value
def main(argv):
    """
    main method of converting torch to paddle files.
    :param argv: command-line args (without the program name)
    :return: None; prints usage when required args are missing
    """
    cmdparser = argparse.ArgumentParser(
        "Convert torch parameter file to paddle model files.")
    cmdparser.add_argument(
        '-i', '--input', help='input filename of torch parameters')
    cmdparser.add_argument('-l', '--layers', help='list of layer names')
    cmdparser.add_argument(
        '-o', '--output', help='output file path of paddle model')
    args = cmdparser.parse_args(argv)
    if args.input and args.layers and args.output:
        # torchfile (third-party) yields the flat [w0, b0, w1, b1, ...] list
        params = torchfile.load(args.input)
        layers = [line.strip() for line in open(args.layers, 'r')]
        save_net_parameters(layers, params, args.output)
    else:
        print(
            'Usage: python torch2paddle.py -i torchfile.t7 -l layers.txt -o path/to/paddle_model'
        )
if __name__ == "__main__":
main(sys.argv[1:])
| 2,946 | 30.688172 | 97 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/average-checkpoints.py | """This file is nearly word-for-word taken from the folder tools in OpenNMT"""
import pkg_resources
import argparse
import torch
import os
def average_checkpoints(checkpoint_files):
    """Average 'model' and 'generator' weights across checkpoint files.

    'vocab' and 'opt' are taken from the first checkpoint; 'optim' is reset
    to None in the returned dict. Averaging is done in place on the first
    checkpoint's tensors via a running mean.
    """
    vocab = None
    opt = None
    avg_model = None
    avg_generator = None
    for idx, path in enumerate(checkpoint_files):
        checkpoint = torch.load(path, map_location='cpu')
        model_weights = checkpoint['model']
        generator_weights = checkpoint['generator']
        if idx == 0:
            vocab = checkpoint['vocab']
            opt = checkpoint['opt']
            avg_model = model_weights
            avg_generator = generator_weights
        else:
            # running mean: avg <- (avg * idx + new) / (idx + 1)
            for key in avg_model:
                avg_model[key].mul_(idx).add_(model_weights[key]).div_(idx + 1)
            for key in avg_generator:
                avg_generator[key].mul_(idx).add_(
                    generator_weights[key]).div_(idx + 1)
    return {"vocab": vocab, 'opt': opt, 'optim': None,
            "generator": avg_generator, "model": avg_model}
def main():
    """CLI: average the given checkpoint steps of one experiment folder."""
    parser = argparse.ArgumentParser(description='This script merges checkpoints of the same model')
    parser.add_argument('--folder', dest="folder", help="experiment name")
    parser.add_argument('--steps', dest="steps", nargs="+", help="checkpoints step numbers")
    args = parser.parse_args()
    # experiments/ is resolved relative to this package via pkg_resources
    expfolder = pkg_resources.resource_filename(__name__, 'experiments')
    model_folder = os.path.join(expfolder, args.folder, 'models')
    assert os.path.exists(model_folder), f'{model_folder} is not a valid folder'
    checkpoint_files = [os.path.join(model_folder, f'model_step_{step}.pt') for step in args.steps]
    avg_cp = average_checkpoints(checkpoint_files)
    torch.save(avg_cp, os.path.join(model_folder, 'avg_model.pt'))
if __name__ == "__main__":
main()
| 1,847 | 33.867925 | 100 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/batch_translate.py | import subprocess
import functools
import argparse
import torch
import os
import re
partial_shell= = functools.partial(subprocess.run, shell=True,
stdout=subprocess.PIPE)
def shell(cmd):
    """Execute cmd as if from the command line.

    Returns the command's captured stdout decoded as UTF-8 text.
    NOTE: the command runs through the shell (shell=True in partial_shell),
    so *cmd* must come from a trusted source.
    """
    completed_process = partial_shell(cmd)
    # Fixed typo: the original called the non-existent method `decde`.
    return completed_process.stdout.decode('utf8')
if __name__ == "__main__":
parser = argparse.ArgumentParser() | 434 | 23.166667 | 62 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/opts.py | """ Implementation of all available options """
from __future__ import print_function
import configargparse
from onmt.models.sru import CheckSRU
def config_opts(parser):
    """Add the configargparse options for reading a run configuration from,
    and writing it back to, a config file."""
    parser.add('-config', '--config', required=False,
               is_config_file_arg=True, help='config file path')
    parser.add('-save_config', '--save_config', required=False,
               is_write_out_config_file_arg=True,
               help='config file save path')
def model_opts(parser):
    """Add all model-architecture options (embeddings, encoder/decoder,
    attention, alignment, generator/loss) to ``parser``.

    These options are passed to the construction of the model.
    Be careful with these as they will be used during translation.
    """
    # Embedding Options
    group = parser.add_argument_group('Model-Embeddings')
    group.add('--src_word_vec_size', '-src_word_vec_size',
              type=int, default=500,
              help='Word embedding size for src.')
    group.add('--tgt_word_vec_size', '-tgt_word_vec_size',
              type=int, default=500,
              help='Word embedding size for tgt.')
    group.add('--word_vec_size', '-word_vec_size', type=int, default=-1,
              help='Word embedding size for src and tgt.')
    group.add('--share_decoder_embeddings', '-share_decoder_embeddings',
              action='store_true',
              help="Use a shared weight matrix for the input and "
                   "output word embeddings in the decoder.")
    group.add('--share_embeddings', '-share_embeddings', action='store_true',
              help="Share the word embeddings between encoder "
                   "and decoder. Need to use shared dictionary for this "
                   "option.")
    group.add('--position_encoding', '-position_encoding', action='store_true',
              help="Use a sin to mark relative words positions. "
                   "Necessary for non-RNN style models.")
    group = parser.add_argument_group('Model-Embedding Features')
    group.add('--feat_merge', '-feat_merge', type=str, default='concat',
              choices=[None, 'concat', 'sum', 'mlp'],
              help="Merge action for incorporating features embeddings. "
                   "Options [concat|sum|mlp].")
    group.add('--feat_merge_activation', '-feat_merge_activation',
              type=str, default='ReLU', choices = [None, 'ReLU', 'Tanh'])
    group.add('--feat_vec_size', '-feat_vec_size', type=int, default=-1,
              help="If specified, feature embedding sizes "
                   "will be set to this. Otherwise, feat_vec_exponent "
                   "will be used.")
    group.add('--feat_vec_exponent', '-feat_vec_exponent',
              type=float, default=0.7,
              help="If -feat_merge_size is not set, feature "
                   "embedding sizes will be set to N^feat_vec_exponent "
                   "where N is the number of values the feature takes.")
    # Encoder-Decoder Options
    group = parser.add_argument_group('Model- Encoder-Decoder')
    group.add('--model_type', '-model_type', default='text',
              choices=['text', 'table', 'img', 'audio', 'vec'],
              help="Type of source model to use. Allows "
                   "the system to incorporate non-text inputs. "
                   "Options are [text|img|audio|vec].")
    group.add('--model_dtype', '-model_dtype', default='fp32',
              choices=['fp32', 'fp16'],
              help='Data type of the model.')
    group.add('--encoder_type', '-encoder_type', type=str, default='rnn',
              choices=['rnn', 'brnn', 'mean', 'transformer', 'htransformer','cnn'],
              help="Type of encoder layer to use. Non-RNN layers "
                   "are experimental. Options are "
                   "[rnn|brnn|mean|transformer|cnn].")
    group.add('--decoder_type', '-decoder_type', type=str, default='rnn',
              choices=['rnn', 'hrnn', 'transformer', 'cnn'],
              help="Type of decoder layer to use. Non-RNN layers "
                   "are experimental. Options are "
                   "[rnn|hrrn|transformer|cnn].")
    group.add('--layers', '-layers', type=int, default=-1,
              help='Number of layers in enc/dec.')
    # Encoder
    group.add('--enc_layers', '-enc_layers', type=int, default=2,
              help='Number of layers in the encoder')
    group.add('--units_layers', '-units_layers', type=int, default=2,
              help='Number of layers in the units encoder (when hierarchical)')
    group.add('--chunks_layers', '-chunks_layers', type=int, default=2,
              help='Number of layers in the chunks encoder (when hierarchical)')
    group.add('--glu_depth', '-glu_depth', type=int, default=2,
              help='Number of glu layers in the encoder (when hierarchical)')
    group.add('--units_glu_depth', '-units_glu_depth', type=int, default=2,
              help='Number of glu layers in the units_encoder (when hierarchical)')
    group.add('--chunks_glu_depth', '-chunks_glu_depth', type=int, default=2,
              help='Number of glu layers in the chunks_encoder (when hierarchical)')
    group.add('--units_heads', '-units_heads', type=int, default=2,
              help='Number of heads in the units encoder (when hierarchical)')
    group.add('--chunks_heads', '-chunks_heads', type=int, default=2,
              help='Number of heads in the chunks encoder (when hierarchical)')
    # Decoder
    group.add('--dec_layers', '-dec_layers', type=int, default=2,
              help='Number of layers in the decoder')
    group.add('--rnn_size', '-rnn_size', type=int, default=-1,
              help="Size of rnn hidden states. Overwrites "
                   "enc_rnn_size and dec_rnn_size")
    group.add('--enc_rnn_size', '-enc_rnn_size', type=int, default=500,
              help="Size of encoder rnn hidden states. "
                   "Must be equal to dec_rnn_size except for "
                   "speech-to-text.")
    group.add('--dec_rnn_size', '-dec_rnn_size', type=int, default=500,
              help="Size of decoder rnn hidden states. "
                   "Must be equal to enc_rnn_size except for "
                   "speech-to-text.")
    group.add('--audio_enc_pooling', '-audio_enc_pooling',
              type=str, default='1',
              help="The amount of pooling of audio encoder, "
                   "either the same amount of pooling across all layers "
                   "indicated by a single number, or different amounts of "
                   "pooling per layer separated by comma.")
    group.add('--cnn_kernel_width', '-cnn_kernel_width', type=int, default=3,
              help="Size of windows in the cnn, the kernel_size is "
                   "(cnn_kernel_width, 1) in conv layer")
    group.add('--input_feed', '-input_feed', type=int, default=1,
              help="Feed the context vector at each time step as "
                   "additional input (via concatenation with the word "
                   "embeddings) to the decoder.")
    group.add('--bridge', '-bridge', action="store_true",
              help="Have an additional layer between the last encoder "
                   "state and the first decoder state")
    group.add('--rnn_type', '-rnn_type', type=str, default='LSTM',
              choices=['LSTM', 'GRU', 'SRU'],
              action=CheckSRU,
              help="The gate type to use in the RNNs")
    # group.add('--residual', '-residual', action="store_true",
    #           help="Add residual connections between RNN layers.")
    group.add('--brnn', '-brnn', action=DeprecateAction,
              help="Deprecated, use `encoder_type`.")
    group.add('--context_gate', '-context_gate', type=str, default=None,
              choices=['source', 'target', 'both'],
              help="Type of context gate to use. "
                   "Do not select for no context gate.")
    # Attention options
    group = parser.add_argument_group('Model- Attention')
    group.add('--global_attention', '-global_attention',
              type=str, default='general',
              choices=['dot', 'general', 'mlp', 'none'],
              help="The attention type to use: "
                   "dotprod or general (Luong) or MLP (Bahdanau)")
    group.add('--global_attention_function', '-global_attention_function',
              type=str, default="softmax", choices=["softmax", "sparsemax"])
    group.add('--self_attn_type', '-self_attn_type',
              type=str, default="scaled-dot",
              help='Self attention type in Transformer decoder '
                   'layer -- currently "scaled-dot" or "average" ')
    group.add('--max_relative_positions', '-max_relative_positions',
              type=int, default=0,
              help="Maximum distance between inputs in relative "
                   "positions representations. "
                   "For more detailed information, see: "
                   "https://arxiv.org/pdf/1803.02155.pdf")
    group.add('--heads', '-heads', type=int, default=8,
              help='Number of heads for transformer self-attention')
    group.add('--transformer_ff', '-transformer_ff', type=int, default=2048,
              help='Size of hidden transformer feed-forward')
    group.add('--aan_useffn', '-aan_useffn', action="store_true",
              help='Turn on the FFN layer in the AAN decoder')
    group.add('--use_pos', '-use_pos', action='store_true',
              help='Use position (ie column name) instead of value for the hierarchical attention on the units level')
    # Alignement options
    group = parser.add_argument_group('Model - Alignement')
    group.add('--lambda_align', '-lambda_align', type=float, default=0.0,
              help="Lambda value for alignement loss of Garg et al (2019)"
                   "For more detailed information, see: "
                   "https://arxiv.org/abs/1909.02074")
    group.add('--alignment_layer', '-alignment_layer', type=int, default=-3,
              help='Layer number which has to be supervised.')
    group.add('--alignment_heads', '-alignment_heads', type=int, default=None,
              help='N. of cross attention heads per layer to supervised with')
    group.add('--full_context_alignment', '-full_context_alignment',
              action="store_true",
              help='Whether alignment is conditioned on full target context.')
    # Generator and loss options.
    group = parser.add_argument_group('Generator')
    group.add('--copy_attn', '-copy_attn', action="store_true",
              help='Train copy attention layer.')
    group.add('--copy_attn_type', '-copy_attn_type',
              type=str, default=None,
              choices=['dot', 'general', 'mlp', 'none'],
              help="The copy attention type to use. Leave as None to use "
                   "the same as -global_attention.")
    group.add('--generator_function', '-generator_function', default="softmax",
              choices=["softmax", "sparsemax"],
              help="Which function to use for generating "
                   "probabilities over the target vocabulary (choices: "
                   "softmax, sparsemax)")
    group.add('--copy_attn_force', '-copy_attn_force', action="store_true",
              help='When available, train to copy.')
    group.add('--reuse_copy_attn', '-reuse_copy_attn', action="store_true",
              help="Reuse standard attention for copy")
    group.add('--copy_loss_by_seqlength', '-copy_loss_by_seqlength',
              action="store_true",
              help="Divide copy loss by length of sequence")
    group.add('--coverage_attn', '-coverage_attn', action="store_true",
              help='Train a coverage attention layer.')
    group.add('--lambda_coverage', '-lambda_coverage', type=float, default=0.0,
              help='Lambda value for coverage loss of See et al (2017)')
    group.add('--loss_scale', '-loss_scale', type=float, default=0,
              help="For FP16 training, the static loss scale to use. If not "
                   "set, the loss scale is dynamically computed.")
    group.add('--apex_opt_level', '-apex_opt_level', type=str, default="O1",
              choices=["O0", "O1", "O2", "O3"],
              help="For FP16 training, the opt_level to use."
                   "See https://nvidia.github.io/apex/amp.html#opt-levels.")
def preprocess_opts(parser):
    """Add all data pre-processing options (corpora paths, vocabulary,
    truncation, shuffling, logging, speech/image specifics) to ``parser``."""
    # Data options
    group = parser.add_argument_group('Data')
    group.add('--data_type', '-data_type', default="text",
              help="Type of the source input. "
                   "Options are [text|img|audio|vec].")
    group.add('--train_src', '-train_src', required=True, nargs='+',
              help="Path(s) to the training source data")
    group.add('--train_tgt', '-train_tgt', required=True, nargs='+',
              help="Path(s) to the training target data")
    group.add('--train_align', '-train_align', nargs='+', default=[None],
              help="Path(s) to the training src-tgt alignment")
    group.add('--train_ids', '-train_ids', nargs='+', default=[None],
              help="ids to name training shards, used for corpus weighting")
    group.add('--valid_src', '-valid_src',
              help="Path to the validation source data")
    group.add('--valid_tgt', '-valid_tgt',
              help="Path to the validation target data")
    group.add('--valid_align', '-valid_align', default=None,
              help="Path(s) to the validation src-tgt alignment")
    group.add('--src_dir', '-src_dir', default="",
              help="Source directory for image or audio files.")
    group.add('--save_data', '-save_data', required=True,
              help="Output file for the prepared data")
    group.add('--max_shard_size', '-max_shard_size', type=int, default=0,
              help="""Deprecated use shard_size instead""")
    group.add('--shard_size', '-shard_size', type=int, default=1000000,
              help="Divide src_corpus and tgt_corpus into "
                   "smaller multiple src_copus and tgt corpus files, then "
                   "build shards, each shard will have "
                   "opt.shard_size samples except last shard. "
                   "shard_size=0 means no segmentation "
                   "shard_size>0 means segment dataset into multiple shards, "
                   "each shard has shard_size samples")
    group.add('--num_threads', '-num_threads', type=int, default=1,
              help="Number of shards to build in parallel.")
    group.add('--overwrite', '-overwrite', action="store_true",
              help="Overwrite existing shards if any.")
    # Dictionary options, for text corpus
    group = parser.add_argument_group('Vocab')
    # if you want to pass an existing vocab.pt file, pass it to
    # -src_vocab alone as it already contains tgt vocab.
    group.add('--src_vocab', '-src_vocab', default="",
              help="Path to an existing source vocabulary. Format: "
                   "one word per line.")
    group.add('--tgt_vocab', '-tgt_vocab', default="",
              help="Path to an existing target vocabulary. Format: "
                   "one word per line.")
    group.add('--features_vocabs_prefix', '-features_vocabs_prefix',
              type=str, default='',
              help="Path prefix to existing features vocabularies")
    group.add('--src_vocab_size', '-src_vocab_size', type=int, default=50000,
              help="Size of the source vocabulary")
    group.add('--tgt_vocab_size', '-tgt_vocab_size', type=int, default=50000,
              help="Size of the target vocabulary")
    group.add('--vocab_size_multiple', '-vocab_size_multiple',
              type=int, default=1,
              help="Make the vocabulary size a multiple of this value")
    group.add('--src_words_min_frequency',
              '-src_words_min_frequency', type=int, default=0)
    group.add('--tgt_words_min_frequency',
              '-tgt_words_min_frequency', type=int, default=0)
    group.add('--dynamic_dict', '-dynamic_dict', action='store_true',
              help="Create dynamic dictionaries")
    group.add('--share_vocab', '-share_vocab', action='store_true',
              help="Share source and target vocabulary")
    # Truncation options, for text corpus
    group = parser.add_argument_group('Pruning')
    group.add('--src_seq_length', '-src_seq_length', type=int, default=50,
              help="Maximum source sequence length")
    group.add('--src_seq_length_trunc', '-src_seq_length_trunc',
              type=int, default=None,
              help="Truncate source sequence length.")
    group.add('--tgt_seq_length', '-tgt_seq_length', type=int, default=50,
              help="Maximum target sequence length to keep.")
    group.add('--tgt_seq_length_trunc', '-tgt_seq_length_trunc',
              type=int, default=None,
              help="Truncate target sequence length.")
    group.add('--lower', '-lower', action='store_true', help='lowercase data')
    group.add('--filter_valid', '-filter_valid', action='store_true',
              help='Filter validation data by src and/or tgt length')
    # Data processing options
    group = parser.add_argument_group('Random')
    group.add('--shuffle', '-shuffle', type=int, default=0,
              help="Shuffle data")
    group.add('--seed', '-seed', type=int, default=3435,
              help="Random seed")
    group = parser.add_argument_group('Logging')
    group.add('--report_every', '-report_every', type=int, default=100000,
              help="Report status every this many sentences")
    group.add('--log_file', '-log_file', type=str, default="",
              help="Output logs to a file under this path.")
    group.add('--log_file_level', '-log_file_level', type=str,
              action=StoreLoggingLevelAction,
              choices=StoreLoggingLevelAction.CHOICES,
              default="0")
    # Options most relevant to speech
    group = parser.add_argument_group('Speech')
    group.add('--sample_rate', '-sample_rate', type=int, default=16000,
              help="Sample rate.")
    group.add('--window_size', '-window_size', type=float, default=.02,
              help="Window size for spectrogram in seconds.")
    group.add('--window_stride', '-window_stride', type=float, default=.01,
              help="Window stride for spectrogram in seconds.")
    group.add('--window', '-window', default='hamming',
              help="Window type for spectrogram generation.")
    # Option most relevant to image input
    group.add('--image_channel_size', '-image_channel_size',
              type=int, default=3,
              choices=[3, 1],
              help="Using grayscale image can training "
                   "model faster and smaller")
def train_opts(parser):
    """Add all training options (checkpointing, GPU/distributed setup,
    initialization, optimization, learning-rate schedule, logging) to
    ``parser``."""
    group = parser.add_argument_group('General')
    group.add('--data', '-data', required=True,
              help='Path prefix to the ".train.pt" and '
                   '".valid.pt" file path from preprocess.py')
    group.add('--data_ids', '-data_ids', nargs='+', default=[None],
              help="In case there are several corpora.")
    group.add('--data_weights', '-data_weights', type=int, nargs='+',
              default=[1], help="""Weights of different corpora,
              should follow the same order as in -data_ids.""")
    group.add('--save_model', '-save_model', default='model',
              help="Model filename (the model will be saved as "
                   "<save_model>_N.pt where N is the number "
                   "of steps")
    group.add('--save_checkpoint_steps', '-save_checkpoint_steps',
              type=int, default=5000,
              help="""Save a checkpoint every X steps""")
    group.add('--keep_checkpoint', '-keep_checkpoint', type=int, default=-1,
              help="Keep X checkpoints (negative: keep all)")
    # GPU
    group.add('--gpuid', '-gpuid', default=[], nargs='*', type=int,
              help="Deprecated see world_size and gpu_ranks.")
    group.add('--gpu_ranks', '-gpu_ranks', default=[], nargs='*', type=int,
              help="list of ranks of each process.")
    group.add('--world_size', '-world_size', default=1, type=int,
              help="total number of distributed processes.")
    group.add('--gpu_backend', '-gpu_backend',
              default="nccl", type=str,
              help="Type of torch distributed backend")
    group.add('--gpu_verbose_level', '-gpu_verbose_level', default=0, type=int,
              help="Gives more info on each process per GPU.")
    group.add('--master_ip', '-master_ip', default="localhost", type=str,
              help="IP of master for torch.distributed training.")
    group.add('--master_port', '-master_port', default=10000, type=int,
              help="Port of master for torch.distributed training.")
    group.add('--queue_size', '-queue_size', default=400, type=int,
              help="Size of queue for each process in producer/consumer")
    group.add('--seed', '-seed', type=int, default=-1,
              help="Random seed used for the experiments "
                   "reproducibility.")
    # Init options
    group = parser.add_argument_group('Initialization')
    group.add('--param_init', '-param_init', type=float, default=0.1,
              help="Parameters are initialized over uniform distribution "
                   "with support (-param_init, param_init). "
                   "Use 0 to not use initialization")
    group.add('--param_init_glorot', '-param_init_glorot', action='store_true',
              help="Init parameters with xavier_uniform. "
                   "Required for transformer.")
    group.add('--train_from', '-train_from', default='', type=str,
              help="If training from a checkpoint then this is the "
                   "path to the pretrained model's state_dict.")
    group.add('--reset_optim', '-reset_optim', default='none',
              choices=['none', 'all', 'states', 'keep_states'],
              help="Optimization resetter when train_from.")
    # Pretrained word vectors
    group.add('--pre_word_vecs_enc', '-pre_word_vecs_enc',
              help="If a valid path is specified, then this will load "
                   "pretrained word embeddings on the encoder side. "
                   "See README for specific formatting instructions.")
    group.add('--pre_word_vecs_dec', '-pre_word_vecs_dec',
              help="If a valid path is specified, then this will load "
                   "pretrained word embeddings on the decoder side. "
                   "See README for specific formatting instructions.")
    # Fixed word vectors
    group.add('--fix_word_vecs_enc', '-fix_word_vecs_enc',
              action='store_true',
              help="Fix word embeddings on the encoder side.")
    group.add('--fix_word_vecs_dec', '-fix_word_vecs_dec',
              action='store_true',
              help="Fix word embeddings on the decoder side.")
    # Optimization options
    group = parser.add_argument_group('Optimization- Type')
    group.add('--batch_size', '-batch_size', type=int, default=64,
              help='Maximum batch size for training')
    group.add('--batch_type', '-batch_type', default='sents',
              choices=["sents", "tokens"],
              help="Batch grouping for batch_size. Standard "
                   "is sents. Tokens will do dynamic batching")
    group.add('--pool_factor', '-pool_factor', type=int, default=8192,
              help="""Factor used in data loading and batch creations.
              It will load the equivalent of `pool_factor` batches,
              sort them by the according `sort_key` to produce
              homogeneous batches and reduce padding, and yield
              the produced batches in a shuffled way.
              Inspired by torchtext's pool mechanism.""")
    group.add('--normalization', '-normalization', default='sents',
              choices=["sents", "tokens"],
              help='Normalization method of the gradient.')
    group.add('--accum_count', '-accum_count', type=int, nargs='+',
              default=[1],
              help="Accumulate gradient this many times. "
                   "Approximately equivalent to updating "
                   "batch_size * accum_count batches at once. "
                   "Recommended for Transformer.")
    group.add('--accum_steps', '-accum_steps', type=int, nargs='+',
              default=[0], help="Steps at which accum_count values change")
    group.add('--valid_steps', '-valid_steps', type=int, default=10000,
              help='Perfom validation every X steps')
    group.add('--valid_batch_size', '-valid_batch_size', type=int, default=32,
              help='Maximum batch size for validation')
    group.add('--max_generator_batches', '-max_generator_batches',
              type=int, default=32,
              help="Maximum batches of words in a sequence to run "
                   "the generator on in parallel. Higher is faster, but "
                   "uses more memory. Set to 0 to disable.")
    group.add('--train_steps', '-train_steps', type=int, default=100000,
              help='Number of training steps')
    group.add('--single_pass', '-single_pass', action='store_true',
              help="Make a single pass over the training dataset.")
    group.add('--epochs', '-epochs', type=int, default=0,
              help='Deprecated epochs see train_steps')
    group.add('--early_stopping', '-early_stopping', type=int, default=0,
              help='Number of validation steps without improving.')
    group.add('--early_stopping_criteria', '-early_stopping_criteria',
              nargs="*", default=None,
              help='Criteria to use for early stopping.')
    group.add('--optim', '-optim', default='sgd',
              choices=['sgd', 'adagrad', 'adadelta', 'adam',
                       'sparseadam', 'adafactor', 'fusedadam'],
              help="Optimization method.")
    group.add('--adagrad_accumulator_init', '-adagrad_accumulator_init',
              type=float, default=0,
              help="Initializes the accumulator values in adagrad. "
                   "Mirrors the initial_accumulator_value option "
                   "in the tensorflow adagrad (use 0.1 for their default).")
    group.add('--max_grad_norm', '-max_grad_norm', type=float, default=5,
              help="If the norm of the gradient vector exceeds this, "
                   "renormalize it to have the norm equal to "
                   "max_grad_norm")
    group.add('--dropout', '-dropout', type=float, default=[0.3], nargs='+',
              help="Dropout probability; applied in LSTM stacks.")
    group.add('--attention_dropout', '-attention_dropout', type=float,
              default=[0.1], nargs='+',
              help="Attention Dropout probability.")
    group.add('--dropout_steps', '-dropout_steps', type=int, nargs='+',
              default=[0], help="Steps at which dropout changes.")
    group.add('--truncated_decoder', '-truncated_decoder', type=int, default=0,
              help="""Truncated bptt.""")
    group.add('--adam_beta1', '-adam_beta1', type=float, default=0.9,
              help="The beta1 parameter used by Adam. "
                   "Almost without exception a value of 0.9 is used in "
                   "the literature, seemingly giving good results, "
                   "so we would discourage changing this value from "
                   "the default without due consideration.")
    group.add('--adam_beta2', '-adam_beta2', type=float, default=0.999,
              help='The beta2 parameter used by Adam. '
                   'Typically a value of 0.999 is recommended, as this is '
                   'the value suggested by the original paper describing '
                   'Adam, and is also the value adopted in other frameworks '
                   'such as Tensorflow and Keras, i.e. see: '
                   'https://www.tensorflow.org/api_docs/python/tf/train/Adam'
                   'Optimizer or https://keras.io/optimizers/ . '
                   'Whereas recently the paper "Attention is All You Need" '
                   'suggested a value of 0.98 for beta2, this parameter may '
                   'not work well for normal models / default '
                   'baselines.')
    group.add('--label_smoothing', '-label_smoothing', type=float, default=0.0,
              help="Label smoothing value epsilon. "
                   "Probabilities of all non-true labels "
                   "will be smoothed by epsilon / (vocab_size - 1). "
                   "Set to zero to turn off label smoothing. "
                   "For more detailed information, see: "
                   "https://arxiv.org/abs/1512.00567")
    group.add('--average_decay', '-average_decay', type=float, default=0,
              help="Moving average decay. "
                   "Set to other than 0 (e.g. 1e-4) to activate. "
                   "Similar to Marian NMT implementation: "
                   "http://www.aclweb.org/anthology/P18-4020 "
                   "For more detail on Exponential Moving Average: "
                   "https://en.wikipedia.org/wiki/Moving_average")
    group.add('--average_every', '-average_every', type=int, default=1,
              help="Step for moving average. "
                   "Default is every update, "
                   "if -average_decay is set.")
    # learning rate
    group = parser.add_argument_group('Optimization- Rate')
    group.add('--learning_rate', '-learning_rate', type=float, default=1.0,
              help="Starting learning rate. "
                   "Recommended settings: sgd = 1, adagrad = 0.1, "
                   "adadelta = 1, adam = 0.001")
    group.add('--learning_rate_decay', '-learning_rate_decay',
              type=float, default=0.5,
              help="If update_learning_rate, decay learning rate by "
                   "this much if steps have gone past "
                   "start_decay_steps")
    group.add('--start_decay_steps', '-start_decay_steps',
              type=int, default=50000,
              help="Start decaying every decay_steps after "
                   "start_decay_steps")
    group.add('--decay_steps', '-decay_steps', type=int, default=10000,
              help="Decay every decay_steps")
    group.add('--decay_method', '-decay_method', type=str, default="none",
              choices=['noam', 'noamwd', 'rsqrt', 'none'],
              help="Use a custom decay rate.")
    group.add('--warmup_steps', '-warmup_steps', type=int, default=4000,
              help="Number of warmup steps for custom decay.")
    group = parser.add_argument_group('Logging')
    group.add('--report_every', '-report_every', type=int, default=50,
              help="Print stats at this interval.")
    group.add('--log_file', '-log_file', type=str, default="",
              help="Output logs to a file under this path.")
    group.add('--log_file_level', '-log_file_level', type=str,
              action=StoreLoggingLevelAction,
              choices=StoreLoggingLevelAction.CHOICES,
              default="0")
    group.add('--exp_host', '-exp_host', type=str, default="",
              help="Send logs to this crayon server.")
    group.add('--exp', '-exp', type=str, default="",
              help="Name of the experiment for logging.")
    # Use Tensorboard for visualization during training
    group.add('--tensorboard', '-tensorboard', action="store_true",
              help="Use tensorboard for visualization during training. "
                   "Must have the library tensorboard >= 1.14.")
    group.add("--tensorboard_log_dir", "-tensorboard_log_dir",
              type=str, default="runs/onmt",
              help="Log directory for Tensorboard. "
                   "This is also the name of the run.")
    group = parser.add_argument_group('Speech')
    # Options most relevant to speech
    group.add('--sample_rate', '-sample_rate', type=int, default=16000,
              help="Sample rate.")
    group.add('--window_size', '-window_size', type=float, default=.02,
              help="Window size for spectrogram in seconds.")
    # Option most relevant to image input
    group.add('--image_channel_size', '-image_channel_size',
              type=int, default=3, choices=[3, 1],
              help="Using grayscale image can training "
                   "model faster and smaller")
def translate_opts(parser):
    """Add all translation / inference options (model paths, data, random
    sampling, beam search, logging, efficiency, speech/image specifics) to
    ``parser``."""
    group = parser.add_argument_group('Model')
    group.add('--model', '-model', dest='models', metavar='MODEL',
              nargs='+', type=str, default=[], required=True,
              help="Path to model .pt file(s). "
                   "Multiple models can be specified, "
                   "for ensemble decoding.")
    group.add('--fp32', '-fp32', action='store_true',
              help="Force the model to be in FP32 "
                   "because FP16 is very slow on GTX1080(ti).")
    group.add('--avg_raw_probs', '-avg_raw_probs', action='store_true',
              help="If this is set, during ensembling scores from "
                   "different models will be combined by averaging their "
                   "raw probabilities and then taking the log. Otherwise, "
                   "the log probabilities will be averaged directly. "
                   "Necessary for models whose output layers can assign "
                   "zero probability.")
    group = parser.add_argument_group('Data')
    group.add('--data_type', '-data_type', default="text",
              help="Type of the source input. Options: [text|img].")
    group.add('--src', '-src', required=True,
              help="Source sequence to decode (one line per "
                   "sequence)")
    group.add('--src_dir', '-src_dir', default="",
              help='Source directory for image or audio files')
    group.add('--tgt', '-tgt',
              help='True target sequence (optional)')
    group.add('--shard_size', '-shard_size', type=int, default=10000,
              help="Divide src and tgt (if applicable) into "
                   "smaller multiple src and tgt files, then "
                   "build shards, each shard will have "
                   "opt.shard_size samples except last shard. "
                   "shard_size=0 means no segmentation "
                   "shard_size>0 means segment dataset into multiple shards, "
                   "each shard has shard_size samples")
    group.add('--output', '-output', default='pred.txt',
              help="Path to output the predictions (each line will "
                   "be the decoded sequence")
    group.add('--report_align', '-report_align', action='store_true',
              help="Report alignment for each translation.")
    group.add('--report_time', '-report_time', action='store_true',
              help="Report some translation time metrics")
    # Options most relevant to summarization.
    group.add('--dynamic_dict', '-dynamic_dict', action='store_true',
              help="Create dynamic dictionaries")
    group.add('--share_vocab', '-share_vocab', action='store_true',
              help="Share source and target vocabulary")
    group = parser.add_argument_group('Random Sampling')
    group.add('--random_sampling_topk', '-random_sampling_topk',
              default=1, type=int,
              help="Set this to -1 to do random sampling from full "
                   "distribution. Set this to value k>1 to do random "
                   "sampling restricted to the k most likely next tokens. "
                   "Set this to 1 to use argmax or for doing beam "
                   "search.")
    group.add('--random_sampling_temp', '-random_sampling_temp',
              default=1., type=float,
              help="If doing random sampling, divide the logits by "
                   "this before computing softmax during decoding.")
    group.add('--seed', '-seed', type=int, default=829,
              help="Random seed")
    group = parser.add_argument_group('Beam')
    group.add('--beam_size', '-beam_size', type=int, default=5,
              help='Beam size')
    group.add('--min_length', '-min_length', type=int, default=0,
              help='Minimum prediction length')
    group.add('--max_length', '-max_length', type=int, default=100,
              help='Maximum prediction length.')
    group.add('--max_sent_length', '-max_sent_length', action=DeprecateAction,
              help="Deprecated, use `-max_length` instead")
    # Alpha and Beta values for Google Length + Coverage penalty
    # Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
    group.add('--stepwise_penalty', '-stepwise_penalty', action='store_true',
              help="Apply penalty at every decoding step. "
                   "Helpful for summary penalty.")
    group.add('--length_penalty', '-length_penalty', default='none',
              choices=['none', 'wu', 'avg'],
              help="Length Penalty to use.")
    group.add('--ratio', '-ratio', type=float, default=-0.,
              help="Ratio based beam stop condition")
    group.add('--coverage_penalty', '-coverage_penalty', default='none',
              choices=['none', 'wu', 'summary'],
              help="Coverage Penalty to use.")
    group.add('--alpha', '-alpha', type=float, default=0.,
              help="Google NMT length penalty parameter "
                   "(higher = longer generation)")
    group.add('--beta', '-beta', type=float, default=-0.,
              help="Coverage penalty parameter")
    group.add('--block_ngram_repeat', '-block_ngram_repeat',
              type=int, default=0,
              help='Block repetition of ngrams during decoding.')
    group.add('--ignore_when_blocking', '-ignore_when_blocking',
              nargs='+', type=str, default=[],
              help="Ignore these strings when blocking repeats. "
                   "You want to block sentence delimiters.")
    group.add('--replace_unk', '-replace_unk', action="store_true",
              help="Replace the generated UNK tokens with the "
                   "source token that had highest attention weight. If "
                   "phrase_table is provided, it will look up the "
                   "identified source token and give the corresponding "
                   "target token. If it is not provided (or the identified "
                   "source token does not exist in the table), then it "
                   "will copy the source token.")
    group.add('--phrase_table', '-phrase_table', type=str, default="",
              help="If phrase_table is provided (with replace_unk), it will "
                   "look up the identified source token and give the "
                   "corresponding target token. If it is not provided "
                   "(or the identified source token does not exist in "
                   "the table), then it will copy the source token.")
    group = parser.add_argument_group('Logging')
    group.add('--verbose', '-verbose', action="store_true",
              help='Print scores and predictions for each sentence')
    group.add('--log_file', '-log_file', type=str, default="",
              help="Output logs to a file under this path.")
    group.add('--log_file_level', '-log_file_level', type=str,
              action=StoreLoggingLevelAction,
              choices=StoreLoggingLevelAction.CHOICES,
              default="0")
    group.add('--attn_debug', '-attn_debug', action="store_true",
              help='Print best attn for each word')
    group.add('--dump_attn', '-dump_attn', action="store_true",
              help="Dump attention score to this folder")
    group.add('--align_debug', '-align_debug', action="store_true",
              help='Print best align for each word')
    group.add('--dump_beam', '-dump_beam', type=str, default="",
              help='File to dump beam information to.')
    group.add('--n_best', '-n_best', type=int, default=1,
              help="If verbose is set, will output the n_best "
                   "decoded sentences")
    group = parser.add_argument_group('Efficiency')
    group.add('--batch_size', '-batch_size', type=int, default=30,
              help='Batch size')
    group.add('--batch_type', '-batch_type', default='sents',
              choices=["sents", "tokens"],
              help="Batch grouping for batch_size. Standard "
                   "is sents. Tokens will do dynamic batching")
    group.add('--gpu', '-gpu', type=int, default=-1,
              help="Device to run on")
    # Options most relevant to speech.
    group = parser.add_argument_group('Speech')
    group.add('--sample_rate', '-sample_rate', type=int, default=16000,
              help="Sample rate.")
    group.add('--window_size', '-window_size', type=float, default=.02,
              help='Window size for spectrogram in seconds')
    group.add('--window_stride', '-window_stride', type=float, default=.01,
              help='Window stride for spectrogram in seconds')
    group.add('--window', '-window', default='hamming',
              help='Window type for spectrogram generation')
    # Option most relevant to image input
    group.add('--image_channel_size', '-image_channel_size',
              type=int, default=3, choices=[3, 1],
              help="Using grayscale image can training "
                   "model faster and smaller")
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class StoreLoggingLevelAction(configargparse.Action):
    """Argparse action that maps a level name to its ``logging`` constant."""
    import logging

    # Symbolic name -> numeric logging level, e.g. "DEBUG" -> logging.DEBUG.
    LEVELS = {
        name: getattr(logging, name)
        for name in ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET")
    }

    # Accept both the symbolic names and their stringified numeric values.
    CHOICES = [*LEVELS] + [str(v) for v in LEVELS.values()]

    def __init__(self, option_strings, dest, help=None, **kwargs):
        super(StoreLoggingLevelAction, self).__init__(
            option_strings, dest, help=help, **kwargs)

    def __call__(self, parser, namespace, value, option_string=None):
        # Translate a known level name to its constant; leave anything
        # else (e.g. an already-numeric string) untouched.
        setattr(namespace, self.dest,
                StoreLoggingLevelAction.LEVELS.get(value, value))
class DeprecateAction(configargparse.Action):
    """Argparse action that rejects a removed flag with an explanation."""

    def __init__(self, option_strings, dest, help=None, **kwargs):
        # nargs=0: a deprecated flag consumes no argument before failing.
        super(DeprecateAction, self).__init__(option_strings, dest, nargs=0,
                                              help=help, **kwargs)

    def __call__(self, parser, namespace, values, flag_name):
        extra = "" if self.help is None else self.help
        raise configargparse.ArgumentTypeError(
            "Flag '%s' is deprecated. %s" % (flag_name, extra))
| 42,843 | 51.893827 | 118 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/train_single.py | #!/usr/bin/env python
"""Training on a single process."""
import os
import torch
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from onmt.model_builder import build_model
from onmt.utils.optimizers import Optimizer
from onmt.utils.misc import set_random_seed
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
from onmt.utils.parse import ArgumentParser
def _check_save_model_path(opt):
    """Ensure the directory that will hold ``opt.save_model`` exists.

    Args:
        opt: options namespace; only ``opt.save_model`` (the checkpoint
            path prefix) is read.
    """
    save_model_path = os.path.abspath(opt.save_model)
    model_dirname = os.path.dirname(save_model_path)
    # exist_ok avoids the check-then-create race of the previous
    # ``if not os.path.exists(...)`` guard: another process creating the
    # directory between the check and makedirs would raise FileExistsError.
    os.makedirs(model_dirname, exist_ok=True)
def _tally_parameters(model):
    """Count model parameters, split by encoder vs. everything else.

    A parameter counts toward the encoder when 'encoder' appears in its
    dotted name; all remaining parameters are attributed to the decoder.

    Returns:
        (total, encoder_elements, decoder_elements)
    """
    tallies = {True: 0, False: 0}
    for name, param in model.named_parameters():
        tallies['encoder' in name] += param.nelement()
    enc, dec = tallies[True], tallies[False]
    return enc + dec, enc, dec
def configure_process(opt, device_id):
    """Pin this process to its device and seed the RNGs."""
    # device_id < 0 means CPU-only; only bind CUDA for real GPU ids.
    if device_id >= 0:
        torch.cuda.set_device(device_id)
    # Second arg tells the helper whether CUDA seeding is also needed.
    set_random_seed(opt.seed, device_id >= 0)
def main(opt, device_id, batch_queue=None, semaphore=None):
    """Run training in a single process.

    Args:
        opt: training options; must already be validated and updated.
        device_id (int): CUDA device index for this process (-1 for CPU).
        batch_queue: optional queue of pre-built batches (multi-process
            producer/consumer mode); requires ``semaphore``.
        semaphore: released once per batch consumed from ``batch_queue``.
    """
    # NOTE: It's important that ``opt`` has been validated and updated
    # at this point.
    configure_process(opt, device_id)
    init_logger(opt.log_file)
    assert len(opt.accum_count) == len(opt.accum_steps), \
        'Number of accum_count values must match number of accum_steps'
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        # map_location keeps tensors on CPU regardless of save device.
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
        ArgumentParser.update_model_opts(model_opt)
        ArgumentParser.validate_model_opts(model_opt)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        vocab = checkpoint['vocab']
    else:
        checkpoint = None
        model_opt = opt
        vocab = torch.load(opt.data + '.vocab.pt')

    # check for code where vocab is saved instead of fields
    # (in the future this will be done in a smarter way)
    if old_style_vocab(vocab):
        fields = load_old_vocab(
            vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab

    # Report src and tgt vocab sizes, including for features
    for side in ['src', 'tgt']:
        f = fields[side]
        try:
            f_iter = iter(f)
        except TypeError:
            # Plain field with no feature subfields: wrap as one pair.
            f_iter = [(side, f)]
        for sn, sf in f_iter:
            if sf.use_vocab:
                logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))

    # Build model.
    model = build_model(model_opt, opt, fields, checkpoint)
    n_params, enc, dec = _tally_parameters(model)
    logger.info('encoder: %d' % enc)
    logger.info('decoder: %d' % dec)
    logger.info('* number of parameters: %d' % n_params)
    _check_save_model_path(opt)

    # Build optimizer.
    optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)

    # Build model saver
    model_saver = build_model_saver(model_opt, opt, model, fields, optim)

    trainer = build_trainer(
        opt, device_id, model, fields, optim, model_saver=model_saver)

    if batch_queue is None:
        # Build the training iterator locally, one shard base per data id.
        if len(opt.data_ids) > 1:
            train_shards = []
            for train_id in opt.data_ids:
                shard_base = "train_" + train_id
                train_shards.append(shard_base)
            train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
        else:
            if opt.data_ids[0] is not None:
                shard_base = "train_" + opt.data_ids[0]
            else:
                shard_base = "train"
            train_iter = build_dataset_iter(shard_base, fields, opt)
    else:
        assert semaphore is not None, \
            "Using batch_queue requires semaphore as well"

        def _train_iter():
            # Consume batches produced by another process; the semaphore
            # signals the producer each time a batch is taken.
            while True:
                batch = batch_queue.get()
                semaphore.release()
                yield batch

        train_iter = _train_iter()

    valid_iter = build_dataset_iter(
        "valid", fields, opt, is_train=False)

    if len(opt.gpu_ranks):
        logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
    else:
        logger.info('Starting training on CPU, could be very slow')
    train_steps = opt.train_steps
    if opt.single_pass and train_steps > 0:
        # single_pass: iterate the data exactly once, ignoring step budget.
        logger.warning("Option single_pass is enabled, ignoring train_steps.")
        train_steps = 0

    trainer.train(
        train_iter,
        train_steps,
        save_checkpoint_steps=opt.save_checkpoint_steps,
        valid_iter=valid_iter,
        valid_steps=opt.valid_steps)

    if trainer.report_manager.tensorboard_writer is not None:
        trainer.report_manager.tensorboard_writer.close()
| 4,977 | 32.863946 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/model_builder.py | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders import str2enc
from onmt.decoders import str2dec
from onmt.modules import Embeddings, CopyGenerator, TableEmbeddings
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
from onmt.utils.parse import ArgumentParser
def build_embeddings(opt, text_field, for_encoder=True):
    """
    Args:
        opt: the option in current environment.
        text_field(TextMultiField): word and feats field.
        for_encoder(bool): build Embeddings for encoder or decoder?

    Returns:
        :class:`TableEmbeddings` for the encoder side of a ``table``
        model, otherwise a regular :class:`Embeddings`.
    """
    if opt.model_type == 'table' and for_encoder:
        emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
        # value field
        field = text_field[0][1]
        word_padding_idx = field.vocab.stoi[field.pad_token]
        word_vocab_size = len(field.vocab)
        # pos field
        field = text_field[1][1]
        feat_padding_idx = field.vocab.stoi[field.pad_token]
        feat_vocab_size = len(field.vocab)

        # Index of the special '<ent>' token in the base (value) vocab.
        ent_idx = text_field.base_field.vocab.stoi['<ent>']

        return TableEmbeddings(
            word_vec_size=emb_dim,
            word_vocab_size=word_vocab_size,
            word_padding_idx=word_padding_idx,
            feat_vec_exponent=opt.feat_vec_exponent,
            feat_vec_size=opt.feat_vec_size,
            feat_vocab_size=feat_vocab_size,
            feat_padding_idx=feat_padding_idx,
            merge=opt.feat_merge,
            merge_activation=opt.feat_merge_activation,
            # opt.dropout may be a list (per-phase) or a scalar.
            dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            ent_idx=ent_idx
        )

    emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size

    # First pad index is the word field's, the rest belong to feature fields.
    pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
    word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]

    num_embs = [len(f.vocab) for _, f in text_field]
    num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]

    fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
        else opt.fix_word_vecs_dec

    return Embeddings(
        word_vec_size=emb_dim,
        position_encoding=opt.position_encoding,
        feat_merge=opt.feat_merge,
        feat_vec_exponent=opt.feat_vec_exponent,
        feat_vec_size=opt.feat_vec_size,
        dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
        word_padding_idx=word_padding_idx,
        feat_padding_idx=feat_pad_indices,
        word_vocab_size=num_word_embeddings,
        feat_vocab_sizes=num_feat_embeddings,
        sparse=opt.optim == "sparseadam",
        fix_word_vecs=fix_word_vecs
    )
def build_encoder(opt, embeddings):
    """
    Various encoder dispatcher function.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    # Text-like inputs honour -encoder_type; other modalities (img,
    # audio, ...) map directly to their dedicated encoder class.
    enc_type = (opt.encoder_type
                if opt.model_type in ("text", "table")
                else opt.model_type)
    return str2enc[enc_type].from_opt(opt, embeddings)
def build_decoder(opt, embeddings, dims=None):
    """
    Various decoder dispatcher function.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
        dims: optional extra dimension info forwarded to the decoder
            factory (used with table-style encoders).
    """
    # An input-feeding RNN decoder has its own registered class.
    if opt.decoder_type == "rnn" and opt.input_feed:
        dec_type = "ifrnn"
    else:
        dec_type = opt.decoder_type
    decoder_cls = str2dec[dec_type]
    if dims is None:
        return decoder_cls.from_opt(opt, embeddings)
    return decoder_cls.from_opt(opt, embeddings, dims)
def load_test_model(opt, model_path=None):
    """Load a trained model, its fields and options for inference.

    Args:
        opt: translate options; ``opt.models[0]`` is used when
            ``model_path`` is not given.
        model_path (str or NoneType): explicit checkpoint path.

    Returns:
        (fields, model, model_opt) with the model set to eval mode.
    """
    if model_path is None:
        model_path = opt.models[0]
    # map_location loads tensors on CPU regardless of the save device.
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)
    vocab = checkpoint['vocab']
    # Backward compat: older checkpoints stored a vocab instead of fields.
    if inputters.old_style_vocab(vocab):
        fields = inputters.load_old_vocab(
            vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
        )
    else:
        fields = vocab

    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
                             opt.gpu)
    if opt.fp32:
        model.float()
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):
    """Build a model from opts.

    Args:
        model_opt: the option loaded from checkpoint. It's important that
            the opts have been updated and validated. See
            :class:`onmt.utils.parse.ArgumentParser`.
        fields (dict[str, torchtext.data.Field]):
            `Field` objects for the model.
        gpu (bool): whether to use gpu.
        checkpoint: the model gnerated by train phase, or a resumed snapshot
                    model from a stopped training.
        gpu_id (int or NoneType): Which GPU to use.

    Returns:
        the NMTModel.
    """

    # Build embeddings.
    if model_opt.model_type in ["text", 'table']:
        src_field = fields["src"]
        src_emb = build_embeddings(model_opt, src_field)
    else:
        src_emb = None

    # Build encoder.
    encoder = build_encoder(model_opt, src_emb)
    # For table encoders, capture the embedding size(s) the decoder needs
    # (value embedding size, plus pos embedding size when use_pos is set).
    if isinstance(encoder.embeddings, TableEmbeddings):
        if getattr(model_opt, 'use_pos', True):
            dims = (
                encoder.embeddings.embedding_size,
                encoder.embeddings.pos_embeddings.embedding_dim
            )
        else:
            dims = encoder.embeddings.embedding_size
    else:
        dims = None

    # Build decoder.
    tgt_field = fields["tgt"]
    tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
            "preprocess with -share_vocab if you use share_embeddings"

        tgt_emb.word_lut.weight = src_emb.word_lut.weight

    decoder = build_decoder(model_opt, tgt_emb, dims)

    # Build NMTModel(= encoder + decoder).
    # NOTE(review): gpu_id == 0 falls through to the bare "cuda" device
    # (``not gpu_id`` is True for 0) — same effective device either way.
    if gpu and gpu_id is not None:
        device = torch.device("cuda", gpu_id)
    elif gpu and not gpu_id:
        device = torch.device("cuda")
    elif not gpu:
        device = torch.device("cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size,
                      len(fields["tgt"].base_field.vocab)),
            Cast(torch.float32),
            gen_func
        )
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        tgt_base_field = fields["tgt"].base_field
        vocab_size = len(tgt_base_field.vocab)
        pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
        generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward-compat for models using customed layernorm
        def fix_key(s):
            # Rename legacy layer-norm parameter suffixes (a_2/b_2) to the
            # standard weight/bias names expected by current code.
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
                       r'\1.layer_norm\2.bias', s)
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
                       r'\1.layer_norm\2.weight', s)
            return s

        checkpoint['model'] = {fix_key(k): v
                               for k, v in checkpoint['model'].items()}
        # end of patch for backward compatibility

        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        # Fresh model: uniform and/or Glorot init, then optional
        # pretrained word vectors for each side's embeddings.
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec)

    model.generator = generator
    model.to(device)
    if model_opt.model_dtype == 'fp16':
        model.half()

    return model
def build_model(model_opt, opt, fields, checkpoint):
    """Build (and log) an NMT model; thin wrapper over build_base_model."""
    logger.info('Building model...')
    built = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    logger.info(built)
    return built
| 9,581 | 34.227941 | 81 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/trainer.py | """
This is the loadable seq2seq trainer library that is
in charge of training details, loss compute, and statistics.
See train.py for a use case of this library.
Note: To make this a general library, we implement *only*
mechanism things here(i.e. what to do), and leave the strategy
things to users(i.e. how to do it). Also see train.py(one of the
users of this library) for the strategy things we do.
"""
import torch
import traceback
import onmt.utils
from onmt.utils.logging import logger
def build_trainer(opt, device_id, model, fields, optim, model_saver=None):
    """
    Simplify `Trainer` creation based on user `opt`s*

    Args:
        opt (:obj:`Namespace`): user options (usually from argument parsing)
        device_id (int): GPU index for this process, or -1 for CPU
        model (:obj:`onmt.models.NMTModel`): the model to train
        fields (dict): dict of fields
        optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
        model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
            used to save the model
    """
    tgt_field = dict(fields)["tgt"].base_field
    train_loss = onmt.utils.loss.build_loss_compute(model, tgt_field, opt)
    valid_loss = onmt.utils.loss.build_loss_compute(
        model, tgt_field, opt, train=False)

    trunc_size = opt.truncated_decoder  # Badly named...
    # Loss sharding is disabled (0) unless the model runs in fp32.
    shard_size = opt.max_generator_batches if opt.model_dtype == 'fp32' else 0
    norm_method = opt.normalization
    accum_count = opt.accum_count
    accum_steps = opt.accum_steps
    n_gpu = opt.world_size
    average_decay = opt.average_decay
    average_every = opt.average_every
    dropout = opt.dropout
    dropout_steps = opt.dropout_steps
    if device_id >= 0:
        gpu_rank = opt.gpu_ranks[device_id]
    else:
        # CPU run: no GPUs involved at all.
        gpu_rank = 0
        n_gpu = 0
    gpu_verbose_level = opt.gpu_verbose_level

    earlystopper = onmt.utils.EarlyStopping(
        opt.early_stopping, scorers=onmt.utils.scorers_from_opts(opt)) \
        if opt.early_stopping > 0 else None

    report_manager = onmt.utils.build_report_manager(opt, gpu_rank)
    # Only rank 0 saves checkpoints (model_saver=None elsewhere).
    trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,
                           shard_size, norm_method,
                           accum_count, accum_steps,
                           n_gpu, gpu_rank,
                           gpu_verbose_level, report_manager,
                           with_align=True if opt.lambda_align > 0 else False,
                           model_saver=model_saver if gpu_rank == 0 else None,
                           average_decay=average_decay,
                           average_every=average_every,
                           model_dtype=opt.model_dtype,
                           earlystopper=earlystopper,
                           dropout=dropout,
                           dropout_steps=dropout_steps)
    return trainer
class Trainer(object):
    """
    Class that controls the training process.

    Args:
        model(:py:class:`onmt.models.model.NMTModel`): translation model
            to train
        train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
            training loss computation
        valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
            training loss computation
        optim(:obj:`onmt.utils.optimizers.Optimizer`):
            the optimizer responsible for update
        trunc_size(int): length of truncated back propagation through time
        shard_size(int): compute loss in shards of this size for efficiency
        data_type(string): type of the source input: [text|img|audio]
        norm_method(string): normalization methods: [sents|tokens]
        accum_count(list): accumulate gradients this many times.
        accum_steps(list): steps for accum gradients changes.
        report_manager(:obj:`onmt.utils.ReportMgrBase`):
            the object that creates reports, or None
        model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
            used to save a checkpoint.
            Thus nothing will be saved if this parameter is None
    """

    def __init__(self, model, train_loss, valid_loss, optim,
                 trunc_size=0, shard_size=32,
                 norm_method="sents", accum_count=[1],
                 accum_steps=[0],
                 n_gpu=1, gpu_rank=1, gpu_verbose_level=0,
                 report_manager=None, with_align=False, model_saver=None,
                 average_decay=0, average_every=1, model_dtype='fp32',
                 earlystopper=None, dropout=[0.3], dropout_steps=[0]):
        # Basic attributes.
        self.model = model
        self.train_loss = train_loss
        self.valid_loss = valid_loss
        self.optim = optim
        self.trunc_size = trunc_size
        self.shard_size = shard_size
        self.norm_method = norm_method
        self.accum_count_l = accum_count
        self.accum_count = accum_count[0]
        self.accum_steps = accum_steps
        self.n_gpu = n_gpu
        self.gpu_rank = gpu_rank
        self.gpu_verbose_level = gpu_verbose_level
        self.report_manager = report_manager
        self.with_align = with_align
        self.model_saver = model_saver
        self.average_decay = average_decay
        # Exponential moving average of the parameters (lazily created).
        self.moving_average = None
        self.average_every = average_every
        self.model_dtype = model_dtype
        self.earlystopper = earlystopper
        self.dropout = dropout
        self.dropout_steps = dropout_steps

        for i in range(len(self.accum_count_l)):
            assert self.accum_count_l[i] > 0
            if self.accum_count_l[i] > 1:
                assert self.trunc_size == 0, \
                    """To enable accumulated gradients,
                       you must disable target sequence truncating."""

        # Set model in training mode.
        self.model.train()

    def _accum_count(self, step):
        """Return the accumulation count scheduled for ``step``."""
        for i in range(len(self.accum_steps)):
            if step > self.accum_steps[i]:
                _accum = self.accum_count_l[i]
        return _accum

    def _maybe_update_dropout(self, step):
        """Apply the scheduled dropout rate when ``step`` crosses a boundary."""
        for i in range(len(self.dropout_steps)):
            if step > 1 and step == self.dropout_steps[i] + 1:
                self.model.update_dropout(self.dropout[i])
                logger.info("Updated dropout to %f from step %d"
                            % (self.dropout[i], step))

    def _accum_batches(self, iterator):
        """Yield (batches, normalization) groups of ``accum_count`` batches."""
        batches = []
        normalization = 0
        self.accum_count = self._accum_count(self.optim.training_step)
        for batch in iterator:
            batches.append(batch)
            if self.norm_method == "tokens":
                # Normalize by non-padding target tokens (excluding BOS row).
                num_tokens = batch.tgt[1:, :, 0].ne(
                    self.train_loss.padding_idx).sum()
                normalization += num_tokens.item()
            else:
                normalization += batch.batch_size
            if len(batches) == self.accum_count:
                yield batches, normalization
                self.accum_count = self._accum_count(self.optim.training_step)
                batches = []
                normalization = 0
        if batches:
            # Flush the final, possibly partial, group.
            yield batches, normalization

    def _update_average(self, step):
        """Update (or initialize) the exponential moving average of params."""
        if self.moving_average is None:
            copy_params = [params.detach().float()
                           for params in self.model.parameters()]
            self.moving_average = copy_params
        else:
            # Decay grows towards 1 as training progresses.
            average_decay = max(self.average_decay,
                                1 - (step + 1)/(step + 10))
            for (i, avg), cpt in zip(enumerate(self.moving_average),
                                     self.model.parameters()):
                self.moving_average[i] = \
                    (1 - average_decay) * avg + \
                    cpt.detach().float() * average_decay

    def train(self,
              train_iter,
              train_steps,
              save_checkpoint_steps=5000,
              valid_iter=None,
              valid_steps=10000):
        """
        The main training loop by iterating over `train_iter` and possibly
        running validation on `valid_iter`.

        Args:
            train_iter: A generator that returns the next training batch.
            train_steps: Run training for this many iterations.
            save_checkpoint_steps: Save a checkpoint every this many
              iterations.
            valid_iter: A generator that returns the next validation batch.
            valid_steps: Run evaluation every this many iterations.

        Returns:
            The gathered statistics.
        """
        if valid_iter is None:
            logger.info('Start training loop without validation...')
        else:
            logger.info('Start training loop and validate every %d steps...',
                        valid_steps)

        total_stats = onmt.utils.Statistics()
        report_stats = onmt.utils.Statistics()
        self._start_report_manager(start_time=total_stats.start_time)

        for i, (batches, normalization) in enumerate(
                self._accum_batches(train_iter)):
            step = self.optim.training_step
            # UPDATE DROPOUT
            self._maybe_update_dropout(step)

            if self.gpu_verbose_level > 1:
                logger.info("GpuRank %d: index: %d", self.gpu_rank, i)
            if self.gpu_verbose_level > 0:
                logger.info("GpuRank %d: reduce_counter: %d \
                            n_minibatch %d"
                            % (self.gpu_rank, i + 1, len(batches)))

            if self.n_gpu > 1:
                # Sum the normalization term across workers.
                normalization = sum(onmt.utils.distributed
                                    .all_gather_list
                                    (normalization))

            self._gradient_accumulation(
                batches, normalization, total_stats,
                report_stats)

            if self.average_decay > 0 and i % self.average_every == 0:
                self._update_average(step)

            report_stats = self._maybe_report_training(
                step, train_steps,
                self.optim.learning_rate(),
                report_stats)

            if valid_iter is not None and step % valid_steps == 0:
                if self.gpu_verbose_level > 0:
                    logger.info('GpuRank %d: validate step %d'
                                % (self.gpu_rank, step))
                valid_stats = self.validate(
                    valid_iter, moving_average=self.moving_average)
                if self.gpu_verbose_level > 0:
                    logger.info('GpuRank %d: gather valid stat \
                                step %d' % (self.gpu_rank, step))
                valid_stats = self._maybe_gather_stats(valid_stats)
                if self.gpu_verbose_level > 0:
                    logger.info('GpuRank %d: report stat step %d'
                                % (self.gpu_rank, step))
                self._report_step(self.optim.learning_rate(),
                                  step, valid_stats=valid_stats)
                # Run patience mechanism
                if self.earlystopper is not None:
                    self.earlystopper(valid_stats, step)
                    # If the patience has reached the limit, stop training
                    if self.earlystopper.has_stopped():
                        break

            if (self.model_saver is not None
                and (save_checkpoint_steps != 0
                     and step % save_checkpoint_steps == 0)):
                self.model_saver.save(step, moving_average=self.moving_average)

            if train_steps > 0 and step >= train_steps:
                break

        if self.model_saver is not None:
            # Final checkpoint after the loop ends.
            self.model_saver.save(step, moving_average=self.moving_average)
        return total_stats

    def validate(self, valid_iter, moving_average=None):
        """ Validate model.
            valid_iter: validate data iterator
        Returns:
            :obj:`nmt.Statistics`: validation loss statistics
        """
        valid_model = self.model
        if moving_average:
            # swap model params w/ moving average
            # (and keep the original parameters)
            model_params_data = []
            for avg, param in zip(self.moving_average,
                                  valid_model.parameters()):
                model_params_data.append(param.data)
                param.data = avg.data.half() if self.optim._fp16 == "legacy" \
                    else avg.data

        # Set model in validating mode.
        valid_model.eval()

        with torch.no_grad():
            stats = onmt.utils.Statistics()

            for batch in valid_iter:
                src, src_lengths = batch.src if isinstance(batch.src, tuple) \
                    else (batch.src, None)
                tgt = batch.tgt

                # F-prop through the model.
                outputs, attns = valid_model(src, tgt, src_lengths,
                                             with_align=self.with_align)

                # Compute loss.
                _, batch_stats = self.valid_loss(batch, outputs, attns)

                # Update statistics.
                stats.update(batch_stats)

        if moving_average:
            # Restore the live parameters that were swapped out above.
            for param_data, param in zip(model_params_data,
                                         self.model.parameters()):
                param.data = param_data

        # Set model back to training mode.
        valid_model.train()

        return stats

    def _gradient_accumulation(self, true_batches, normalization, total_stats,
                               report_stats):
        """Run fwd/bwd over ``true_batches`` and step the optimizer."""
        if self.accum_count > 1:
            self.optim.zero_grad()

        for k, batch in enumerate(true_batches):
            target_size = batch.tgt.size(0)
            # Truncated BPTT: reminder not compatible with accum > 1
            if self.trunc_size:
                trunc_size = self.trunc_size
            else:
                trunc_size = target_size

            src, src_lengths = batch.src if isinstance(batch.src, tuple) \
                else (batch.src, None)
            if src_lengths is not None:
                report_stats.n_src_words += src_lengths.sum().item()

            tgt_outer = batch.tgt

            bptt = False
            for j in range(0, target_size-1, trunc_size):
                # 1. Create truncated target.
                tgt = tgt_outer[j: j + trunc_size]

                # 2. F-prop all but generator.
                if self.accum_count == 1:
                    self.optim.zero_grad()
                outputs, attns = self.model(src, tgt, src_lengths, bptt=bptt,
                                            with_align=self.with_align)
                bptt = True

                # 3. Compute loss.
                try:
                    loss, batch_stats = self.train_loss(
                        batch,
                        outputs,
                        attns,
                        normalization=normalization,
                        shard_size=self.shard_size,
                        trunc_start=j,
                        trunc_size=trunc_size)

                    if loss is not None:
                        self.optim.backward(loss)

                    total_stats.update(batch_stats)
                    report_stats.update(batch_stats)

                except Exception:
                    # Best-effort: skip a failing batch rather than abort.
                    traceback.print_exc()
                    logger.info("At step %d, we removed a batch - accum %d",
                                self.optim.training_step, k)

                # 4. Update the parameters and statistics.
                if self.accum_count == 1:
                    # Multi GPU gradient gather
                    if self.n_gpu > 1:
                        grads = [p.grad.data for p in self.model.parameters()
                                 if p.requires_grad
                                 and p.grad is not None]
                        onmt.utils.distributed.all_reduce_and_rescale_tensors(
                            grads, float(1))
                    self.optim.step()

                # If truncated, don't backprop fully.
                # TO CHECK
                # if dec_state is not None:
                #    dec_state.detach()
                if self.model.decoder.state is not None:
                    self.model.decoder.detach_state()

        # in case of multi step gradient accumulation,
        # update only after accum batches
        if self.accum_count > 1:
            if self.n_gpu > 1:
                grads = [p.grad.data for p in self.model.parameters()
                         if p.requires_grad
                         and p.grad is not None]
                onmt.utils.distributed.all_reduce_and_rescale_tensors(
                    grads, float(1))
            self.optim.step()

    def _start_report_manager(self, start_time=None):
        """
        Simple function to start report manager (if any)
        """
        if self.report_manager is not None:
            if start_time is None:
                self.report_manager.start()
            else:
                self.report_manager.start_time = start_time

    def _maybe_gather_stats(self, stat):
        """
        Gather statistics in multi-processes cases

        Args:
            stat(:obj:onmt.utils.Statistics): a Statistics object to gather
                or None (it returns None in this case)

        Returns:
            stat: the updated (or unchanged) stat object
        """
        if stat is not None and self.n_gpu > 1:
            return onmt.utils.Statistics.all_gather_stats(stat)
        return stat

    def _maybe_report_training(self, step, num_steps, learning_rate,
                               report_stats):
        """
        Simple function to report training stats (if report_manager is set)
        see `onmt.utils.ReportManagerBase.report_training` for doc
        """
        if self.report_manager is not None:
            return self.report_manager.report_training(
                step, num_steps, learning_rate, report_stats,
                multigpu=self.n_gpu > 1)

    def _report_step(self, learning_rate, step, train_stats=None,
                     valid_stats=None):
        """
        Simple function to report stats (if report_manager is set)
        see `onmt.utils.ReportManagerBase.report_step` for doc
        """
        if self.report_manager is not None:
            return self.report_manager.report_step(
                learning_rate, step, train_stats=train_stats,
                valid_stats=valid_stats)
| 18,735 | 39.292473 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/inputters/text_dataset.py | # -*- coding: utf-8 -*-
from functools import partial
import six
import torch
from torchtext.data import Field, RawField
from onmt.inputters.datareader_base import DataReaderBase
class TextDataReader(DataReaderBase):
    """Reader that yields one ``{side: text, "indices": i}`` dict per line."""

    def read(self, sequences, side, _dir=None):
        """Read text data from disk.

        Args:
            sequences (str or Iterable[str]): path to a text file, or an
                iterable over the actual text data.
            side (str): prefix used in the returned dict, usually ``"src"``
                or ``"tgt"``.
            _dir (NoneType): leave as ``None``; exists only to conform to
                the :func:`DataReaderBase.read()` signature.

        Yields:
            dict mapping ``side`` to one (decoded) sequence and
            ``"indices"`` to its position in the input.
        """
        assert _dir is None or _dir == "", \
            "Cannot use _dir with TextDataReader."
        if isinstance(sequences, str):
            # A path was given: stream the file's lines.
            sequences = DataReaderBase._read_file(sequences)
        for i, seq in enumerate(sequences):
            text = seq.decode("utf-8") \
                if isinstance(seq, six.binary_type) else seq
            yield {side: text, "indices": i}
def text_sort_key(ex):
    """Sort key: source token count, plus target token count when present."""
    src_len = len(ex.src[0])
    if hasattr(ex, "tgt"):
        return src_len, len(ex.tgt[0])
    return src_len
# mix this with partial
def _feature_tokenize(
        string, layer=0, tok_delim=None, feat_delim=None, truncate=None):
    """Split apart word features (like POS/NER tags) from the tokens.

    Args:
        string (str): A string with ``tok_delim`` joining tokens and
            features joined by ``feat_delim``. For example,
            ``"hello|NOUN|'' Earth|NOUN|PLANET"``.
        layer (int): Which feature to extract. (Not used if there are no
            features, indicated by ``feat_delim is None``). In the
            example above, layer 2 is ``'' PLANET``.
        truncate (int or NoneType): Restrict sequences to this length of
            tokens.

    Returns:
        List[str] of tokens.
    """
    tokens = string.split(tok_delim)
    if truncate is not None:
        del tokens[truncate:]
    if feat_delim is None:
        return tokens
    # Keep only the requested feature layer of each token.
    return [tok.split(feat_delim)[layer] for tok in tokens]
class TextMultiField(RawField):
    """Container for subfields.

    Text data might use POS/NER/etc labels in addition to tokens.
    This class associates the "base" :class:`Field` with any subfields.
    It also handles padding the data and stacking it.

    Args:
        base_name (str): Name for the base field.
        base_field (Field): The token field.
        feats_fields (Iterable[Tuple[str, Field]]): A list of name-field
            pairs.

    Attributes:
        fields (Iterable[Tuple[str, Field]]): A list of name-field pairs.
            The order is defined as the base field first, then
            ``feats_fields`` in alphabetical order.
    """

    def __init__(self, base_name, base_field, feats_fields):
        super(TextMultiField, self).__init__()
        self.fields = [(base_name, base_field)]
        # Alphabetical ordering keeps feature columns deterministic.
        for name, ff in sorted(feats_fields, key=lambda kv: kv[0]):
            self.fields.append((name, ff))

    @property
    def base_field(self):
        # The token-level field is always stored first.
        return self.fields[0][1]

    def process(self, batch, device=None):
        """Convert outputs of preprocess into Tensors.

        Args:
            batch (List[List[List[str]]]): A list of length batch size.
                Each element is a list of the preprocess results for each
                field (which are lists of str "words" or feature tags.
            device (torch.device or str): The device on which the tensor(s)
                are built.

        Returns:
            torch.LongTensor or Tuple[LongTensor, LongTensor]:
                A tensor of shape ``(seq_len, batch_size, len(self.fields))``
                where the field features are ordered like ``self.fields``.
                If the base field returns lengths, these are also returned
                and have shape ``(batch_size,)``.
        """
        # batch (list(list(list))): batch_size x len(self.fields) x seq_len
        batch_by_feat = list(zip(*batch))
        base_data = self.base_field.process(batch_by_feat[0], device=device)
        if self.base_field.include_lengths:
            # lengths: batch_size
            base_data, lengths = base_data

        feats = [ff.process(batch_by_feat[i], device=device)
                 for i, (_, ff) in enumerate(self.fields[1:], 1)]
        levels = [base_data] + feats
        # data: seq_len x batch_size x len(self.fields)
        data = torch.stack(levels, 2)
        if self.base_field.include_lengths:
            return data, lengths
        else:
            return data

    def preprocess(self, x):
        """Preprocess data.

        Args:
            x (str): A sentence string (words joined by whitespace).

        Returns:
            List[List[str]]: A list of length ``len(self.fields)`` containing
                lists of tokens/feature tags for the sentence. The output
                is ordered like ``self.fields``.
        """
        return [f.preprocess(x) for _, f in self.fields]

    def __getitem__(self, item):
        # Expose (name, field) pairs by position, base field at index 0.
        return self.fields[item]
def text_fields(**kwargs):
    """Create text fields.

    Args:
        base_name (str): Name associated with the field.
        n_feats (int): Number of word level feats (not counting the tokens)
        include_lengths (bool): Optionally return the sequence lengths.
        pad (str, optional): Defaults to ``"<blank>"``.
        bos (str or NoneType, optional): Defaults to ``"<s>"``.
        eos (str or NoneType, optional): Defaults to ``"</s>"``.
        truncate (bool or NoneType, optional): Defaults to ``None``.

    Returns:
        TextMultiField
    """
    n_feats = kwargs["n_feats"]
    include_lengths = kwargs["include_lengths"]
    base_name = kwargs["base_name"]
    pad = kwargs.get("pad", "<blank>")
    bos = kwargs.get("bos", "<s>")
    eos = kwargs.get("eos", "</s>")
    truncate = kwargs.get("truncate", None)
    fields_ = []
    # U+2502 (box-drawing bar) separates a token from its features on disk.
    feat_delim = u"│" if n_feats > 0 else None
    # i == 0 is the token field itself; i > 0 are the feature layers.
    for i in range(n_feats + 1):
        name = base_name + "_feat_" + str(i - 1) if i > 0 else base_name
        tokenize = partial(
            _feature_tokenize,
            layer=i,
            truncate=truncate,
            feat_delim=feat_delim)
        # Only the base field may report lengths.
        use_len = i == 0 and include_lengths
        feat = Field(
            init_token=bos, eos_token=eos,
            pad_token=pad, tokenize=tokenize,
            include_lengths=use_len)
        fields_.append((name, feat))
    assert fields_[0][0] == base_name  # sanity check
    field = TextMultiField(fields_[0][0], fields_[0][1], fields_[1:])
    return field
| 6,904 | 34.410256 | 77 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/inputters/dataset_base.py | # coding: utf-8
from itertools import chain, starmap
from collections import Counter
import torch
from torchtext.data import Dataset as TorchtextDataset
from torchtext.data import Example
from torchtext.vocab import Vocab
def _join_dicts(*args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args]))
def _dynamic_dict(example, src_field, tgt_field):
"""Create copy-vocab and numericalize with it.
In-place adds ``"src_map"`` to ``example``. That is the copy-vocab
numericalization of the tokenized ``example["src"]``. If ``example``
has a ``"tgt"`` key, adds ``"alignment"`` to example. That is the
copy-vocab numericalization of the tokenized ``example["tgt"]``. The
alignment has an initial and final UNK token to match the BOS and EOS
tokens.
Args:
example (dict): An example dictionary with a ``"src"`` key and
maybe a ``"tgt"`` key. (This argument changes in place!)
src_field (torchtext.data.Field): Field object.
tgt_field (torchtext.data.Field): Field object.
Returns:
torchtext.data.Vocab and ``example``, changed as described.
"""
src = src_field.tokenize(example["src"])
# make a small vocab containing just the tokens in the source sequence
unk = src_field.unk_token
pad = src_field.pad_token
src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])
unk_idx = src_ex_vocab.stoi[unk]
# Map source tokens to indices in the dynamic dict.
src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])
example["src_map"] = src_map
example["src_ex_vocab"] = src_ex_vocab
if "tgt" in example:
tgt = tgt_field.tokenize(example["tgt"])
mask = torch.LongTensor(
[unk_idx] + [src_ex_vocab.stoi[w] for w in tgt] + [unk_idx])
example["alignment"] = mask
return src_ex_vocab, example
class Dataset(TorchtextDataset):
"""Contain data and process it.
A dataset is an object that accepts sequences of raw data (sentence pairs
in the case of machine translation) and fields which describe how this
raw data should be processed to produce tensors. When a dataset is
instantiated, it applies the fields' preprocessing pipeline (but not
the bit that numericalizes it or turns it into batch tensors) to the raw
data, producing a list of :class:`torchtext.data.Example` objects.
torchtext's iterators then know how to use these examples to make batches.
Args:
fields (dict[str, Field]): a dict with the structure
returned by :func:`onmt.inputters.get_fields()`. Usually
that means the dataset side, ``"src"`` or ``"tgt"``. Keys match
the keys of items yielded by the ``readers``, while values
are lists of (name, Field) pairs. An attribute with this
name will be created for each :class:`torchtext.data.Example`
object and its value will be the result of applying the Field
to the data that matches the key. The advantage of having
sequences of fields for each piece of raw input is that it allows
the dataset to store multiple "views" of each input, which allows
for easy implementation of token-level features, mixed word-
and character-level models, and so on. (See also
:class:`onmt.inputters.TextMultiField`.)
readers (Iterable[onmt.inputters.DataReaderBase]): Reader objects
for disk-to-dict. The yielded dicts are then processed
according to ``fields``.
data (Iterable[Tuple[str, Any]]): (name, ``data_arg``) pairs
where ``data_arg`` is passed to the ``read()`` method of the
reader in ``readers`` at that position. (See the reader object for
details on the ``Any`` type.)
dirs (Iterable[str or NoneType]): A list of directories where
data is contained. See the reader object for more details.
sort_key (Callable[[torchtext.data.Example], Any]): A function
for determining the value on which data is sorted (i.e. length).
filter_pred (Callable[[torchtext.data.Example], bool]): A function
that accepts Example objects and returns a boolean value
indicating whether to include that example in the dataset.
Attributes:
src_vocabs (List[torchtext.data.Vocab]): Used with dynamic dict/copy
attention. There is a very short vocab for each src example.
It contains just the source words, e.g. so that the generator can
predict to copy them.
"""
def __init__(self, fields, readers, data, dirs, sort_key,
filter_pred=None):
self.sort_key = sort_key
can_copy = 'src_map' in fields and 'alignment' in fields
read_iters = [r.read(dat[1], dat[0], dir_) for r, dat, dir_
in zip(readers, data, dirs)]
# self.src_vocabs is used in collapse_copy_scores and Translator.py
self.src_vocabs = []
examples = []
for ex_dict in starmap(_join_dicts, zip(*read_iters)):
if can_copy:
src_field = fields['src']
tgt_field = fields['tgt']
# this assumes src_field and tgt_field are both text
src_ex_vocab, ex_dict = _dynamic_dict(
ex_dict, src_field.base_field, tgt_field.base_field)
self.src_vocabs.append(src_ex_vocab)
ex_fields = {k: [(k, v)] for k, v in fields.items() if
k in ex_dict}
ex = Example.fromdict(ex_dict, ex_fields)
examples.append(ex)
# fields needs to have only keys that examples have as attrs
fields = []
for _, nf_list in ex_fields.items():
assert len(nf_list) == 1
fields.append(nf_list[0])
super(Dataset, self).__init__(examples, fields, filter_pred)
def __getattr__(self, attr):
# avoid infinite recursion when fields isn't defined
if 'fields' not in vars(self):
raise AttributeError
if attr in self.fields:
return (getattr(x, attr) for x in self.examples)
else:
raise AttributeError
def save(self, path, remove_fields=True):
if remove_fields:
self.fields = []
torch.save(self, path)
@staticmethod
def config(fields):
readers, data, dirs = [], [], []
for name, field in fields:
if field["data"] is not None:
readers.append(field["reader"])
data.append((name, field["data"]))
dirs.append(field["dir"])
return readers, data, dirs
| 6,865 | 40.612121 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/inputters/inputter.py | # -*- coding: utf-8 -*-
import glob
import os
import codecs
import math
from collections import Counter, defaultdict
from itertools import chain, cycle
import torch
import torchtext.data
from torchtext.data import Field, RawField, LabelField
from torchtext.vocab import Vocab
from torchtext.data.utils import RandomShuffler
from onmt.inputters.text_dataset import text_fields, TextMultiField
from onmt.inputters.image_dataset import image_fields
from onmt.inputters.audio_dataset import audio_fields
from onmt.inputters.vec_dataset import vec_fields
from onmt.utils.logging import logger
# backwards compatibility
from onmt.inputters.text_dataset import _feature_tokenize # noqa: F401
from onmt.inputters.image_dataset import ( # noqa: F401
batch_img as make_img)
import gc
# monkey-patch to make torchtext Vocab's pickleable
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
Vocab.__getstate__ = _getstate
Vocab.__setstate__ = _setstate
def make_src(data, vocab):
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
class AlignField(LabelField):
"""
Parse ['<src>-<tgt>', ...] into ['<src>','<tgt>', ...]
"""
def __init__(self, **kwargs):
kwargs['use_vocab'] = False
kwargs['preprocessing'] = parse_align_idx
super(AlignField, self).__init__(**kwargs)
def process(self, batch, device=None):
""" Turn a batch of align-idx to a sparse align idx Tensor"""
sparse_idx = []
for i, example in enumerate(batch):
for src, tgt in example:
# +1 for tgt side to keep coherent after "bos" padding,
# register ['N°_in_batch', 'tgt_id+1', 'src_id']
sparse_idx.append([i, tgt + 1, src])
align_idx = torch.tensor(sparse_idx, dtype=self.dtype, device=device)
return align_idx
def parse_align_idx(align_pharaoh):
"""
Parse Pharaoh alignment into [[<src>, <tgt>], ...]
"""
align_list = align_pharaoh.strip().split(' ')
flatten_align_idx = []
for align in align_list:
try:
src_idx, tgt_idx = align.split('-')
except ValueError:
logger.warning("{} in `{}`".format(align, align_pharaoh))
logger.warning("Bad alignement line exists. Please check file!")
raise
flatten_align_idx.append([int(src_idx), int(tgt_idx)])
return flatten_align_idx
def get_fields(
src_data_type,
n_src_feats,
n_tgt_feats,
pad='<blank>',
bos='<s>',
eos='</s>',
dynamic_dict=False,
with_align=False,
src_truncate=None,
tgt_truncate=None
):
"""
Args:
src_data_type: type of the source input. Options are [text|img|audio].
n_src_feats (int): the number of source features (not counting tokens)
to create a :class:`torchtext.data.Field` for. (If
``src_data_type=="text"``, these fields are stored together
as a ``TextMultiField``).
n_tgt_feats (int): See above.
pad (str): Special pad symbol. Used on src and tgt side.
bos (str): Special beginning of sequence symbol. Only relevant
for tgt.
eos (str): Special end of sequence symbol. Only relevant
for tgt.
dynamic_dict (bool): Whether or not to include source map and
alignment fields.
with_align (bool): Whether or not to include word align.
src_truncate: Cut off src sequences beyond this (passed to
``src_data_type``'s data reader - see there for more details).
tgt_truncate: Cut off tgt sequences beyond this (passed to
:class:`TextDataReader` - see there for more details).
Returns:
A dict mapping names to fields. These names need to match
the dataset example attributes.
"""
assert src_data_type in ['text', 'img', 'audio', 'vec'], \
"Data type not implemented"
assert not dynamic_dict or src_data_type == 'text', \
'it is not possible to use dynamic_dict with non-text input'
fields = {}
fields_getters = {"text": text_fields,
"img": image_fields,
"audio": audio_fields,
"vec": vec_fields}
src_field_kwargs = {"n_feats": n_src_feats,
"include_lengths": True,
"pad": pad, "bos": None, "eos": None,
"truncate": src_truncate,
"base_name": "src"}
fields["src"] = fields_getters[src_data_type](**src_field_kwargs)
tgt_field_kwargs = {"n_feats": n_tgt_feats,
"include_lengths": False,
"pad": pad, "bos": bos, "eos": eos,
"truncate": tgt_truncate,
"base_name": "tgt"}
fields["tgt"] = fields_getters["text"](**tgt_field_kwargs)
indices = Field(use_vocab=False, dtype=torch.long, sequential=False)
fields["indices"] = indices
if dynamic_dict:
src_map = Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
fields["src_map"] = src_map
src_ex_vocab = RawField()
fields["src_ex_vocab"] = src_ex_vocab
align = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["alignment"] = align
if with_align:
word_align = AlignField()
fields["align"] = word_align
return fields
def load_old_vocab(vocab, data_type="text", dynamic_dict=False):
"""Update a legacy vocab/field format.
Args:
vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the
format formerly saved in *.vocab.pt files. Or, text data
not using a :class:`TextMultiField`.
data_type (str): text, img, or audio
dynamic_dict (bool): Used for copy attention.
Returns:
a dictionary whose keys are the field names and whose values Fields.
"""
if _old_style_vocab(vocab):
# List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
vocab = dict(vocab)
n_src_features = sum('src_feat_' in k for k in vocab)
n_tgt_features = sum('tgt_feat_' in k for k in vocab)
fields = get_fields(
data_type, n_src_features, n_tgt_features,
dynamic_dict=dynamic_dict)
for n, f in fields.items():
try:
f_iter = iter(f)
except TypeError:
f_iter = [(n, f)]
for sub_n, sub_f in f_iter:
if sub_n in vocab:
sub_f.vocab = vocab[sub_n]
return fields
if _old_style_field_list(vocab): # upgrade to multifield
# Dict[str, List[Tuple[str, Field]]]
# doesn't change structure - don't return early.
fields = vocab
for base_name, vals in fields.items():
if ((base_name == 'src' and data_type == 'text') or
base_name == 'tgt'):
assert not isinstance(vals[0][1], TextMultiField)
fields[base_name] = [(base_name, TextMultiField(
vals[0][0], vals[0][1], vals[1:]))]
if _old_style_nesting(vocab):
# Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
fields = dict(list(chain.from_iterable(vocab.values())))
return fields
def _old_style_vocab(vocab):
"""Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is a list of pairs where the second object
is a :class:`torchtext.vocab.Vocab` object.
This exists because previously only the vocab objects from the fields
were saved directly, not the fields themselves, and the fields needed to
be reconstructed at training and translation time.
"""
return isinstance(vocab, list) and \
any(isinstance(v[1], Vocab) for v in vocab)
def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
any(isinstance(v, list) for v in vocab.values())
def _old_style_field_list(vocab):
"""Detect old-style text fields.
Not old style vocab, old nesting, and text-type fields not using
``TextMultiField``.
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is not an :func:`_old_style_vocab` and not
a :class:`TextMultiField` (using an old-style text representation).
"""
# if tgt isn't using TextMultiField, then no text field is.
return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \
(not isinstance(vocab['tgt'][0][1], TextMultiField))
def old_style_vocab(vocab):
"""The vocab/fields need updated."""
return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \
_old_style_nesting(vocab)
def filter_example(ex, use_src_len=True, use_tgt_len=True,
min_src_len=1, max_src_len=float('inf'),
min_tgt_len=1, max_tgt_len=float('inf')):
"""Return whether an example is an acceptable length.
If used with a dataset as ``filter_pred``, use :func:`partial()`
for all keyword arguments.
Args:
ex (torchtext.data.Example): An object with a ``src`` and ``tgt``
property.
use_src_len (bool): Filter based on the length of ``ex.src``.
use_tgt_len (bool): Similar to above.
min_src_len (int): A non-negative minimally acceptable length
(examples of exactly this length will be included).
min_tgt_len (int): Similar to above.
max_src_len (int or float): A non-negative (possibly infinite)
maximally acceptable length (examples of exactly this length
will be included).
max_tgt_len (int or float): Similar to above.
"""
src_len = len(ex.src[0])
tgt_len = len(ex.tgt[0])
return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
(not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
return
target_size = int(math.ceil(vocab_size / multiple)) * multiple
padding_tokens = [
"averyunlikelytoken%d" % i for i in range(target_size - vocab_size)]
vocab.extend(Vocab(Counter(), specials=padding_tokens))
return vocab
def _build_field_vocab(field, counter, size_multiple=1, **kwargs):
# this is basically copy-pasted from torchtext.
all_specials = [
field.unk_token, field.pad_token, field.init_token, field.eos_token
]
specials = [tok for tok in all_specials if tok is not None]
field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
if size_multiple > 1:
_pad_vocab_to_multiple(field.vocab, size_multiple)
def _load_vocab(vocab_path, name, counters, min_freq):
# counters changes in place
vocab = _read_vocab_file(vocab_path, name)
vocab_size = len(vocab)
logger.info('Loaded %s vocab has %d tokens.' % (name, vocab_size))
for i, token in enumerate(vocab):
# keep the order of tokens specified in the vocab file by
# adding them to the counter with decreasing counting values
counters[name][token] = vocab_size - i + min_freq
return vocab, vocab_size
def _build_fv_from_multifield(multifield, counters, build_fv_args,
size_multiple=1):
for name, field in multifield:
_build_field_vocab(
field,
counters[name],
size_multiple=size_multiple,
**build_fv_args[name])
logger.info(" * %s vocab size: %d." % (name, len(field.vocab)))
def _build_fields_vocab(fields, counters, data_type, share_vocab,
vocab_size_multiple,
src_vocab_size, src_words_min_frequency,
tgt_vocab_size, tgt_words_min_frequency):
build_fv_args = defaultdict(dict)
build_fv_args["src"] = dict(
max_size=src_vocab_size, min_freq=src_words_min_frequency)
build_fv_args["tgt"] = dict(
max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)
tgt_multifield = fields["tgt"]
_build_fv_from_multifield(
tgt_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if data_type == 'text':
src_multifield = fields["src"]
_build_fv_from_multifield(
src_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
src_field = src_multifield.base_field
tgt_field = tgt_multifield.base_field
_merge_field_vocabs(
src_field, tgt_field, vocab_size=src_vocab_size,
min_freq=src_words_min_frequency,
vocab_size_multiple=vocab_size_multiple)
logger.info(" * merged vocab size: %d." % len(src_field.vocab))
return fields
def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,
vocab_size_multiple=1):
"""Build the fields for all data sides.
Args:
train_dataset_files: a list of train dataset pt file.
fields (dict[str, Field]): fields to build vocab for.
data_type (str): A supported data type string.
share_vocab (bool): share source and target vocabulary?
src_vocab_path (str): Path to src vocabulary file.
src_vocab_size (int): size of the source vocabulary.
src_words_min_frequency (int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path (str): Path to tgt vocabulary file.
tgt_vocab_size (int): size of the target vocabulary.
tgt_words_min_frequency (int): the minimum frequency needed to
include a target word in the vocabulary.
vocab_size_multiple (int): ensure that the vocabulary size is a
multiple of this value.
Returns:
Dict of Fields
"""
counters = defaultdict(Counter)
if src_vocab_path:
try:
logger.info("Using existing vocabulary...")
vocab = torch.load(src_vocab_path)
# return vocab to dump with standard name
return vocab
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
# empty train_dataset_files so that vocab is only loaded from
# given paths in src_vocab_path, tgt_vocab_path
train_dataset_files = []
# Load vocabulary
if src_vocab_path:
src_vocab, src_vocab_size = _load_vocab(
src_vocab_path, "src", counters,
src_words_min_frequency)
else:
src_vocab = None
if tgt_vocab_path:
tgt_vocab, tgt_vocab_size = _load_vocab(
tgt_vocab_path, "tgt", counters,
tgt_words_min_frequency)
else:
tgt_vocab = None
for i, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and src_vocab) or \
(sub_n == 'tgt' and tgt_vocab)
if sub_f.sequential and not has_vocab:
val = fd
counters[sub_n].update(val)
# Drop the none-using from memory but keep the last
if i < len(train_dataset_files) - 1:
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
fields = _build_fields_vocab(
fields, counters, data_type,
share_vocab, vocab_size_multiple,
src_vocab_size, src_words_min_frequency,
tgt_vocab_size, tgt_words_min_frequency)
return fields # is the return necessary?
def _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,
vocab_size_multiple):
# in the long run, shouldn't it be possible to do this by calling
# build_vocab with both the src and tgt data?
specials = [tgt_field.unk_token, tgt_field.pad_token,
tgt_field.init_token, tgt_field.eos_token]
merged = sum(
[src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()
)
merged_vocab = Vocab(
merged, specials=specials,
max_size=vocab_size, min_freq=min_freq
)
if vocab_size_multiple > 1:
_pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)
src_field.vocab = merged_vocab
tgt_field.vocab = merged_vocab
assert len(src_field.vocab) == len(tgt_field.vocab)
def _read_vocab_file(vocab_path, tag):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on a line by itself. Tokens must not
contain whitespace (else only before the whitespace
is considered).
tag (str): Used for logging which vocab is being read.
"""
logger.info("Loading {} vocabulary from {}".format(tag, vocab_path))
if not os.path.exists(vocab_path):
raise RuntimeError(
"{} vocabulary not found at {}".format(tag, vocab_path))
else:
with codecs.open(vocab_path, 'r', 'utf-8') as f:
return [line.strip().split()[0] for line in f if line.strip()]
def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if batch_size_multiple > 1:
overflowed += (
(len(minibatch) - overflowed) % batch_size_multiple)
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
if overflowed == len(minibatch):
logger.warning(
"An example was ignored, more tokens"
" than allowed by tokens batch_size")
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1, size_so_far)
if minibatch:
yield minibatch
def _pool(data, batch_size, batch_size_fn, batch_size_multiple,
sort_key, random_shuffler, pool_factor):
for p in torchtext.data.batch(
data, batch_size * pool_factor,
batch_size_fn=batch_size_fn):
p_batch = list(batch_iter(
sorted(p, key=sort_key),
batch_size,
batch_size_fn=batch_size_fn,
batch_size_multiple=batch_size_multiple))
for b in random_shuffler(p_batch):
yield b
class OrderedIterator(torchtext.data.Iterator):
def __init__(self,
dataset,
batch_size,
pool_factor=1,
batch_size_multiple=1,
yield_raw_example=False,
**kwargs):
super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)
self.batch_size_multiple = batch_size_multiple
self.yield_raw_example = yield_raw_example
self.dataset = dataset
self.pool_factor = pool_factor
def create_batches(self):
if self.train:
if self.yield_raw_example:
self.batches = batch_iter(
self.data(),
1,
batch_size_fn=None,
batch_size_multiple=1)
else:
self.batches = _pool(
self.data(),
self.batch_size,
self.batch_size_fn,
self.batch_size_multiple,
self.sort_key,
self.random_shuffler,
self.pool_factor)
else:
self.batches = []
for b in batch_iter(
self.data(),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple):
self.batches.append(sorted(b, key=self.sort_key))
def __iter__(self):
"""
Extended version of the definition in torchtext.data.Iterator.
Added yield_raw_example behaviour to yield a torchtext.data.Example
instead of a torchtext.data.Batch object.
"""
while True:
self.init_epoch()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
if self.sort_within_batch:
# NOTE: `rnn.pack_padded_sequence` requires that a
# minibatch be sorted by decreasing order, which
# requires reversing relative to typical sort keys
if self.sort:
minibatch.reverse()
else:
minibatch.sort(key=self.sort_key, reverse=True)
if self.yield_raw_example:
yield minibatch[0]
else:
yield torchtext.data.Batch(
minibatch,
self.dataset,
self.device)
if not self.repeat:
return
class MultipleDatasetIterator(object):
"""
This takes a list of iterable objects (DatasetLazyIter) and their
respective weights, and yields a batch in the wanted proportions.
"""
def __init__(self,
train_shards,
fields,
device,
opt):
self.index = -1
self.iterables = []
for shard in train_shards:
self.iterables.append(
build_dataset_iter(shard, fields, opt, multi=True))
self.init_iterators = True
self.weights = opt.data_weights
self.batch_size = opt.batch_size
self.batch_size_fn = max_tok_len \
if opt.batch_type == "tokens" else None
self.batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
self.device = device
# Temporarily load one shard to retrieve sort_key for data_type
temp_dataset = torch.load(self.iterables[0]._paths[0])
self.sort_key = temp_dataset.sort_key
self.random_shuffler = RandomShuffler()
self.pool_factor = opt.pool_factor
del temp_dataset
def _iter_datasets(self):
if self.init_iterators:
self.iterators = [iter(iterable) for iterable in self.iterables]
self.init_iterators = False
for weight in self.weights:
self.index = (self.index + 1) % len(self.iterators)
for i in range(weight):
yield self.iterators[self.index]
def _iter_examples(self):
for iterator in cycle(self._iter_datasets()):
yield next(iterator)
def __iter__(self):
while True:
for minibatch in _pool(
self._iter_examples(),
self.batch_size,
self.batch_size_fn,
self.batch_size_multiple,
self.sort_key,
self.random_shuffler,
self.pool_factor):
minibatch = sorted(minibatch, key=self.sort_key, reverse=True)
yield torchtext.data.Batch(minibatch,
self.iterables[0].dataset,
self.device)
class DatasetLazyIter(object):
"""Yield data from sharded dataset files.
Args:
dataset_paths: a list containing the locations of dataset files.
fields (dict[str, Field]): fields dict for the
datasets.
batch_size (int): batch size.
batch_size_fn: custom batch process function.
device: See :class:`OrderedIterator` ``device``.
is_train (bool): train or valid?
"""
def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,
batch_size_multiple, device, is_train, pool_factor,
repeat=True, num_batches_multiple=1, yield_raw_example=False):
self._paths = dataset_paths
self.fields = fields
self.batch_size = batch_size
self.batch_size_fn = batch_size_fn
self.batch_size_multiple = batch_size_multiple
self.device = device
self.is_train = is_train
self.repeat = repeat
self.num_batches_multiple = num_batches_multiple
self.yield_raw_example = yield_raw_example
self.pool_factor = pool_factor
def _iter_dataset(self, path):
logger.info('Loading dataset from %s' % path)
cur_dataset = torch.load(path)
logger.info('number of examples: %d' % len(cur_dataset))
cur_dataset.fields = self.fields
cur_iter = OrderedIterator(
dataset=cur_dataset,
batch_size=self.batch_size,
pool_factor=self.pool_factor,
batch_size_multiple=self.batch_size_multiple,
batch_size_fn=self.batch_size_fn,
device=self.device,
train=self.is_train,
sort=False,
sort_within_batch=True,
repeat=False,
yield_raw_example=self.yield_raw_example
)
for batch in cur_iter:
self.dataset = cur_iter.dataset
yield batch
# NOTE: This is causing some issues for consumer/producer,
# as we may still have some of those examples in some queue
# cur_dataset.examples = None
# gc.collect()
# del cur_dataset
# gc.collect()
def __iter__(self):
num_batches = 0
paths = self._paths
if self.is_train and self.repeat:
# Cycle through the shards indefinitely.
paths = cycle(paths)
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if self.is_train and not self.repeat and \
num_batches % self.num_batches_multiple != 0:
# When the dataset is not repeated, we might need to ensure that
# the number of returned batches is the multiple of a given value.
# This is important for multi GPU training to ensure that all
# workers have the same number of batches to process.
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if num_batches % self.num_batches_multiple == 0:
return
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
def build_dataset_iter(corpus_type, fields, opt, is_train=True, multi=False):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement simple ordered iterator strategy here,
but more sophisticated strategy like curriculum learning is ok too.
"""
dataset_paths = list(sorted(
glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt')))
if not dataset_paths:
if is_train:
raise ValueError('Training data %s not found' % opt.data)
else:
return None
if multi:
batch_size = 1
batch_fn = None
batch_size_multiple = 1
else:
batch_size = opt.batch_size if is_train else opt.valid_batch_size
batch_fn = max_tok_len \
if is_train and opt.batch_type == "tokens" else None
batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
device = "cuda" if opt.gpu_ranks else "cpu"
return DatasetLazyIter(
dataset_paths,
fields,
batch_size,
batch_fn,
batch_size_multiple,
device,
is_train,
opt.pool_factor,
repeat=not opt.single_pass,
num_batches_multiple=max(opt.accum_count) * opt.world_size,
yield_raw_example=multi)
def build_dataset_iter_multiple(train_shards, fields, opt):
return MultipleDatasetIterator(
train_shards, fields, "cuda" if opt.gpu_ranks else "cpu", opt)
| 31,503 | 35.590012 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/inputters/audio_dataset.py | # -*- coding: utf-8 -*-
import os
from tqdm import tqdm
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# imports of datatype-specific dependencies
try:
import torchaudio
import librosa
import numpy as np
except ImportError:
torchaudio, librosa, np = None, None, None
class AudioDataReader(DataReaderBase):
    """Read audio data from disk.
    Args:
        sample_rate (int): sample_rate.
        window_size (float) : window size for spectrogram in seconds.
        window_stride (float): window stride for spectrogram in seconds.
        window (str): window type for spectrogram generation. See
            :func:`librosa.stft()` ``window`` for more details.
        normalize_audio (bool): subtract spectrogram by mean and divide
            by std or not.
        truncate (int or NoneType): maximum audio length
            (0 or None for unlimited).
    Raises:
        onmt.inputters.datareader_base.MissingDependencyException: If
            importing any of ``torchaudio``, ``librosa``, or ``numpy`` fail.
    """
    def __init__(self, sample_rate=0, window_size=0, window_stride=0,
                 window=None, normalize_audio=True, truncate=None):
        self._check_deps()
        self.sample_rate = sample_rate
        self.window_size = window_size
        self.window_stride = window_stride
        self.window = window
        self.normalize_audio = normalize_audio
        self.truncate = truncate
    @classmethod
    def from_opt(cls, opt):
        # Build a reader from command-line options. Note that
        # normalize_audio and truncate keep their defaults here.
        return cls(sample_rate=opt.sample_rate, window_size=opt.window_size,
                   window_stride=opt.window_stride, window=opt.window)
    @classmethod
    def _check_deps(cls):
        # Fail early with a uniform error message if any of the optional
        # audio dependencies is missing.
        if any([torchaudio is None, librosa is None, np is None]):
            cls._raise_missing_dep(
                "torchaudio", "librosa", "numpy")
    def extract_features(self, audio_path):
        """Load ``audio_path`` and return its (optionally normalized)
        log-magnitude spectrogram as a 2-D FloatTensor (features x frames)."""
        # torchaudio loading options recently changed. It's probably
        # straightforward to rewrite the audio handling to make use of
        # up-to-date torchaudio, but in the meantime there is a legacy
        # method which uses the old defaults
        sound, sample_rate_ = torchaudio.legacy.load(audio_path)
        if self.truncate and self.truncate > 0:
            # truncate is expressed in samples, not seconds
            if sound.size(0) > self.truncate:
                sound = sound[:self.truncate]
        assert sample_rate_ == self.sample_rate, \
            'Sample rate of %s != -sample_rate (%d vs %d)' \
            % (audio_path, sample_rate_, self.sample_rate)
        sound = sound.numpy()
        if len(sound.shape) > 1:
            # collapse multi-channel audio down to mono
            if sound.shape[1] == 1:
                sound = sound.squeeze()
            else:
                sound = sound.mean(axis=1)  # average multiple channels
        # window size/stride are given in seconds; convert to sample counts
        n_fft = int(self.sample_rate * self.window_size)
        win_length = n_fft
        hop_length = int(self.sample_rate * self.window_stride)
        # STFT
        d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                         win_length=win_length, window=self.window)
        spect, _ = librosa.magphase(d)
        spect = np.log1p(spect)
        spect = torch.FloatTensor(spect)
        if self.normalize_audio:
            # per-utterance mean/std normalization of the log spectrogram
            mean = spect.mean()
            std = spect.std()
            spect.add_(-mean)
            spect.div_(std)
        return spect
    def read(self, data, side, src_dir=None):
        """Read data into dicts.
        Args:
            data (str or Iterable[str]): Sequence of audio paths or
                path to file containing audio paths.
                In either case, the filenames may be relative to ``src_dir``
                (default behavior) or absolute.
            side (str): Prefix used in return dict. Usually
                ``"src"`` or ``"tgt"``.
            src_dir (str): Location of source audio files. See ``data``.
        Yields:
            A dictionary containing audio data for each line.
        """
        assert src_dir is not None and os.path.exists(src_dir),\
            "src_dir must be a valid directory if data_type is audio"
        if isinstance(data, str):
            data = DataReaderBase._read_file(data)
        for i, line in enumerate(tqdm(data)):
            line = line.decode("utf-8").strip()
            audio_path = os.path.join(src_dir, line)
            if not os.path.exists(audio_path):
                # fall back to treating the entry as an absolute path
                audio_path = line
            assert os.path.exists(audio_path), \
                'audio path %s not found' % line
            spect = self.extract_features(audio_path)
            yield {side: spect, side + '_path': line, 'indices': i}
def audio_sort_key(ex):
    """Sort key: number of frames (time axis) of the sound spectrogram."""
    spectrogram = ex.src
    return spectrogram.size(1)
class AudioSeqField(Field):
    """Defines an audio datatype and instructions for converting to Tensor.
    See :class:`Fields` for attribute descriptions.
    """
    def __init__(self, preprocessing=None, postprocessing=None,
                 include_lengths=False, batch_first=False, pad_index=0,
                 is_target=False):
        # Audio is always sequential, real-valued and vocabulary-free;
        # only the padding/batching behaviour is configurable.
        super(AudioSeqField, self).__init__(
            sequential=True, use_vocab=False, init_token=None,
            eos_token=None, fix_length=False, dtype=torch.float,
            preprocessing=preprocessing, postprocessing=postprocessing,
            lower=False, tokenize=None, include_lengths=include_lengths,
            batch_first=batch_first, pad_token=pad_index, unk_token=None,
            pad_first=False, truncate_first=False, stop_words=None,
            is_target=is_target
        )
    def pad(self, minibatch):
        """Pad a batch of examples to the length of the longest example.
        Args:
            minibatch (List[torch.FloatTensor]): A list of audio data,
                each having shape 1 x n_feats x len where len is variable.
        Returns:
            torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
                padded tensor of shape ``(batch_size, 1, n_feats, max_len)``.
                and a list of the lengths if `self.include_lengths` is `True`
                else just returns the padded tensor.
        """
        # only the simple left-aligned padding configuration is supported
        assert not self.pad_first and not self.truncate_first \
            and not self.fix_length and self.sequential
        minibatch = list(minibatch)
        lengths = [x.size(1) for x in minibatch]  # frames per example
        max_len = max(lengths)
        # feature axis; assumed equal across the batch
        nfft = minibatch[0].size(0)
        sounds = torch.full((len(minibatch), 1, nfft, max_len), self.pad_token)
        for i, (spect, len_) in enumerate(zip(minibatch, lengths)):
            # copy each spectrogram into the left part of its padded slot
            sounds[i, :, :, 0:len_] = spect
        if self.include_lengths:
            return (sounds, lengths)
        return sounds
    def numericalize(self, arr, device=None):
        """Turn a batch of examples that use this field into a Variable.
        If the field has ``include_lengths=True``, a tensor of lengths will be
        included in the return value.
        Args:
            arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
                List of tokenized and padded examples, or tuple of List of
                tokenized and padded examples and List of lengths of each
                example if self.include_lengths is True. Examples have shape
                ``(batch_size, 1, n_feats, max_len)`` if `self.batch_first`
                else ``(max_len, batch_size, 1, n_feats)``.
            device (str or torch.device): See `Field.numericalize`.
        """
        assert self.use_vocab is False
        if self.include_lengths and not isinstance(arr, tuple):
            raise ValueError("Field has include_lengths set to True, but "
                             "input data is not a tuple of "
                             "(data batch, batch lengths).")
        if isinstance(arr, tuple):
            arr, lengths = arr
            lengths = torch.tensor(lengths, dtype=torch.int, device=device)
        if self.postprocessing is not None:
            arr = self.postprocessing(arr, None)
        if self.sequential and not self.batch_first:
            # time-major layout: (batch, 1, nfft, len) -> (len, batch, 1, nfft)
            arr = arr.permute(3, 0, 1, 2)
        if self.sequential:
            arr = arr.contiguous()
        arr = arr.to(device)
        if self.include_lengths:
            return arr, lengths
        return arr
def audio_fields(**kwargs):
    """Build the field used for audio data.

    ``kwargs`` is accepted (and ignored) for parity with the other
    datatype field constructors.
    """
    field = AudioSeqField(pad_index=0, batch_first=True, include_lengths=True)
    return field
| 8,459 | 36.93722 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/inputters/image_dataset.py | # -*- coding: utf-8 -*-
import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# domain specific dependencies
try:
from PIL import Image
from torchvision import transforms
import cv2
except ImportError:
Image, transforms, cv2 = None, None, None
class ImageDataReader(DataReaderBase):
    """Read image data from disk.
    Args:
        truncate (tuple[int] or NoneType): maximum img size. Use
            ``(0,0)`` or ``None`` for unlimited.
        channel_size (int): Number of channels per image.
    Raises:
        onmt.inputters.datareader_base.MissingDependencyException: If
            importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.
    """
    def __init__(self, truncate=None, channel_size=3):
        self._check_deps()
        self.truncate = truncate
        self.channel_size = channel_size
    @classmethod
    def from_opt(cls, opt):
        # truncate keeps its default (no size limit) when built from options
        return cls(channel_size=opt.image_channel_size)
    @classmethod
    def _check_deps(cls):
        # fail early with a uniform error if an optional dependency is absent
        if any([Image is None, transforms is None, cv2 is None]):
            cls._raise_missing_dep(
                "PIL", "torchvision", "cv2")
    def read(self, images, side, img_dir=None):
        """Read data into dicts.
        Args:
            images (str or Iterable[str]): Sequence of image paths or
                path to file containing audio paths.
                In either case, the filenames may be relative to ``src_dir``
                (default behavior) or absolute.
            side (str): Prefix used in return dict. Usually
                ``"src"`` or ``"tgt"``.
            img_dir (str): Location of source image files. See ``images``.
        Yields:
            a dictionary containing image data, path and index for each line.
        """
        if isinstance(images, str):
            images = DataReaderBase._read_file(images)
        for i, filename in enumerate(images):
            filename = filename.decode("utf-8").strip()
            img_path = os.path.join(img_dir, filename)
            if not os.path.exists(img_path):
                # fall back to treating the entry as an absolute path
                img_path = filename
            assert os.path.exists(img_path), \
                'img path %s not found' % filename
            if self.channel_size == 1:
                # read as grayscale with cv2, then convert to a 1 x H x W tensor
                img = transforms.ToTensor()(
                    Image.fromarray(cv2.imread(img_path, 0)))
            else:
                img = transforms.ToTensor()(Image.open(img_path))
            if self.truncate and self.truncate != (0, 0):
                # silently SKIP (not crop) images exceeding the (h, w) limit
                if not (img.size(1) <= self.truncate[0]
                        and img.size(2) <= self.truncate[1]):
                    continue
            yield {side: img, side + '_path': filename, 'indices': i}
def img_sort_key(ex):
    """Sort key: (width, height) of the source image tensor."""
    image = ex.src
    return image.size(2), image.size(1)
def batch_img(data, vocab):
    """Pad and batch a sequence of images.

    Args:
        data (List[torch.Tensor]): images, each of shape ``(c, h_i, w_i)``
            with a shared channel count ``c`` but varying spatial sizes.
        vocab: unused; present to match the torchtext ``postprocessing``
            callback signature.

    Returns:
        torch.FloatTensor: batch of shape ``(len(data), c, max_h, max_w)``;
        each image is copied into the top-left corner of its slot and the
        rest is filled with the pad value 1 (presumably white background
        for [0, 1]-scaled inputs -- TODO confirm against the reader).
    """
    c = data[0].size(0)
    h = max(t.size(1) for t in data)
    w = max(t.size(2) for t in data)
    # torch.ones replaces the original torch.zeros(...).fill_(1): same
    # result, one allocation pass instead of two and clearer intent.
    imgs = torch.ones(len(data), c, h, w)
    for i, img in enumerate(data):
        imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
    return imgs
def image_fields(**kwargs):
    """Build the torchtext field for image data.

    Images are non-sequential float tensors, batched via ``batch_img``.
    ``kwargs`` is accepted (and ignored) for API parity.
    """
    img_field = Field(use_vocab=False,
                      dtype=torch.float,
                      postprocessing=batch_img,
                      sequential=False)
    return img_field
| 3,378 | 30.579439 | 77 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/inputters/vec_dataset.py | import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
try:
import numpy as np
except ImportError:
np = None
class VecDataReader(DataReaderBase):
    """Read feature-vector data (saved numpy arrays) from disk.

    Raises:
        onmt.inputters.datareader_base.MissingDependencyException: If
            importing ``np`` fails.
    """

    def __init__(self):
        self._check_deps()

    @classmethod
    def _check_deps(cls):
        # fail early with a uniform error if numpy is absent
        if np is None:
            cls._raise_missing_dep("np")

    def read(self, vecs, side, vec_dir=None):
        """Read data into dicts.

        Args:
            vecs (str or Iterable[str]): Sequence of feature vector paths
                or path to a file containing feature vector paths. The
                filenames may be relative to ``vec_dir`` (default
                behavior) or absolute.
            side (str): Prefix used in return dict. Usually
                ``"src"`` or ``"tgt"``.
            vec_dir (str): Location of source vectors. See ``vecs``.

        Yields:
            A dictionary containing feature vector data.
        """
        if isinstance(vecs, str):
            vecs = DataReaderBase._read_file(vecs)
        for index, raw_line in enumerate(vecs):
            name = raw_line.decode("utf-8").strip()
            path = os.path.join(vec_dir, name)
            if not os.path.exists(path):
                # fall back to treating the entry as an absolute path
                path = name
            assert os.path.exists(path), \
                'vec path %s not found' % name
            features = np.load(path)
            yield {side: torch.from_numpy(features),
                   side + "_path": name, "indices": index}
def vec_sort_key(ex):
    """Sort key: length (first axis) of the feature-vector sequence."""
    feats = ex.src
    return feats.shape[0]
class VecSeqField(Field):
    """Defines an vector datatype and instructions for converting to Tensor.
    See :class:`Fields` for attribute descriptions.
    """
    def __init__(self, preprocessing=None, postprocessing=None,
                 include_lengths=False, batch_first=False, pad_index=0,
                 is_target=False):
        # Vector sequences are always sequential, real-valued and
        # vocabulary-free; only padding/batching behaviour is configurable.
        super(VecSeqField, self).__init__(
            sequential=True, use_vocab=False, init_token=None,
            eos_token=None, fix_length=False, dtype=torch.float,
            preprocessing=preprocessing, postprocessing=postprocessing,
            lower=False, tokenize=None, include_lengths=include_lengths,
            batch_first=batch_first, pad_token=pad_index, unk_token=None,
            pad_first=False, truncate_first=False, stop_words=None,
            is_target=is_target
        )
    def pad(self, minibatch):
        """Pad a batch of examples to the length of the longest example.
        Args:
            minibatch (List[torch.FloatTensor]): A list of audio data,
                each having shape ``(len, n_feats, feat_dim)``
                where len is variable.
        Returns:
            torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
                padded tensor of shape
                ``(batch_size, max_len, n_feats, feat_dim)``.
                and a list of the lengths if `self.include_lengths` is `True`
                else just returns the padded tensor.
        """
        # only the simple left-aligned padding configuration is supported
        assert not self.pad_first and not self.truncate_first \
            and not self.fix_length and self.sequential
        minibatch = list(minibatch)
        lengths = [x.size(0) for x in minibatch]  # steps per example
        max_len = max(lengths)
        # feature dims; assumed equal across the batch
        nfeats = minibatch[0].size(1)
        feat_dim = minibatch[0].size(2)
        feats = torch.full((len(minibatch), max_len, nfeats, feat_dim),
                           self.pad_token)
        for i, (feat, len_) in enumerate(zip(minibatch, lengths)):
            # copy each sequence into the left part of its padded slot
            feats[i, 0:len_, :, :] = feat
        if self.include_lengths:
            return (feats, lengths)
        return feats
    def numericalize(self, arr, device=None):
        """Turn a batch of examples that use this field into a Variable.
        If the field has ``include_lengths=True``, a tensor of lengths will be
        included in the return value.
        Args:
            arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
                List of tokenized and padded examples, or tuple of List of
                tokenized and padded examples and List of lengths of each
                example if self.include_lengths is True.
            device (str or torch.device): See `Field.numericalize`.
        """
        assert self.use_vocab is False
        if self.include_lengths and not isinstance(arr, tuple):
            raise ValueError("Field has include_lengths set to True, but "
                             "input data is not a tuple of "
                             "(data batch, batch lengths).")
        if isinstance(arr, tuple):
            arr, lengths = arr
            lengths = torch.tensor(lengths, dtype=torch.int, device=device)
        # note: the move to `device` happens before postprocessing here
        arr = arr.to(device)
        if self.postprocessing is not None:
            arr = self.postprocessing(arr, None)
        if self.sequential and not self.batch_first:
            # time-major layout: (batch, len, n_feats, feat_dim)
            # -> (len, batch, n_feats, feat_dim)
            arr = arr.permute(1, 0, 2, 3)
        if self.sequential:
            arr = arr.contiguous()
        if self.include_lengths:
            return arr, lengths
        return arr
def vec_fields(**kwargs):
    """Build the field used for feature-vector data.

    ``kwargs`` is accepted (and ignored) for parity with the other
    datatype field constructors.
    """
    field = VecSeqField(pad_index=0, include_lengths=True)
    return field
| 5,447 | 35.32 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/sparse_losses.py | import torch
import torch.nn as nn
from torch.autograd import Function
from onmt.modules.sparse_activations import _threshold_and_support
from onmt.utils.misc import aeq
class SparsemaxLossFunction(Function):
    @staticmethod
    def forward(ctx, input, target):
        """
        input (FloatTensor): ``(n, num_classes)``.
        target (LongTensor): ``(n,)``, the indices of the target classes
        """
        input_batch, classes = input.size()
        target_batch = target.size(0)
        aeq(input_batch, target_batch)
        # score of the gold class for each row
        z_k = input.gather(1, target.unsqueeze(1)).squeeze()
        tau_z, support_size = _threshold_and_support(input, dim=1)
        support = input > tau_z
        # sum of (z_j^2 - tau^2) over the support of each row
        x = torch.where(
            support, input**2 - tau_z**2,
            torch.tensor(0.0, device=input.device)
        ).sum(dim=1)
        ctx.save_for_backward(input, target, tau_z)
        # clamping necessary because of numerical errors: loss should be lower
        # bounded by zero, but negative values near zero are possible without
        # the clamp
        return torch.clamp(x / 2 - z_k + 0.5, min=0.0)
    @staticmethod
    def backward(ctx, grad_output):
        input, target, tau_z = ctx.saved_tensors
        # gradient is sparsemax(input) minus the one-hot target vector
        sparsemax_out = torch.clamp(input - tau_z, min=0)
        delta = torch.zeros_like(sparsemax_out)
        delta.scatter_(1, target.unsqueeze(1), 1)
        return sparsemax_out - delta, None
# functional-style alias, analogous to torch.nn.functional losses
sparsemax_loss = SparsemaxLossFunction.apply
class SparsemaxLoss(nn.Module):
    """
    Sparsemax loss, first proposed in :cite:`DBLP:journals/corr/MartinsA16`.

    The natural companion to a sparse output layer: negative log likelihood
    is unusable there because the target class may receive exactly zero
    probability (infinite loss). Inputs are arbitrary dense real-valued
    score vectors (as in nn.CrossEntropyLoss), not probability vectors
    (as in nn.NLLLoss).
    """

    def __init__(self, weight=None, ignore_index=-100,
                 reduction='elementwise_mean'):
        assert reduction in ['elementwise_mean', 'sum', 'none']
        super(SparsemaxLoss, self).__init__()
        self.reduction = reduction
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, input, target):
        loss = sparsemax_loss(input, target)
        if self.ignore_index >= 0:
            # zero out ignored positions and average over the rest only
            ignored = target == self.ignore_index
            size = float((target.size(0) - ignored.sum()).item())
            loss.masked_fill_(ignored, 0.0)
        else:
            size = float(target.size(0))
        if self.reduction == 'elementwise_mean':
            return loss.sum() / size
        if self.reduction == 'sum':
            return loss.sum()
        return loss
| 2,804 | 35.428571 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/sparse_activations.py | """
An implementation of sparsemax (Martins & Astudillo, 2016). See
:cite:`DBLP:journals/corr/MartinsA16` for detailed description.
By Ben Peters and Vlad Niculae
"""
import torch
from torch.autograd import Function
import torch.nn as nn
def _make_ix_like(input, dim=0):
d = input.size(dim)
rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
view = [1] * input.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _threshold_and_support(input, dim=0):
    """Sparsemax building block: compute the threshold
    Args:
        input: any dimension
        dim: dimension along which to apply the sparsemax
    Returns:
        the threshold value
    """
    # sort scores in decreasing order along dim
    input_srt, _ = torch.sort(input, descending=True, dim=dim)
    input_cumsum = input_srt.cumsum(dim) - 1
    rhos = _make_ix_like(input, dim)
    # k is in the support iff k * z_(k) > (cumsum of top-k scores) - 1
    support = rhos * input_srt > input_cumsum
    # support size per slice, kept with a singleton dim for gather/broadcast
    support_size = support.sum(dim=dim).unsqueeze(dim)
    # tau = ((sum of scores over the support) - 1) / |support|
    tau = input_cumsum.gather(dim, support_size - 1)
    tau /= support_size.to(input.dtype)
    return tau, support_size
class SparsemaxFunction(Function):

    @staticmethod
    def forward(ctx, input, dim=0):
        """sparsemax: normalizing sparse transform (a la softmax)

        Parameters:
            input (Tensor): any shape
            dim: dimension along which to apply sparsemax

        Returns:
            output (Tensor): same shape as input
        """
        ctx.dim = dim
        max_val, _ = input.max(dim=dim, keepdim=True)
        # Shift by the max for numerical stability (sparsemax is
        # shift-invariant). BUG FIX: the original used the in-place
        # ``input -= max_val``, which silently mutated the caller's tensor;
        # autograd Function inputs must not be modified in place.
        input = input - max_val
        tau, supp_size = _threshold_and_support(input, dim=dim)
        output = torch.clamp(input - tau, min=0)
        ctx.save_for_backward(supp_size, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        supp_size, output = ctx.saved_tensors
        dim = ctx.dim
        grad_input = grad_output.clone()
        # zero entries outside the support get zero gradient
        grad_input[output == 0] = 0
        # subtract the mean gradient over the support (projection Jacobian)
        v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze()
        v_hat = v_hat.unsqueeze(dim)
        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
        return grad_input, None


# functional-style alias
sparsemax = SparsemaxFunction.apply
class Sparsemax(nn.Module):
    """Module wrapper applying the ``sparsemax`` transform along ``dim``."""

    def __init__(self, dim=0):
        super(Sparsemax, self).__init__()
        self.dim = dim

    def forward(self, input):
        return sparsemax(input, self.dim)
class LogSparsemax(nn.Module):
    """Module computing ``log(sparsemax(x))`` along a fixed dimension."""

    def __init__(self, dim=0):
        super(LogSparsemax, self).__init__()
        self.dim = dim

    def forward(self, input):
        return torch.log(sparsemax(input, self.dim))
| 2,649 | 26.040816 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/structured_attention.py | import torch.nn as nn
import torch
import torch.cuda
class MatrixTree(nn.Module):
    """Matrix-tree theorem marginals for non-projective dependency parsing.

    Attention layer used in "Learning Structured Text Representations"
    :cite:`DBLP:journals/corr/LiuL17d`. ``eps`` is added to the exponentiated
    scores to keep the Laplacian invertible.
    """

    def __init__(self, eps=1e-5):
        super(MatrixTree, self).__init__()
        self.eps = eps

    def forward(self, input):
        laplacian = input.exp() + self.eps
        output = input.clone()
        n = input.size(1)
        # True on the diagonal; used to zero self-arcs before building L
        diag_mask = torch.eye(n, device=input.device).ne(0)
        for b in range(input.size(0)):
            exp_b = input[b].exp()
            lap = laplacian[b].masked_fill(diag_mask, 0)
            lap = torch.diag(lap.sum(0)) - lap
            # store root scores on the first row of the Laplacian
            lap[0] = input[b].diag().exp()
            inv_lap = lap.inverse()
            inv_lap_t = inv_lap.transpose(0, 1)
            factor = inv_lap.diag().unsqueeze(1)\
                .expand_as(input[b]).transpose(0, 1)
            term1 = exp_b.mul(factor)
            term2 = exp_b.mul(inv_lap_t)
            term1[:, 0] = 0
            term2[0] = 0
            # root marginals live on the output diagonal
            roots = input[b].diag().exp().mul(inv_lap_t[0])
            output[b] = term1 - term2 + torch.diag(roots)
        return output
| 1,414 | 35.282051 | 77 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/util_class.py | """ Misc classes """
import torch
import torch.nn as nn
# At the moment this class is only used by embeddings.Embeddings look-up tables
class Elementwise(nn.ModuleList):
    """
    A simple network container.

    Parameters are a list of modules. Inputs are a 3d Tensor whose last
    dimension is the same length as the list; slice ``i`` along that
    dimension is fed to module ``i``. The optional ``merge`` parameter
    reduces the per-module outputs to a single Tensor ('first', 'concat',
    'sum', 'mlp' -- which concatenates like 'concat') or, when None,
    returns the list of outputs unchanged.
    """

    def __init__(self, merge=None, *args):
        assert merge in [None, 'first', 'concat', 'sum', 'mlp']
        super(Elementwise, self).__init__(*args)
        self.merge = merge

    def forward(self, inputs):
        # one (squeezed) slice of the last dimension per submodule
        slices = [chunk.squeeze(2) for chunk in inputs.split(1, dim=2)]
        assert len(self) == len(slices)
        outputs = [module(x) for module, x in zip(self, slices)]
        if self.merge == 'first':
            return outputs[0]
        if self.merge in ('concat', 'mlp'):
            return torch.cat(outputs, 2)
        if self.merge == 'sum':
            return sum(outputs)
        return outputs
class Cast(nn.Module):
    """
    Cast input tensors to a fixed data type.

    ``Tensor.to`` returns the tensor itself when the dtype already matches,
    so this layer is a no-op in that case.
    """

    def __init__(self, dtype):
        super(Cast, self).__init__()
        self._dtype = dtype

    def forward(self, x):
        return x.to(self._dtype)
| 1,486 | 29.346939 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/hierarchical_attention.py | from ..utils.misc import aeq
from .sparse_activations import sparsemax
from torch.nn.utils.rnn import pad_sequence
import torch
import onmt
class ContainsNaN(Exception):
    """Raised when a tensor unexpectedly contains NaN entries."""
    pass
def _check_for_nan(tensor, msg=''):
if (tensor!=tensor).any():
raise ContainsNaN(msg)
def _check_sizes(tensor, *sizes):
for dim, (s, _s) in enumerate(zip(tensor.shape, sizes)):
assert s == _s, f'dim {dim} are not of equal sizes'
class AttentionScorer(torch.nn.Module):
    """Compute raw (unnormalized) attention scores between queries and keys.

    ``dim`` is either a single int (query and key share the dimension,
    required for ``attn_type="dot"``) or a ``(dim_query, dim_key)`` tuple,
    where dim_query is dim of the decoder and dim_key is dim of the
    encoder output.
    """
    def __init__(self, dim, attn_type):
        super().__init__()
        if isinstance(dim, tuple):
            assert len(dim) == 2
            assert isinstance(dim[0], int)
            assert isinstance(dim[1], int)
            # "dot" requires matching dims, so mixed dims are rejected
            assert attn_type != 'dot'
            self.dim_query = dim[0]
            self.dim_key = dim[1]
        elif isinstance(dim, int):
            self.dim_query = dim
            self.dim_key = dim
        else:
            raise ValueError('dim should a one or two ints')
        self.attn_type = attn_type
        if self.attn_type == "general":
            # bilinear scoring: project queries into key space first
            self.linear_in = torch.nn.Linear(self.dim_query,
                                             self.dim_key,
                                             bias=False)
        elif self.attn_type == "mlp":
            # Bahdanau-style additive scoring
            self.linear_context = torch.nn.Linear(self.dim_key,
                                                  self.dim_key,
                                                  bias=False)
            self.linear_query = torch.nn.Linear(self.dim_query,
                                                self.dim_key,
                                                bias=True)
            self.v = torch.nn.Linear(self.dim_key, 1, bias=False)
    def forward(self, h_t, h_s):
        """
        Args:
          h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim``
        Returns:
          FloatTensor: raw attention scores (unnormalized) for each src index
            ``(batch, tgt_len, src_len)``
        """
        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, self.dim_key)
        aeq(tgt_dim, self.dim_query)
        if self.attn_type in ["general", "dot"]:
            if self.attn_type == "general":
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, src_dim)
            h_s_ = h_s.transpose(1, 2)
            # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
            # where d is self.dim_key
            return torch.bmm(h_t, h_s_)
        else:
            wq = self.linear_query(h_t.view(-1, tgt_dim))
            wq = wq.view(tgt_batch, tgt_len, 1, src_dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, src_dim)
            uh = self.linear_context(h_s.contiguous().view(-1, src_dim))
            uh = uh.view(src_batch, 1, src_len, src_dim)
            uh = uh.expand(src_batch, tgt_len, src_len, src_dim)
            # (batch, t_len, s_len, d)
            wquh = torch.tanh(wq + uh)
            # BUG FIX: the original flattened with an undefined name ``dim``
            # (NameError whenever attn_type == "mlp"); the flattened feature
            # size is src_dim (== self.dim_key).
            return self.v(wquh.view(-1, src_dim)).view(tgt_batch, tgt_len,
                                                       src_len)
class HierarchicalAttention(torch.nn.Module):
    """Two-level attention over a structured source: token ("unit") scores
    are computed inside each fixed-size entity, entity ("chunk") scores
    across entities, and the two distributions are combined into one
    distribution over all source tokens. Entities are assumed to span
    exactly ``onmt.ENT_SIZE`` tokens each."""
    def __init__(self, dim, coverage=False, attn_type="dot",
                 attn_func="softmax", use_pos=True):
        super().__init__()
        assert not coverage
        self.ent_size = onmt.ENT_SIZE
        # when True, unit scores are computed against positional embeddings
        # instead of the raw encoder states (see forward)
        self.use_pos = use_pos
        # dims shenanigans. memory_bank should be dim[0]
        if isinstance(dim, tuple):
            assert len(dim) == 2
            assert isinstance(dim[0], int)
            assert isinstance(dim[1], int)
            self.chunks_dim = dim[0]
            self.units_dim = dim[1]
        elif isinstance(dim, int):
            self.chunks_dim = dim
            self.units_dim = dim
        else:
            raise ValueError('dim should be one or two ints')
        if attn_func == 'softmax':
            self.attn_func = torch.nn.functional.softmax
        elif attn_func == 'sparsemax':
            self.attn_func = sparsemax
        else:
            raise ValueError("Please select a valid attention function.")
        assert attn_type in ["dot", "general", "mlp"], (
            "Please select a valid attention type (got {:s}).".format(
                attn_type))
        self.attn_type = attn_type
        self.unit_scorer = AttentionScorer((self.chunks_dim, self.units_dim),
                                           attn_type)
        self.chunk_scorer = AttentionScorer(self.chunks_dim, attn_type)
        # mlp wants it with bias, others no
        self.linear_out = torch.nn.Linear(self.chunks_dim * 2,
                                          self.chunks_dim,
                                          bias=(attn_type=="mlp"))
    def forward(self, source, memory_bank):
        """
        Args:
          source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
          memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
        Returns:
          (FloatTensor, FloatTensor):
          * Computed vector ``(tgt_len, batch, dim)``
          * Attention distributions for each query
            ``(tgt_len, batch, src_len)``
        In this setup, tgt_len will always be equal to one, due to inputfeeding
        """
        # assert one step input
        assert source.dim() == 2
        source = source.unsqueeze(1)
        # Unpacking memory_bank (we reassign memory_bank to optimize memory
        # and minimize errors when copy/paste existing code)
        # we transpose the batch_dim for the scoring compute
        chunks, memory_bank, pos_embs, units_mask, chunk_mask = memory_bank
        chunks = chunks.transpose(0, 1)
        memory_bank = memory_bank.transpose(0, 1)
        pos_embs = pos_embs.transpose(0, 1)
        units_mask = units_mask.transpose(0, 1)
        chunk_mask = chunk_mask.transpose(0, 1)
        # _check_for_nan(chunks)
        # _check_for_nan(memory_bank)
        # _check_for_nan(pos_embs)
        # Checks and balances
        batch_size, source_l, dim = memory_bank.size()
        batch_, target_l, dim_ = source.size()
        aeq(batch_size, batch_)
        aeq(dim, dim_)
        aeq(self.chunks_dim, dim)
        # compute attention scores, as in Luong et al.
        # align_units is [batch_size, src_len]
        # align_chunks is [batch_size, 1, n_ents]
        if self.use_pos:
            align_units = self.unit_scorer(source, pos_embs).squeeze(1)
        else:
            align_units = self.unit_scorer(source, memory_bank).squeeze(1)
        align_chunks = self.chunk_scorer(source, chunks)
        # we compute the softmax first on the unit level
        # - we reshape so that each row is an entity
        # - we mask the padding and the <ent> token
        # - we softmax
        # - we flatten the scores again
        _check_for_nan(align_units, 'align units scores') # sanity check (1)
        align_units = align_units.view(batch_size, -1, self.ent_size)
        align_units = align_units.masked_fill(units_mask, float('-inf'))
        _check_for_nan(align_units, 'align units scores filled with -inf') # sanity check (2)
        # tricky block
        # we softmax on the last dim, ie: separately on each entity
        # However, some entity might be full <pad>, meaning full -inf
        # giving NaN when softmax is computed (dividing by zero)
        # We find those nan and remove them
        align_units = self.attn_func(align_units, -1) # softmax
        nan_mask = (align_units != align_units).sum(dim=2).ne(0) # nan != nan
        if nan_mask.sum().item():
            align_units = align_units.masked_fill(nan_mask.unsqueeze(-1), 0)
        _check_for_nan(align_units, 'align units after attn_func') # sanity check (3)
        # we flatten the scores again
        align_units = align_units.view(batch_size, 1, -1)
        # Now the second level of attention, on the <ent> tokens
        align_chunks.masked_fill_(chunk_mask, float('-inf'))
        align_chunks = self.attn_func(align_chunks, -1)
        # align_chunks = sparsemax(align_chunks, -1)
        _check_for_nan(align_chunks, 'align_chunks after attn_func')
        # To compute the final scores, we weight the unit scores by the chunk
        # score from the chunk to which they belong. We inflate the chunk scores
        # and simply elementwise multiply.
        # It's easy to see that it remains a proba distribution (ie, sums to 1)
        align_chunks_inflated = align_chunks.repeat_interleave(repeats=self.ent_size, dim=-1)
        align_vectors = align_chunks_inflated * align_units
        #print(align_vectors.sum())
        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, memory_bank)
        # concatenate
        concat_c = torch.cat([c, source], 2).view(batch_size*target_l, dim*2)
        attn_h = self.linear_out(concat_c).view(batch_size, target_l, dim)
        if self.attn_type in ["general", "dot"]:
            attn_h = torch.tanh(attn_h)
        # drop the (always singleton) target-length dimension
        attn_h = attn_h.squeeze(1)
        align_vectors = align_vectors.squeeze(1)
        # Check output sizes
        batch_, dim_ = attn_h.size()
        aeq(batch_size, batch_)
        aeq(dim, dim_)
        batch_, source_l_ = align_vectors.size()
        aeq(batch_size, batch_)
        aeq(source_l, source_l_)
        # '' holds the standard (combined) attention; the prefixed entries
        # expose the intermediate chunk- and unit-level distributions
        ret = {
            '': align_vectors,
            '_align_chunks': align_chunks.squeeze(1),
            '_align_units':align_units.squeeze(1)
        }
        return attn_h, ret
| 10,134 | 36.537037 | 94 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/conv_multi_step_attention.py | """ Multi Step Attention for CNN """
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.utils.misc import aeq
# 0.5 ** 0.5 == 1/sqrt(2): residual sums below are rescaled by this factor
# (presumably the variance-preserving trick from ConvS2S -- TODO confirm)
SCALE_WEIGHT = 0.5 ** 0.5
def seq_linear(linear, x):
    """Apply *linear* to each time step of a ``(batch, hidden, length, 1)``
    tensor, returning a tensor of the same shape."""
    batch, hidden_size, length, _ = x.size()
    flat = torch.transpose(x, 1, 2).contiguous().view(
        batch * length, hidden_size)
    projected = linear(flat)
    return torch.transpose(projected.view(batch, length, hidden_size, 1), 1, 2)
class ConvMultiStepAttention(nn.Module):
    """
    Conv attention takes a key matrix, a value matrix and a query vector.
    Attention weight is calculated by key matrix with the query vector
    and sum on the value matrix. And the same operation is applied
    in each decode conv layer.
    """
    def __init__(self, input_size):
        super(ConvMultiStepAttention, self).__init__()
        self.linear_in = nn.Linear(input_size, input_size)
        # mask is installed separately via apply_mask() before forward()
        self.mask = None
    def apply_mask(self, mask):
        """ Apply mask """
        self.mask = mask
    def forward(self, base_target_emb, input_from_dec, encoder_out_top,
                encoder_out_combine):
        """
        Args:
            base_target_emb: target emb tensor
            input_from_dec: output of decode conv
            encoder_out_top: the key matrix for calculation of attention weight,
                which is the top output of encode conv
            encoder_out_combine:
                the value matrix for the attention-weighted sum,
                which is the combination of base emb and top output of encode
        """
        # checks
        # batch, channel, height, width = base_target_emb.size()
        batch, _, height, _ = base_target_emb.size()
        # batch_, channel_, height_, width_ = input_from_dec.size()
        batch_, _, height_, _ = input_from_dec.size()
        aeq(batch, batch_)
        aeq(height, height_)
        # enc_batch, enc_channel, enc_height = encoder_out_top.size()
        enc_batch, _, enc_height = encoder_out_top.size()
        # enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
        enc_batch_, _, enc_height_ = encoder_out_combine.size()
        aeq(enc_batch, enc_batch_)
        aeq(enc_height, enc_height_)
        # project the decoder state and mix with the target embedding;
        # the sum is rescaled by SCALE_WEIGHT (1/sqrt(2))
        preatt = seq_linear(self.linear_in, input_from_dec)
        target = (base_target_emb + preatt) * SCALE_WEIGHT
        target = torch.squeeze(target, 3)
        target = torch.transpose(target, 1, 2)
        pre_attn = torch.bmm(target, encoder_out_top)
        if self.mask is not None:
            # in-place fill of masked positions before the softmax
            pre_attn.data.masked_fill_(self.mask, -float('inf'))
        attn = F.softmax(pre_attn, dim=2)
        # attention-weighted sum over the value matrix
        context_output = torch.bmm(
            attn, torch.transpose(encoder_out_combine, 1, 2))
        context_output = torch.transpose(
            torch.unsqueeze(context_output, 3), 1, 2)
        return context_output, attn
| 2,865 | 34.382716 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/average_attn.py | # -*- coding: utf-8 -*-
"""Average Attention module."""
import torch
import torch.nn as nn
from onmt.modules.position_ffn import PositionwiseFeedForward
class AverageAttention(nn.Module):
    """
    Average Attention module from
    "Accelerating Neural Transformer via an Average Attention Network"
    :cite:`DBLP:journals/corr/abs-1805-00631`.
    Args:
        model_dim (int): the dimension of keys/values/queries,
            must be divisible by head_count
        dropout (float): dropout parameter
    """
    def __init__(self, model_dim, dropout=0.1, aan_useffn=False):
        self.model_dim = model_dim
        self.aan_useffn = aan_useffn
        super(AverageAttention, self).__init__()
        if aan_useffn:
            # optional FFN applied on top of the cumulative average
            self.average_layer = PositionwiseFeedForward(model_dim, model_dim,
                                                         dropout)
        # gate consumes [input ; average] and emits input & forget gates
        self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)
    def cumulative_average_mask(self, batch_size, inputs_len, device):
        """
        Builds the mask to compute the cumulative average as described in
        :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3
        Args:
            batch_size (int): batch size
            inputs_len (int): length of the inputs
        Returns:
            (FloatTensor):
            * A Tensor of shape ``(batch_size, input_len, input_len)``
        """
        # lower-triangular ones: position i only sees positions <= i
        triangle = torch.tril(torch.ones(inputs_len, inputs_len,
                              dtype=torch.float, device=device))
        # row i of the final mask is divided by (i + 1) -> running-mean weights
        weights = torch.ones(1, inputs_len, dtype=torch.float, device=device) \
            / torch.arange(1, inputs_len + 1, dtype=torch.float, device=device)
        mask = triangle * weights.transpose(0, 1)
        return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)
    def cumulative_average(self, inputs, mask_or_step,
                           layer_cache=None, step=None):
        """
        Computes the cumulative average as described in
        :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6)
        Args:
            inputs (FloatTensor): sequence to average
                ``(batch_size, input_len, dimension)``
            mask_or_step: if cache is set, this is assumed
                to be the current step of the
                dynamic decoding. Otherwise, it is the mask matrix
                used to compute the cumulative average.
            layer_cache: a dictionary containing the cumulative average
                of the previous step.
        Returns:
            a tensor of the same shape and type as ``inputs``.
        """
        if layer_cache is not None:
            # incremental decoding: O(1) running-mean update from the cache
            step = mask_or_step
            average_attention = (inputs + step *
                                 layer_cache["prev_g"]) / (step + 1)
            layer_cache["prev_g"] = average_attention
            return average_attention
        else:
            # full-sequence mode: one matmul with the triangular mean mask
            mask = mask_or_step
            return torch.matmul(mask.to(inputs.dtype), inputs)
    def forward(self, inputs, mask=None, layer_cache=None, step=None):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, input_len, model_dim)``
            mask: unused in this implementation.
        Returns:
            (FloatTensor, FloatTensor):
            * gating_outputs ``(batch_size, input_len, model_dim)``
            * average_outputs average attention
                ``(batch_size, input_len, model_dim)``
        """
        batch_size = inputs.size(0)
        inputs_len = inputs.size(1)
        average_outputs = self.cumulative_average(
            inputs, self.cumulative_average_mask(batch_size,
                                                 inputs_len, inputs.device)
            if layer_cache is None else step, layer_cache=layer_cache)
        if self.aan_useffn:
            average_outputs = self.average_layer(average_outputs)
        gating_outputs = self.gating_layer(torch.cat((inputs,
                                                      average_outputs), -1))
        input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
        # LSTM-style gating between the raw input and its running average
        gating_outputs = torch.sigmoid(input_gate) * inputs + \
            torch.sigmoid(forget_gate) * average_outputs
        return gating_outputs, average_outputs
| 4,227 | 36.75 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/copy_generator.py | import torch
import torch.nn as nn
from onmt.utils.misc import aeq
from onmt.utils.loss import NMTLossCompute
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs=None,
                         batch_dim=1, batch_offset=None):
    """Merge extended-vocab copy scores into the base vocabulary.

    Every extended-vocabulary entry that also exists in ``tgt_vocab`` has
    its probability mass added onto the in-vocabulary slot; the extended
    slot is then flattened to a tiny constant so the duplicate no longer
    competes with the canonical word.
    """
    vocab_len = len(tgt_vocab)
    for ex in range(scores.size(batch_dim)):
        # Locate the per-example source vocabulary.
        if src_vocabs is None:
            ex_vocab = batch.src_ex_vocab[ex]
        else:
            real_ex = batch_offset[ex] if batch_offset is not None else ex
            ex_vocab = src_vocabs[batch.indices.data[real_ex]]
        # Collect extended indices whose word is also in the target vocab.
        dup_src, dup_tgt = [], []
        for src_ix in range(1, len(ex_vocab)):
            tgt_ix = tgt_vocab.stoi[ex_vocab.itos[src_ix]]
            if tgt_ix != 0:
                dup_src.append(vocab_len + src_ix)
                dup_tgt.append(tgt_ix)
        if not dup_src:
            continue
        dup_src = torch.Tensor(dup_src).type_as(batch.indices.data)
        dup_tgt = torch.Tensor(dup_tgt).type_as(batch.indices.data)
        ex_scores = scores[:, ex] if batch_dim == 1 else scores[ex]
        ex_scores.index_add_(1, dup_tgt, ex_scores.index_select(1, dup_src))
        ex_scores.index_fill_(1, dup_src, 1e-10)
    return scores
class CopyGenerator(nn.Module):
    """An implementation of pointer-generator networks
    :cite:`DBLP:journals/corr/SeeLM17`.
    These networks consider copying words
    directly from the source sequence.
    The copy generator is an extended version of the standard
    generator that computes three values.
    * :math:`p_{softmax}` the standard softmax over `tgt_dict`
    * :math:`p(z)` the probability of copying a word from
      the source
    * :math:`p_{copy}` the probility of copying a particular word.
      taken from the attention distribution directly.
    The model returns a distribution over the extend dictionary,
    computed as
    :math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`
    .. mermaid::
       graph BT
          A[input]
          S[src_map]
          B[softmax]
          BB[switch]
          C[attn]
          D[copy]
          O[output]
          A --> B
          A --> BB
          S --> D
          C --> D
          D --> O
          B --> O
          BB --> O
    Args:
       input_size (int): size of input representation
       output_size (int): size of output vocabulary
       pad_idx (int)
    """
    def __init__(self, input_size, output_size, pad_idx):
        super(CopyGenerator, self).__init__()
        # Projection onto the fixed target vocabulary.
        self.linear = nn.Linear(input_size, output_size)
        # Scalar "copy switch" p(z=1) per decoder position.
        self.linear_copy = nn.Linear(input_size, 1)
        self.pad_idx = pad_idx
    def forward(self, hidden, attn, src_map):
        """
        Compute a distribution over the target dictionary
        extended by the dynamic dictionary implied by copying
        source words.
        Args:
           hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)``
           attn (FloatTensor): attn for each ``(batch x tlen, input_size)``
           src_map (FloatTensor):
               A sparse indicator matrix mapping each source word to
               its index in the "extended" vocab containing.
               ``(src_len, batch, extra_words)``
        """
        # CHECKS
        batch_by_tlen, _ = hidden.size()
        batch_by_tlen_, slen = attn.size()
        slen_, batch, cvocab = src_map.size()
        aeq(batch_by_tlen, batch_by_tlen_)
        aeq(slen, slen_)
        # Original probabilities.
        logits = self.linear(hidden)
        # Padding can never be generated: mask it out before the softmax.
        logits[:, self.pad_idx] = -float('inf')
        prob = torch.softmax(logits, 1)
        # Probability of copying p(z=1) batch.
        p_copy = torch.sigmoid(self.linear_copy(hidden))
        # Probability of not copying: p_{word}(w) * (1 - p(z))
        out_prob = torch.mul(prob, 1 - p_copy)
        mul_attn = torch.mul(attn, p_copy)
        # Project the copy-weighted attention onto the extended vocabulary:
        # (batch, tlen, slen) x (batch, slen, cvocab) -> (batch, tlen, cvocab).
        copy_prob = torch.bmm(
            mul_attn.view(-1, batch, slen).transpose(0, 1),
            src_map.transpose(0, 1)
        ).transpose(0, 1)
        copy_prob = copy_prob.contiguous().view(-1, cvocab)
        # Concatenate fixed-vocab probs with extended-vocab (copy) probs.
        return torch.cat([out_prob, copy_prob], 1)
class CopyGeneratorLoss(nn.Module):
    """NLL criterion for pointer-generator outputs.

    Combines the probability a gold token received through the regular
    vocabulary with the probability it received through the copy
    distribution, then returns the per-token negative log-likelihood.
    """
    def __init__(self, vocab_size, force_copy, unk_index=0,
                 ignore_index=-100, eps=1e-20):
        super(CopyGeneratorLoss, self).__init__()
        self.force_copy = force_copy
        self.eps = eps
        self.vocab_size = vocab_size
        self.ignore_index = ignore_index
        self.unk_index = unk_index
    def forward(self, scores, align, target):
        """Compute the per-token loss.

        Args:
            scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab
                size; rows are (sub-)normalized probabilities.
            align (LongTensor): ``(batch_size x tgt_len)`` copy indices.
            target (LongTensor): ``(batch_size x tgt_len)`` gold indices.
        """
        # Probability mass the model placed on the gold in-vocabulary token.
        gold_vocab_p = scores.gather(1, target.unsqueeze(1)).squeeze(1)
        # Probability mass placed on copying that token from the source.
        extended_ix = align.unsqueeze(1) + self.vocab_size
        copy_p = scores.gather(1, extended_ix).squeeze(1)
        # Tokens that cannot be copied contribute nothing via the copy path;
        # eps keeps the upcoming log finite.
        copy_p[align == self.unk_index] = 0
        copy_p = copy_p + self.eps
        # Positions where the vocabulary path participates as well.
        use_vocab = align == self.unk_index
        if not self.force_copy:
            use_vocab = use_vocab | (target != self.unk_index)
        total_p = torch.where(use_vocab, copy_p + gold_vocab_p, copy_p)
        token_loss = -total_p.log()
        # Padding positions carry no loss.
        token_loss[target == self.ignore_index] = 0
        return token_loss
class CopyGeneratorLossCompute(NMTLossCompute):
    """Copy Generator Loss Computation.

    Wires :class:`CopyGenerator` and :class:`CopyGeneratorLoss` into the
    sharded loss-computation framework of ``NMTLossCompute``.
    """
    def __init__(self, criterion, generator, tgt_vocab, normalize_by_length,
                 lambda_coverage=0.0):
        super(CopyGeneratorLossCompute, self).__init__(
            criterion, generator, lambda_coverage=lambda_coverage)
        self.tgt_vocab = tgt_vocab
        self.normalize_by_length = normalize_by_length
    def _make_shard_state(self, batch, output, range_, attns):
        """See base class for args description."""
        if getattr(batch, "alignment", None) is None:
            raise AssertionError("using -copy_attn you need to pass in "
                                 "-dynamic_dict during preprocess stage.")
        shard_state = super(CopyGeneratorLossCompute, self)._make_shard_state(
            batch, output, range_, attns)
        # Alignment is shifted by one so it lines up with decoder targets.
        shard_state.update({
            "copy_attn": attns.get("copy"),
            "align": batch.alignment[range_[0] + 1: range_[1]]
        })
        return shard_state
    def _compute_loss(self, batch, output, target, copy_attn, align,
                      std_attn=None, coverage_attn=None):
        """Compute the loss.
        The args must match :func:`self._make_shard_state()`.
        Args:
            batch: the current batch.
            output: the predict output from the model.
            target: the validate target to compare output with.
            copy_attn: the copy attention value.
            align: the align info.
        """
        target = target.view(-1)
        align = align.view(-1)
        scores = self.generator(
            self._bottle(output), self._bottle(copy_attn), batch.src_map
        )
        loss = self.criterion(scores, align, target)
        if self.lambda_coverage != 0.0:
            coverage_loss = self._compute_coverage_loss(std_attn,
                                                        coverage_attn)
            loss += coverage_loss
        # this block does not depend on the loss value computed above
        # and is used only for stats
        scores_data = collapse_copy_scores(
            self._unbottle(scores.clone(), batch.batch_size),
            batch, self.tgt_vocab, None)
        scores_data = self._bottle(scores_data)
        # this block does not depend on the loss value computed above
        # and is used only for stats
        # Correct target copy token instead of <unk>
        # tgt[i] = align[i] + len(tgt_vocab)
        # for i such that tgt[i] == 0 and align[i] != 0
        target_data = target.clone()
        unk = self.criterion.unk_index
        correct_mask = (target_data == unk) & (align != unk)
        offset_align = align[correct_mask] + len(self.tgt_vocab)
        target_data[correct_mask] += offset_align
        # Compute sum of perplexities for stats
        stats = self._stats(loss.sum().clone(), scores_data, target_data)
        # this part looks like it belongs in CopyGeneratorLoss
        if self.normalize_by_length:
            # Compute Loss as NLL divided by seq length
            tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()
            # Compute Total Loss per sequence in batch
            loss = loss.view(-1, batch.batch_size).sum(0)
            # Divide by length of each sequence and sum
            loss = torch.div(loss, tgt_lens).sum()
        else:
            loss = loss.sum()
        return loss, stats
| 9,415 | 34.938931 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/self_attention.py | """
Custom reimplementation of torch.nn.MultiHeadAttention
It's actually the same module, with more or less flewibility at times,
and a more flexible use of the mask (different mask per element of the batch)
"""
from torch._jit_internal import weak_module, weak_script_method
from torch.nn.init import constant_
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_uniform_
from torch.nn import functional as F
from onmt.utils.misc import tile
from onmt.modules import GatedLinear
import torch
@weak_module
class MultiHeadSelfAttention(torch.nn.Module):
    """
    if glu_depth is not zero, we use GatedLinear layers instead of regular layers.
    """
    def __init__(self, embed_dim, num_heads, dropout=0., glu_depth=0, bias=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        msg = "embed_dim must be divisible by num_heads, got {} and {}"
        assert self.head_dim * num_heads == self.embed_dim, msg.format(embed_dim, num_heads)
        self.scaling = self.head_dim ** -0.5
        # One packed projection yields q, k and v in a single matmul.
        self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=bias)
        # Gated Linear Unit
        self._use_glu = isinstance(glu_depth, int) and glu_depth > 0
        if self._use_glu:
            if not self.head_dim % pow(2, glu_depth) == 0:
                raise ValueError('When using GLU you need to use a head_dim that is '
                                 'a multiple of two to the power glu_depth. '
                                 f'Got {self.head_dim} % 2^{glu_depth} != 0')
            glu_out_dim = self.head_dim // pow(2, glu_depth)
            self.key_glu = GatedLinear(self.head_dim, glu_out_dim, glu_depth)
            self.query_glu = GatedLinear(self.head_dim, glu_out_dim, glu_depth)
        self._reset_parameters()
    def _reset_parameters(self):
        # Initialize the q / k / v slices of the packed weight separately,
        # matching the per-matrix xavier init of three distinct projections.
        xavier_uniform_(self.in_proj_weight[:self.embed_dim, :])
        xavier_uniform_(self.in_proj_weight[self.embed_dim:(self.embed_dim * 2), :])
        xavier_uniform_(self.in_proj_weight[(self.embed_dim * 2):, :])
        xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.)
            constant_(self.out_proj.bias, 0.)
    @weak_script_method
    def forward(self, input, attn_mask=None):
        """
        Inputs of forward function
            input: [target length, batch size, embed dim]
            attn_mask [(batch size), sequence_length, sequence_length]
        Outputs of forward function
            attn_output: [target length, batch size, embed dim]
            attn_output_weights: [batch size, target length, sequence length]
        """
        seq_len, bsz, embed_dim = input.size()
        assert embed_dim == self.embed_dim
        # self-attention
        q, k, v = F.linear(input, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
        q *= self.scaling
        # Cut q, k, v in num_heads part
        q = q.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        # Gated Linear Unit
        if self._use_glu:
            q = self.query_glu(q)
            k = self.key_glu(k)
        # batch matrix multply query against key
        # attn_output_weights is [bsz * num_heads, seq_len, seq_len]
        attn_output_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_output_weights.size()) == [bsz * self.num_heads, seq_len, seq_len]
        if attn_mask is not None:
            if attn_mask.dim() == 2:
                # We use the same mask for each item in the batch
                attn_mask = attn_mask.unsqueeze(0)
            elif attn_mask.dim() == 3:
                # Each item in the batch has its own mask.
                # We need to inflate the mask to go with all heads
                attn_mask = tile(attn_mask, count=self.num_heads, dim=0)
            else:
                # Don't known what we would be doing here...
                raise RuntimeError(f'Wrong mask dim: {attn_mask.dim()}')
            # The mask should be either 0 of -inf to go with softmax
            attn_output_weights += attn_mask
        attn_output_weights = F.softmax(
            attn_output_weights.float(), dim=-1,
            dtype=torch.float32 if attn_output_weights.dtype == torch.float16 else attn_output_weights.dtype)
        attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_output_weights, v)
        assert list(attn_output.size()) == [bsz * self.num_heads, seq_len, self.head_dim]
        attn_output = attn_output.transpose(0, 1).contiguous().view(seq_len, bsz, embed_dim)
        attn_output = self.out_proj(attn_output)
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, self.num_heads, seq_len, seq_len)
        attn_output_weights = attn_output_weights.sum(dim=1) / self.num_heads
        return attn_output, attn_output_weights | 5,556 | 43.103175 | 109 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/embeddings.py | """ Embeddings module """
import math
import warnings
import torch
import torch.nn as nn
from onmt.modules.util_class import Elementwise
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for non-recurrent networks.

    Implementation based on "Attention Is All You Need"
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`

    Args:
       dropout (float): dropout parameter
       dim (int): embedding size
    """
    def __init__(self, dropout, dim, max_len=5000):
        if dim % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "odd dim (got dim={:d})".format(dim))
        # Precompute the (max_len, dim) table of sinusoids once.
        table = torch.zeros(max_len, dim)
        positions = torch.arange(0, max_len).unsqueeze(1)
        inv_freq = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *
                              -(math.log(10000.0) / dim)))
        angles = positions.float() * inv_freq
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        # Insert a broadcastable batch axis: (max_len, 1, dim).
        table = table.unsqueeze(1)
        super(PositionalEncoding, self).__init__()
        self.register_buffer('pe', table)
        self.dropout = nn.Dropout(p=dropout)
        self.dim = dim
    def forward(self, emb, step=None):
        """Scale *emb*, add the positional signal, apply dropout.

        Args:
            emb (FloatTensor): Sequence of word vectors
                ``(seq_len, batch_size, self.dim)``
            step (int or NoneType): If stepwise (``seq_len = 1``), use
                the encoding for this position.
        """
        scaled = emb * math.sqrt(self.dim)
        if step is None:
            scaled = scaled + self.pe[:scaled.size(0)]
        else:
            scaled = scaled + self.pe[step]
        return self.dropout(scaled)
class VecEmbedding(nn.Module):
    """Project fixed-size continuous feature vectors to the embedding space.

    Args:
        vec_size (int): dimension of the incoming feature vectors.
        emb_dim (int): target embedding dimension.
        position_encoding (bool): add a sinusoidal positional signal.
        dropout (float): dropout used by the positional encoding.
    """
    def __init__(self, vec_size,
                 emb_dim,
                 position_encoding=False,
                 dropout=0):
        super(VecEmbedding, self).__init__()
        self.embedding_size = emb_dim
        self.proj = nn.Linear(vec_size, emb_dim, bias=False)
        self.word_padding_idx = 0  # vector seqs are zero-padded
        self.position_encoding = position_encoding
        if self.position_encoding:
            self.pe = PositionalEncoding(dropout, self.embedding_size)
    def forward(self, x, step=None):
        """
        Args:
            x (FloatTensor): input, ``(len, batch, 1, vec_feats)``.
        Returns:
            FloatTensor: embedded vecs ``(len, batch, embedding_size)``.
        """
        projected = self.proj(x).squeeze(2)
        if not self.position_encoding:
            return projected
        return self.pe(projected, step=step)
    def load_pretrained_vectors(self, file):
        # Pretrained word vectors make no sense for continuous inputs.
        assert not file
class Embeddings(nn.Module):
    """Words embeddings for encoder/decoder.
    Additionally includes ability to add sparse input features
    based on "Linguistic Input Features Improve Neural Machine Translation"
    :cite:`sennrich2016linguistic`.
    .. mermaid::
       graph LR
          A[Input]
          C[Feature 1 Lookup]
          A-->B[Word Lookup]
          A-->C
          A-->D[Feature N Lookup]
          B-->E[MLP/Concat]
          C-->E
          D-->E
          E-->F[Output]
    Args:
        word_vec_size (int): size of the dictionary of embeddings.
        word_padding_idx (int): padding index for words in the embeddings.
        feat_padding_idx (List[int]): padding index for a list of features
                                   in the embeddings.
        word_vocab_size (int): size of dictionary of embeddings for words.
        feat_vocab_sizes (List[int], optional): list of size of dictionary
            of embeddings for each feature.
        position_encoding (bool): see :class:`~onmt.modules.PositionalEncoding`
        feat_merge (string): merge action for the features embeddings:
            concat, sum or mlp.
        feat_vec_exponent (float): when using `-feat_merge concat`, feature
            embedding size is N^feat_dim_exponent, where N is the
            number of values the feature takes.
        feat_vec_size (int): embedding dimension for features when using
            `-feat_merge mlp`
        dropout (float): dropout probability.
    """
    # NOTE(review): the mutable defaults for feat_padding_idx /
    # feat_vocab_sizes are never mutated below, so they are harmless,
    # but None defaults would be the safer idiom.
    def __init__(self, word_vec_size,
                 word_vocab_size,
                 word_padding_idx,
                 position_encoding=False,
                 feat_merge="concat",
                 feat_vec_exponent=0.7,
                 feat_vec_size=-1,
                 feat_padding_idx=[],
                 feat_vocab_sizes=[],
                 dropout=0,
                 sparse=False,
                 fix_word_vecs=False):
        self._validate_args(feat_merge, feat_vocab_sizes, feat_vec_exponent,
                            feat_vec_size, feat_padding_idx)
        if feat_padding_idx is None:
            feat_padding_idx = []
        self.word_padding_idx = word_padding_idx
        self.word_vec_size = word_vec_size
        # Dimensions and padding for constructing the word embedding matrix
        vocab_sizes = [word_vocab_size]
        emb_dims = [word_vec_size]
        pad_indices = [word_padding_idx]
        # Dimensions and padding for feature embedding matrices
        # (these have no effect if feat_vocab_sizes is empty)
        if feat_merge == 'sum':
            feat_dims = [word_vec_size] * len(feat_vocab_sizes)
        elif feat_vec_size > 0:
            feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
        else:
            feat_dims = [int(vocab ** feat_vec_exponent)
                         for vocab in feat_vocab_sizes]
        vocab_sizes.extend(feat_vocab_sizes)
        emb_dims.extend(feat_dims)
        pad_indices.extend(feat_padding_idx)
        # The embedding matrix look-up tables. The first look-up table
        # is for words. Subsequent ones are for features, if any exist.
        emb_params = zip(vocab_sizes, emb_dims, pad_indices)
        embeddings = [nn.Embedding(vocab, dim, padding_idx=pad, sparse=sparse)
                      for vocab, dim, pad in emb_params]
        emb_luts = Elementwise(feat_merge, embeddings)
        # The final output size of word + feature vectors. This can vary
        # from the word vector size if and only if features are defined.
        # This is the attribute you should access if you need to know
        # how big your embeddings are going to be.
        self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'
                               else word_vec_size)
        # The sequence of operations that converts the input sequence
        # into a sequence of embeddings. At minimum this consists of
        # looking up the embeddings for each word and feature in the
        # input. Model parameters may require the sequence to contain
        # additional operations as well.
        super(Embeddings, self).__init__()
        self.make_embedding = nn.Sequential()
        self.make_embedding.add_module('emb_luts', emb_luts)
        if feat_merge == 'mlp' and len(feat_vocab_sizes) > 0:
            in_dim = sum(emb_dims)
            mlp = nn.Sequential(nn.Linear(in_dim, word_vec_size), nn.ReLU())
            self.make_embedding.add_module('mlp', mlp)
        self.position_encoding = position_encoding
        if self.position_encoding:
            pe = PositionalEncoding(dropout, self.embedding_size)
            self.make_embedding.add_module('pe', pe)
        if fix_word_vecs:
            # Freeze the word look-up table (features stay trainable).
            self.word_lut.weight.requires_grad = False
    def _validate_args(self, feat_merge, feat_vocab_sizes, feat_vec_exponent,
                       feat_vec_size, feat_padding_idx):
        # Warn about option combinations whose values would be ignored.
        if feat_merge == "sum":
            # features must use word_vec_size
            if feat_vec_exponent != 0.7:
                warnings.warn("Merging with sum, but got non-default "
                              "feat_vec_exponent. It will be unused.")
            if feat_vec_size != -1:
                warnings.warn("Merging with sum, but got non-default "
                              "feat_vec_size. It will be unused.")
        elif feat_vec_size > 0:
            # features will use feat_vec_size
            if feat_vec_exponent != -1:
                warnings.warn("Not merging with sum and positive "
                              "feat_vec_size, but got non-default "
                              "feat_vec_exponent. It will be unused.")
        else:
            if feat_vec_exponent <= 0:
                raise ValueError("Using feat_vec_exponent to determine "
                                 "feature vec size, but got feat_vec_exponent "
                                 "less than or equal to 0.")
        n_feats = len(feat_vocab_sizes)
        if n_feats != len(feat_padding_idx):
            raise ValueError("Got unequal number of feat_vocab_sizes and "
                             "feat_padding_idx ({:d} != {:d})".format(
                                n_feats, len(feat_padding_idx)))
    @property
    def word_lut(self):
        """Word look-up table."""
        return self.make_embedding[0][0]
    @property
    def emb_luts(self):
        """Embedding look-up table."""
        return self.make_embedding[0]
    def load_pretrained_vectors(self, emb_file):
        """Load in pretrained embeddings.
        Args:
          emb_file (str) : path to torch serialized embeddings
        """
        if emb_file:
            pretrained = torch.load(emb_file)
            pretrained_vec_size = pretrained.size(1)
            # Copy the overlapping slice when dimensions disagree.
            if self.word_vec_size > pretrained_vec_size:
                self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
            elif self.word_vec_size < pretrained_vec_size:
                self.word_lut.weight.data \
                    .copy_(pretrained[:, :self.word_vec_size])
            else:
                self.word_lut.weight.data.copy_(pretrained)
    def forward(self, source, step=None):
        """Computes the embeddings for words and features.
        Args:
            source (LongTensor): index tensor ``(len, batch, nfeat)``
        Returns:
            FloatTensor: Word embeddings ``(len, batch, embedding_size)``
        """
        if self.position_encoding:
            # The positional-encoding module (last in the pipeline) needs
            # the extra ``step`` argument, so modules are applied manually.
            for i, module in enumerate(self.make_embedding._modules.values()):
                if i == len(self.make_embedding._modules.values()) - 1:
                    source = module(source, step=step)
                else:
                    source = module(source)
        else:
            source = self.make_embedding(source)
        return source
    def update_dropout(self, dropout):
        if self.position_encoding:
            # NOTE(review): assumes the PositionalEncoding sits at index 1
            # of make_embedding; this breaks if an 'mlp' stage was added.
            self._modules['make_embedding'][1].dropout.p = dropout
| 10,689 | 36.640845 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/global_attention.py | """Global attention modules (Luong / Bahdanau)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.sparse_activations import sparsemax
from onmt.utils.misc import aeq, sequence_mask
# This class is mainly used by decoder.py for RNNs but also
# by the CNN / transformer decoder when copy attention is used
# CNN has its own attention mechanism ConvMultiStepAttention
# Transformer has its own MultiHeadedAttention
class GlobalAttention(nn.Module):
    r"""
    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.
    Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`, to an output
    of size `dim`.
    .. mermaid::
       graph BT
          A[Query]
          subgraph RNN
            C[H 1]
            D[H 2]
            E[H N]
          end
          F[Attn]
          G[Output]
          A --> F
          C --> F
          D --> F
          E --> F
          C -.-> G
          D -.-> G
          E -.-> G
          F --> G
    All models compute the output as
    :math:`c = \sum_{j=1}^{\text{SeqLength}} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    Then then apply a projection layer to [q, c].
    However they
    differ on how they compute the attention score.
    * Luong Attention (dot, general):
       * dot: :math:`\text{score}(H_j,q) = H_j^T q`
       * general: :math:`\text{score}(H_j, q) = H_j^T W_a q`
    * Bahdanau Attention (mlp):
       * :math:`\text{score}(H_j, q) = v_a^T \text{tanh}(W_a q + U_a h_j)`
    Args:
       dim (int): dimensionality of query and key
       coverage (bool): use coverage term
       attn_type (str): type of attention to use, options [dot,general,mlp]
       attn_func (str): attention function to use, options [softmax,sparsemax]
    """
    def __init__(self, dim, coverage=False, attn_type="dot",
                 attn_func="softmax"):
        super(GlobalAttention, self).__init__()
        self.dim = dim
        assert attn_type in ["dot", "general", "mlp"], (
            "Please select a valid attention type (got {:s}).".format(
                attn_type))
        self.attn_type = attn_type
        assert attn_func in ["softmax", "sparsemax"], (
            "Please select a valid attention function.")
        self.attn_func = attn_func
        # Only the score variants that need learned parameters get layers.
        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)
    def score(self, h_t, h_s):
        """
        Args:
          h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim``
        Returns:
          FloatTensor: raw attention scores (unnormalized) for each src index
            ``(batch, tgt_len, src_len)``
        """
        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if self.attn_type in ["general", "dot"]:
            if self.attn_type == "general":
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
            return torch.bmm(h_t, h_s_)
        else:
            # Bahdanau-style MLP score, broadcast over every (tgt, src) pair.
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)
            # (batch, t_len, s_len, d)
            wquh = torch.tanh(wq + uh)
            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
    def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
        """
        Args:
          source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
          memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
          memory_lengths (LongTensor): the source context lengths ``(batch,)``
          coverage (FloatTensor): None (not supported yet)
        Returns:
          (FloatTensor, FloatTensor):
          * Computed vector ``(tgt_len, batch, dim)``
          * Attention distribtutions for each query
            ``(tgt_len, batch, src_len)``
        """
        # one step input
        if source.dim() == 2:
            one_step = True
            source = source.unsqueeze(1)
        else:
            one_step = False
        batch, source_l, dim = memory_bank.size()
        batch_, target_l, dim_ = source.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, source_l_ = coverage.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        if coverage is not None:
            cover = coverage.view(-1).unsqueeze(1)
            # NOTE(review): this mutates memory_bank in place; callers that
            # reuse the tensor across steps will observe the change.
            memory_bank += self.linear_cover(cover).view_as(memory_bank)
            memory_bank = torch.tanh(memory_bank)
        # compute attention scores, as in Luong et al.
        align = self.score(source, memory_bank)
        if memory_lengths is not None:
            # Positions beyond each source length get -inf before softmax.
            mask = sequence_mask(memory_lengths, max_len=align.size(-1))
            mask = mask.unsqueeze(1)  # Make it broadcastable.
            align.masked_fill_(~mask, -float('inf'))
        # Softmax or sparsemax to normalize attention weights
        if self.attn_func == "softmax":
            align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)
        else:
            align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)
        align_vectors = align_vectors.view(batch, target_l, source_l)
        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, memory_bank)
        # concatenate
        concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)
        attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
        if self.attn_type in ["general", "dot"]:
            attn_h = torch.tanh(attn_h)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            # Check output sizes
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, source_l_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            # Check output sizes
            target_l_, batch_, dim_ = attn_h.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            target_l_, batch_, source_l_ = align_vectors.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        return attn_h, align_vectors
| 7,827 | 33.333333 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/glu.py | """Comes directly from fairseq"""
import torch, math
class Downsample(torch.nn.Module):
    """Keep every ``step``-th feature along the last dimension."""
    def __init__(self, in_dim, step):
        super().__init__()
        if in_dim % step != 0:
            raise ValueError('in_dim should be a multiple of step. '
                             f'Got {in_dim} and {step}.')
        self._step = step
        self._in_dim = in_dim
        # Indices of the columns that survive the downsampling.
        self.index = torch.LongTensor(range(0, in_dim, step))
    def forward(self, input):
        picked = self.index.to(input.device)
        return input.index_select(dim=-1, index=picked)
    def extra_repr(self):
        return f'{self._in_dim}, {self._in_dim//self._step}'
def Linear(in_features, out_features, dropout=0., bias=True):
    """Weight-normalized Linear layer (input: B x T x C).

    The weight is drawn from N(0, (1 - dropout) / in_features) — the
    variance is shrunk to compensate for dropout applied downstream —
    the optional bias is zeroed, then weight normalization is applied.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        dropout (float): dropout rate assumed by the caller.
        bias (bool): include an additive bias term.

    Returns:
        torch.nn.Linear: the weight-normalized layer.
    """
    m = torch.nn.Linear(in_features, out_features, bias=bias)
    m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
    if bias:
        # Fix: the original unconditionally called m.bias.data.zero_() and
        # crashed with AttributeError when bias=False (m.bias is None).
        m.bias.data.zero_()
    return torch.nn.utils.weight_norm(m)
class GatedLinear(torch.nn.Module):
    """Weight-normalized linear stack interleaved with GLU activations.

    A GLU splits its input in half along the last dimension, using one half
    as values and one as gates: glu([a; b]) = a * sigmoid(b). Each of the
    ``depth`` stages therefore halves the feature count before its linear
    layer, so without downsampling ``in_features`` must equal
    ``out_features * 2**depth``.
    """
    def __init__(self, in_features, out_features, depth=2,
                 downsample=0, dropout=0., bias=True):
        super().__init__()
        self._num_layers = depth
        self._bias = bias
        self._dropout = dropout
        self._downsample = isinstance(downsample, int) and downsample > 0
        self.glu = torch.nn.GLU(dim=-1)
        # Feature width expected going into the first GLU stage.
        widest = out_features * pow(2, depth)
        if self._downsample:
            # Thin the input, then project it up to the widest stage.
            self.linear_in = torch.nn.Sequential(
                Downsample(in_features, downsample),
                Linear(in_features//downsample, widest, dropout, bias)
            )
        elif in_features != widest:
            raise ValueError('When not using downsampling, in_features should be '
                             'equal to out_feature * 2^depth. '
                             f'Got {in_features} != {out_features} * 2^{depth}')
        # One square layer per stage, each half the width of the previous.
        self.linear_layers = torch.nn.ModuleList([
            Linear(out_features * pow(2, depth - stage),
                   out_features * pow(2, depth - stage),
                   dropout, bias)
            for stage in range(1, depth+1)
        ])
    def forward(self, input):
        hidden = self.linear_in(input) if self._downsample else input
        for layer in self.linear_layers:
            hidden = layer(self.glu(hidden))
        return hidden
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/gate.py | """ ContextGate module """
import torch
import torch.nn as nn
def context_gate_factory(gate_type, embeddings_size, decoder_size,
                         attention_size, output_size):
    """Returns the correct ContextGate class"""
    dispatch = {'source': SourceContextGate,
                'target': TargetContextGate,
                'both': BothContextGate}
    assert gate_type in dispatch, "Not valid ContextGate type: {0}".format(
        gate_type)
    selected_cls = dispatch[gate_type]
    return selected_cls(embeddings_size, decoder_size, attention_size,
                        output_size)
class ContextGate(nn.Module):
    """Compute a gate over source/target context for the decoder.

    From the previous word embedding, the current decoder state and the
    attention (source) state, this module produces a sigmoid gate ``z``
    together with linear projections of the source context and of the
    target context. Subclasses decide how the gate mixes the two.
    """
    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(ContextGate, self).__init__()
        total_in = embeddings_size + decoder_size + attention_size
        self.gate = nn.Linear(total_in, output_size, bias=True)
        self.sig = nn.Sigmoid()
        self.source_proj = nn.Linear(attention_size, output_size)
        self.target_proj = nn.Linear(embeddings_size + decoder_size,
                                     output_size)
    def forward(self, prev_emb, dec_state, attn_state):
        """Return ``(z, projected_source, projected_target)``."""
        everything = torch.cat((prev_emb, dec_state, attn_state), dim=1)
        z = self.sig(self.gate(everything))
        proj_source = self.source_proj(attn_state)
        target_in = torch.cat((prev_emb, dec_state), dim=1)
        proj_target = self.target_proj(target_in)
        return z, proj_source, proj_target
class SourceContextGate(nn.Module):
    """Context gate applied to the source (attention) context only:
    ``tanh(target + z * source)``."""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(SourceContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
        gated = target + z * source
        return self.tanh(gated)
class TargetContextGate(nn.Module):
    """Context gate applied to the target context only:
    ``tanh(z * target + source)``."""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(TargetContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
        gated = z * target + source
        return self.tanh(gated)
class BothContextGate(nn.Module):
    """Context gate applied to both contexts:
    ``tanh((1 - z) * target + z * source)``."""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(BothContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
        gated = (1. - z) * target + z * source
        return self.tanh(gated)
| 3,635 | 38.521739 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/weight_norm.py | """ Weights normalization modules """
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
    """Fetch a parameter and refresh its polyak (EMA) shadow copy in place.

    The shadow tensor stored under ``<var_name>_avg`` is updated on every
    call.  Returns the raw parameter while ``training`` is True, otherwise
    the averaged copy.
    """
    param = getattr(namespace, var_name)
    shadow = getattr(namespace, var_name + '_avg')
    # In-place exponential moving average update of the shadow tensor.
    shadow -= (1 - polyak_decay) * (shadow - param.data)
    return param if training else shadow
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
    """Apply :func:`get_var_maybe_avg` to each name in ``var_names`` and
    return the results as a list (in the same order)."""
    return [get_var_maybe_avg(namespace, name, training, polyak_decay)
            for name in var_names]
class WeightNormLinear(nn.Linear):
    """
    Implementation of "Weight Normalization: A Simple Reparameterization
    to Accelerate Training of Deep Neural Networks"
    :cite:`DBLP:journals/corr/SalimansK16`

    The weight matrix is reparameterized as ``w = g * V / ||V||`` so that
    the direction (V) and magnitude (g) are learned separately.  As a
    reparameterization method, weight normalization is same as
    BatchNormalization, but it doesn't depend on minibatch.  Polyak
    (exponential moving) averages of V, g and b are kept in buffers and
    used at evaluation time.

    NOTE: This is used nowhere in the code at this stage
    Vincent Nguyen 05/18/2018
    """
    def __init__(self, in_features, out_features,
                 init_scale=1., polyak_decay=0.9995):
        super(WeightNormLinear, self).__init__(
            in_features, out_features, bias=True)
        # Aliases onto nn.Linear's storage: V is the direction matrix,
        # g the per-output gain, b the bias.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_features))
        self.b = self.bias
        # Polyak-averaged copies, consumed in eval mode (see forward).
        self.register_buffer(
            'V_avg', torch.zeros(out_features, in_features))
        self.register_buffer('g_avg', torch.zeros(out_features))
        self.register_buffer('b_avg', torch.zeros(out_features))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()
    def reset_parameters(self):
        # Intentionally a no-op: initialization is data-dependent and
        # happens on the first forward call with init=True.
        return
    def forward(self, x, init=False):
        """Weight-normalized linear transform.

        With ``init=True``, performs the data-dependent initialization of
        g and b from the statistics of this batch (as in the paper) and
        seeds the polyak averages.

        NOTE(review): the tensor idioms here (``norm(2, 1)`` followed by
        ``expand_as``, ``squeeze`` on already-1-D results) follow pre-0.4
        PyTorch semantics — verify against the installed torch version
        before reviving this class.
        """
        if init is True:
            # out_features * in_features
            self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
                self.V.data) * 0.05)
            # norm is out_features * 1
            v_norm = self.V.data / \
                self.V.data.norm(2, 1).expand_as(self.V.data)
            # batch_size * out_features
            x_init = F.linear(x, v_norm).data
            # out_features
            m_init, v_init = x_init.mean(0).squeeze(
                0), x_init.var(0).squeeze(0)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            # Standardize the first activations with the derived scale/shift.
            x_init = scale_init.view(1, -1).expand_as(x_init) \
                * (x_init - m_init.view(1, -1).expand_as(x_init))
            # Seed the polyak averages with the initialized parameters.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Raw parameters while training, polyak averages in eval mode;
            # note get_vars_maybe_avg also refreshes the averages in place.
            v, g, b = get_vars_maybe_avg(self, ['V', 'g', 'b'],
                                         self.training,
                                         polyak_decay=self.polyak_decay)
            # batch_size * out_features
            x = F.linear(x, v)
            scalar = g / torch.norm(v, 2, 1).squeeze(1)
            x = scalar.view(1, -1).expand_as(x) * x + \
                b.view(1, -1).expand_as(x)
            return x
class WeightNormConv2d(nn.Conv2d):
    """2-D convolution with weight normalization (``w = g * V / ||V||``,
    norm taken per output channel), with polyak-averaged copies of V, g
    and b used at evaluation time.  Companion to :class:`WeightNormLinear`.

    NOTE(review): written against pre-0.4 PyTorch tensor semantics
    (``norm(2, 1)`` + ``view``/``expand_as`` reshaping, ``squeeze`` on
    1-D results) — verify before reuse on a modern torch version.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, init_scale=1.,
                 polyak_decay=0.9995):
        super(WeightNormConv2d, self).__init__(in_channels, out_channels,
                                               kernel_size, stride, padding,
                                               dilation, groups)
        # Aliases onto nn.Conv2d's storage: V is the direction tensor,
        # g the per-channel gain, b the bias.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias
        # Polyak-averaged copies, consumed in eval mode (see forward).
        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()
    def reset_parameters(self):
        # Intentionally a no-op: initialization is data-dependent and
        # happens on the first forward call with init=True.
        return
    def forward(self, x, init=False):
        """Weight-normalized convolution; ``init=True`` runs the
        data-dependent initialization of g and b from batch statistics."""
        if init is True:
            # out_channels, in_channels // groups, * kernel_size
            self.V.data.copy_(torch.randn(self.V.data.size()
                                          ).type_as(self.V.data) * 0.05)
            # Normalize V per output channel before the probe convolution.
            v_norm = self.V.data / self.V.data.view(self.out_channels, -1)\
                .norm(2, 1).view(self.out_channels, *(
                    [1] * (len(self.kernel_size) + 1))).expand_as(self.V.data)
            x_init = F.conv2d(x, v_norm, None, self.stride,
                              self.padding, self.dilation, self.groups).data
            # Flatten activations to (out_channels, everything_else) to get
            # per-channel statistics.
            t_x_init = x_init.transpose(0, 1).contiguous().view(
                self.out_channels, -1)
            m_init, v_init = t_x_init.mean(1).squeeze(
                1), t_x_init.var(1).squeeze(1)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            # Standardize the first activations with the derived scale/shift.
            scale_init_shape = scale_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            x_init = scale_init_shape.expand_as(
                x_init) * (x_init - m_init_shape.expand_as(x_init))
            # Seed the polyak averages with the initialized parameters.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Raw parameters while training, polyak averages in eval mode.
            v, g, b = get_vars_maybe_avg(
                self, ['V', 'g', 'b'], self.training,
                polyak_decay=self.polyak_decay)
            scalar = torch.norm(v.view(self.out_channels, -1), 2, 1)
            if len(scalar.size()) == 2:
                scalar = g / scalar.squeeze(1)
            else:
                scalar = g / scalar
            # Rescale the kernel per output channel, then convolve normally.
            w = scalar.view(self.out_channels, *
                            ([1] * (len(v.size()) - 1))).expand_as(v) * v
            x = F.conv2d(x, w, b, self.stride,
                         self.padding, self.dilation, self.groups)
            return x
# This is used nowhere in the code at the moment (Vincent Nguyen 05/18/2018)
class WeightNormConvTranspose2d(nn.ConvTranspose2d):
    """Transposed 2-D convolution with weight normalization.

    Mirrors :class:`WeightNormConv2d`: the kernel is reparameterized as
    ``w = g * V / ||V||`` (norm taken per output channel), with polyak
    averages of V, g and b kept for evaluation mode.

    NOTE(review): written against pre-0.4 PyTorch tensor semantics
    (``norm(2, 1)`` + ``view``/``expand_as`` reshaping, ``squeeze`` on
    1-D results) — verify before reviving this unused class.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, init_scale=1.,
                 polyak_decay=0.9995):
        super(WeightNormConvTranspose2d, self).__init__(
            in_channels, out_channels,
            kernel_size, stride,
            padding, output_padding,
            groups)
        # in_channels, out_channels, *kernel_size
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias
        # Polyak-averaged copies, consumed in eval mode (see forward).
        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()
    def reset_parameters(self):
        # Intentionally a no-op: initialization is data-dependent and
        # happens on the first forward call with init=True.
        return
    def forward(self, x, init=False):
        """Weight-normalized transposed convolution; ``init=True`` runs
        the data-dependent initialization of g and b from batch stats."""
        if init is True:
            # in_channels, out_channels, *kernel_size
            self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
                self.V.data) * 0.05)
            # Normalize V per output channel before the probe convolution.
            v_norm = self.V.data / self.V.data.transpose(0, 1).contiguous() \
                .view(self.out_channels, -1).norm(2, 1).view(
                    self.in_channels, self.out_channels,
                    *([1] * len(self.kernel_size))).expand_as(self.V.data)
            x_init = F.conv_transpose2d(
                x, v_norm, None, self.stride,
                self.padding, self.output_padding, self.groups).data
            # self.out_channels, 1
            # BUG FIX: was `x_init.tranpose(0, 1)` — a typo that raised
            # AttributeError at runtime; the tensor method is `transpose`.
            t_x_init = x_init.transpose(0, 1).contiguous().view(
                self.out_channels, -1)
            # out_features
            m_init, v_init = t_x_init.mean(1).squeeze(
                1), t_x_init.var(1).squeeze(1)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            # Standardize the first activations with the derived scale/shift.
            scale_init_shape = scale_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            x_init = scale_init_shape.expand_as(x_init)\
                * (x_init - m_init_shape.expand_as(x_init))
            # Seed the polyak averages with the initialized parameters.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Raw parameters while training, polyak averages in eval mode.
            v, g, b = get_vars_maybe_avg(
                self, ['V', 'g', 'b'], self.training,
                polyak_decay=self.polyak_decay)
            scalar = g / \
                torch.norm(v.transpose(0, 1).contiguous().view(
                    self.out_channels, -1), 2, 1).squeeze(1)
            w = scalar.view(self.in_channels, self.out_channels,
                            *([1] * (len(v.size()) - 2))).expand_as(v) * v
            x = F.conv_transpose2d(x, w, b, self.stride,
                                   self.padding, self.output_padding,
                                   self.groups)
            return x
| 9,775 | 38.578947 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/position_ffn.py | """Position feed-forward network from "Attention is All You Need"."""
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward block with a residual
    connection (pre-norm variant):

        ``x + Dropout(W_2(Dropout(ReLU(W_1(LayerNorm(x))))))``

    Args:
        d_model (int): input/output size (first layer's input).
        d_ff (int): inner hidden size (second layer's input).
        dropout (float): dropout probability in :math:`[0, 1)`.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        """Apply the FFN.

        Args:
            x: ``(batch_size, input_len, model_dim)``

        Returns:
            (FloatTensor): Output ``(batch_size, input_len, model_dim)``.
        """
        normed = self.layer_norm(x)
        inner = self.dropout_1(self.relu(self.w_1(normed)))
        projected = self.dropout_2(self.w_2(inner))
        return projected + x

    def update_dropout(self, dropout):
        """Reset both dropout layers to probability ``dropout``."""
        self.dropout_1.p = dropout
        self.dropout_2.p = dropout
| 1,308 | 30.166667 | 73 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/multi_headed_attn.py | """ Multi-Head Attention module """
import math
import torch
import torch.nn as nn
from onmt.utils.misc import generate_relative_positions_matrix,\
relative_matmul
# from onmt.utils.misc import aeq
class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention module from "Attention is All You Need"
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.

    Similar to standard `dot` attention but uses multiple attention
    distributions simultaneously to select relevant items.  Optionally
    supports relative-position representations (Shaw et al.) when
    ``max_relative_positions > 0``, and incremental decoding through
    ``layer_cache``.

    Args:
       head_count (int): number of parallel heads
       model_dim (int): the dimension of keys/values/queries,
           must be divisible by head_count
       dropout (float): dropout probability applied to the attention weights
       max_relative_positions (int): clip distance for relative-position
           embeddings; 0 disables them
    """
    def __init__(self, head_count, model_dim, dropout=0.1,
                 max_relative_positions=0):
        assert model_dim % head_count == 0
        self.dim_per_head = model_dim // head_count
        self.model_dim = model_dim
        super(MultiHeadedAttention, self).__init__()
        self.head_count = head_count
        # Separate projections for K, V, Q; each maps model_dim ->
        # head_count * dim_per_head (== model_dim).
        self.linear_keys = nn.Linear(model_dim,
                                     head_count * self.dim_per_head)
        self.linear_values = nn.Linear(model_dim,
                                       head_count * self.dim_per_head)
        self.linear_query = nn.Linear(model_dim,
                                      head_count * self.dim_per_head)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.final_linear = nn.Linear(model_dim, model_dim)
        self.max_relative_positions = max_relative_positions
        if max_relative_positions > 0:
            # One embedding per clipped relative offset in
            # [-max_relative_positions, +max_relative_positions].
            vocab_size = max_relative_positions * 2 + 1
            self.relative_positions_embeddings = nn.Embedding(
                vocab_size, self.dim_per_head)
    def forward(self, key, value, query, mask=None,
                layer_cache=None, attn_type=None):
        """
        Compute the context vector and the attention vectors.

        Args:
            key (FloatTensor): set of `key_len`
                key vectors ``(batch, key_len, dim)``
            value (FloatTensor): set of `key_len`
                value vectors ``(batch, key_len, dim)``
            query (FloatTensor): set of `query_len`
                query vectors  ``(batch, query_len, dim)``
            mask: binary mask 1/0 indicating which keys have
                zero / non-zero attention ``(batch, query_len, key_len)``
            layer_cache (dict or None): per-layer cache of projected
                keys/values for incremental decoding
            attn_type (str or None): "self" or "context"; selects which
                cache entries are used

        Returns:
            (FloatTensor, FloatTensor):

            * output context vectors ``(batch, query_len, dim)``
            * Attention vector in heads ``(batch, head, query_len, key_len)``.
        """
        # CHECKS
        # batch, k_len, d = key.size()
        # batch_, k_len_, d_ = value.size()
        # aeq(batch, batch_)
        # aeq(k_len, k_len_)
        # aeq(d, d_)
        # batch_, q_len, d_ = query.size()
        # aeq(batch, batch_)
        # aeq(d, d_)
        # aeq(self.model_dim % 8, 0)
        # if mask is not None:
        #    batch_, q_len_, k_len_ = mask.size()
        #    aeq(batch_, batch)
        #    aeq(k_len_, k_len)
        #    aeq(q_len_ == q_len)
        # END CHECKS
        batch_size = key.size(0)
        dim_per_head = self.dim_per_head
        head_count = self.head_count
        key_len = key.size(1)
        query_len = query.size(1)
        def shape(x):
            """(batch, len, model_dim) -> (batch, heads, len, dim_per_head)."""
            return x.view(batch_size, -1, head_count, dim_per_head) \
                .transpose(1, 2)
        def unshape(x):
            """(batch, heads, len, dim_per_head) -> (batch, len, model_dim)."""
            return x.transpose(1, 2).contiguous() \
                    .view(batch_size, -1, head_count * dim_per_head)
        # 1) Project key, value, and query.
        if layer_cache is not None:
            if attn_type == "self":
                # Self-attention during decoding: K and V are computed from
                # the new query step and appended to the cached ones.
                query, key, value = self.linear_query(query),\
                                    self.linear_keys(query),\
                                    self.linear_values(query)
                key = shape(key)
                value = shape(value)
                if layer_cache["self_keys"] is not None:
                    key = torch.cat(
                        (layer_cache["self_keys"], key),
                        dim=2)
                if layer_cache["self_values"] is not None:
                    value = torch.cat(
                        (layer_cache["self_values"], value),
                        dim=2)
                layer_cache["self_keys"] = key
                layer_cache["self_values"] = value
            elif attn_type == "context":
                # Encoder-decoder attention: memory K/V are fixed, so they
                # are projected once and then reused from the cache.
                query = self.linear_query(query)
                if layer_cache["memory_keys"] is None:
                    key, value = self.linear_keys(key),\
                                 self.linear_values(value)
                    key = shape(key)
                    value = shape(value)
                else:
                    key, value = layer_cache["memory_keys"],\
                               layer_cache["memory_values"]
                layer_cache["memory_keys"] = key
                layer_cache["memory_values"] = value
        else:
            # No cache: project everything from scratch.
            key = self.linear_keys(key)
            value = self.linear_values(value)
            query = self.linear_query(query)
            key = shape(key)
            value = shape(value)
        if self.max_relative_positions > 0 and attn_type == "self":
            key_len = key.size(2)
            # 1 or key_len x key_len
            relative_positions_matrix = generate_relative_positions_matrix(
                key_len, self.max_relative_positions,
                cache=True if layer_cache is not None else False)
            # 1 or key_len x key_len x dim_per_head
            relations_keys = self.relative_positions_embeddings(
                relative_positions_matrix.to(key.device))
            # 1 or key_len x key_len x dim_per_head
            relations_values = self.relative_positions_embeddings(
                relative_positions_matrix.to(key.device))
        query = shape(query)
        key_len = key.size(2)
        query_len = query.size(2)
        # 2) Calculate and scale scores.
        query = query / math.sqrt(dim_per_head)
        # batch x num_heads x query_len x key_len
        query_key = torch.matmul(query, key.transpose(2, 3))
        if self.max_relative_positions > 0 and attn_type == "self":
            scores = query_key + relative_matmul(query, relations_keys, True)
        else:
            scores = query_key
        # Softmax in float32 for numerical stability (cast back below).
        scores = scores.float()
        if mask is not None:
            mask = mask.unsqueeze(1)  # [B, 1, 1, T_values]
            scores = scores.masked_fill(mask, -1e18)
        # 3) Apply attention dropout and compute context vectors.
        attn = self.softmax(scores).to(query.dtype)
        drop_attn = self.dropout(attn)
        context_original = torch.matmul(drop_attn, value)
        if self.max_relative_positions > 0 and attn_type == "self":
            context = unshape(context_original
                              + relative_matmul(drop_attn,
                                                relations_values,
                                                False))
        else:
            context = unshape(context_original)
        output = self.final_linear(context)
        # CHECK
        # batch_, q_len_, d_ = output.size()
        # aeq(q_len, q_len_)
        # aeq(batch, batch_)
        # aeq(d, d_)
        # Return multi-head attn (pre-dropout weights, split per head).
        attns = attn \
            .view(batch_size, head_count,
                  query_len, key_len)
        return output, attns
    def update_dropout(self, dropout):
        # Only the attention-weight dropout is affected.
        self.dropout.p = dropout
| 8,133 | 34.212121 | 77 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/modules/table_embeddings.py | import torch
class TableEmbeddings(torch.nn.Module):
    """Embeddings that follow the table structure.

    A table is an unordered set of (pos, value) tuples, where ``pos`` can
    be viewed as a column name.  ``forward`` therefore returns embeddings
    for both pos and value; the value embedding can optionally be merged
    with the pos embedding.

    Most argument names are not very fitting but stay the same as
    onmt.modules.Embeddings for compatibility.

    Args:
        word_vec_size: dim of the value embeddings
        word_vocab_size: size of the value vocabulary
        word_padding_idx: idx of <pad> in the value vocabulary
        feat_vec_size: dim of the pos embeddings; if negative, it is
            derived as ``int(feat_vocab_size ** feat_vec_exponent)``
        feat_vec_exponent: exponent used when ``feat_vec_size`` < 0
        feat_vocab_size: size of the pos vocabulary
        feat_padding_idx: idx of <pad> in the pos vocabulary
        merge: how to merge pos and value (None|'concat'|'sum'|'mlp')
        merge_activation: activation after the mlp merge (None|'ReLU'|'Tanh')
        dropout: kept for interface compatibility (unused here)
        ent_idx: index of the entity token (required)
    """
    def __init__(self,
                 word_vec_size,
                 word_vocab_size,
                 word_padding_idx,
                 feat_vec_size,
                 feat_vec_exponent,
                 feat_vocab_size,
                 feat_padding_idx,
                 merge="concat",
                 merge_activation='ReLU',
                 dropout=0,
                 ent_idx=None):
        super().__init__()
        assert ent_idx is not None
        self.ent_idx = ent_idx
        self.word_padding_idx = word_padding_idx
        self.word_vec_size = word_vec_size
        if feat_vec_size < 0:
            if not 0 < feat_vec_exponent <= 1:
                raise ValueError('feat_vec_exponent should be between 0 and 1')
            feat_vec_size = int(feat_vocab_size ** feat_vec_exponent)
        # BUG FIX: feat_vec_size was only ever a local variable, so the
        # 'concat' and 'sum' branches below crashed with AttributeError
        # when reading self.feat_vec_size.  Persist it on the instance.
        self.feat_vec_size = feat_vec_size
        self.value_embeddings = torch.nn.Embedding(word_vocab_size,
                              word_vec_size, padding_idx=word_padding_idx)
        self.pos_embeddings = torch.nn.Embedding(feat_vocab_size,
                              feat_vec_size, padding_idx=feat_padding_idx)
        self._merge = merge
        if merge is None:
            self.embedding_size = self.word_vec_size
        elif merge == 'concat':
            self.embedding_size = self.word_vec_size + self.feat_vec_size
        elif merge == 'sum':
            # Element-wise sum requires matching dimensions.
            assert self.word_vec_size == self.feat_vec_size
            self.embedding_size = self.word_vec_size
        elif merge == 'mlp':
            self.embedding_size = self.word_vec_size
            val_dim = self.value_embeddings.embedding_dim
            pos_dim = self.pos_embeddings.embedding_dim
            in_dim = val_dim + pos_dim
            self.merge = torch.nn.Linear(in_dim, val_dim)
            if merge_activation is None:
                self.activation = None
            elif merge_activation == 'ReLU':
                self.activation = torch.nn.ReLU()
            elif merge_activation == 'Tanh':
                self.activation = torch.nn.Tanh()
            else:
                raise ValueError(f'Unknown activation {merge_activation}')
        else:
            raise ValueError('merge should be one of [None|concat|sum|mlp]')
    @property
    def word_lut(self):
        """Word look-up table (the value embedding matrix)."""
        return self.value_embeddings
    def load_pretrained_vectors(self, emb_file):
        """Placeholder for onmt compatibility; pretrained vectors are not
        supported by this embedding class."""
        if emb_file:
            raise NotImplementedError
    def forward(self, inputs):
        """Embed a table.

        Args:
            inputs: LongTensor ``(len, batch, 2)`` of (value, pos) index
                pairs.

        Returns:
            (values, pos): the (possibly merged) value embeddings and the
            pos embeddings.
        """
        # unpack the inputs as cell values and pos (column name)
        values, pos = [item.squeeze(2) for item in inputs.split(1, dim=2)]
        # embed them separately and maybe merge them
        values = self.value_embeddings(values)
        pos = self.pos_embeddings(pos)
        if self._merge is None:
            return values, pos
        if self._merge == 'sum':
            values = values + pos
            return values, pos
        values = torch.cat((values, pos), 2)
        if self._merge == 'concat':
            return values, pos
        if self._merge == 'mlp':
            values = self.merge(values)
            if self.activation:
                values = self.activation(values)
            return values, pos
| 4,278 | 37.54955 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/models/stacked_rnn.py | """ Implementation of ONMT RNN for Input Feeding Decoding """
import torch
import torch.nn as nn
class StackedLSTM(nn.Module):
    """Stacked LSTM cells, unrolled one step at a time.

    Needed for the decoder because of input feeding: each call advances
    every layer by a single timestep so the decoder output can be fed
    back in.
    """

    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedLSTM, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        # First cell consumes the input; deeper cells consume rnn_size.
        self.layers = nn.ModuleList(
            nn.LSTMCell(input_size if k == 0 else rnn_size, rnn_size)
            for k in range(num_layers)
        )

    def forward(self, input_feed, hidden):
        h_0, c_0 = hidden
        new_h, new_c = [], []
        for idx, cell in enumerate(self.layers):
            h_i, c_i = cell(input_feed, (h_0[idx], c_0[idx]))
            input_feed = h_i
            # Apply dropout between layers, but not after the last one.
            if idx + 1 != self.num_layers:
                input_feed = self.dropout(input_feed)
            new_h.append(h_i)
            new_c.append(c_i)
        return input_feed, (torch.stack(new_h), torch.stack(new_c))
class StackedGRU(nn.Module):
    """Stacked GRU cells, unrolled one step at a time.

    Needed for the decoder because of input feeding (see
    :class:`StackedLSTM`).
    """

    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedGRU, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        # First cell consumes the input; deeper cells consume rnn_size.
        self.layers = nn.ModuleList(
            nn.GRUCell(input_size if k == 0 else rnn_size, rnn_size)
            for k in range(num_layers)
        )

    def forward(self, input_feed, hidden):
        new_h = []
        for idx, cell in enumerate(self.layers):
            h_i = cell(input_feed, hidden[0][idx])
            input_feed = h_i
            # Apply dropout between layers, but not after the last one.
            if idx + 1 != self.num_layers:
                input_feed = self.dropout(input_feed)
            new_h.append(h_i)
        return input_feed, (torch.stack(new_h),)
| 1,994 | 29.227273 | 66 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/models/model.py | """ Onmt NMT Model base class definition """
import torch.nn as nn
class NMTModel(nn.Module):
    """Generic encoder-decoder model: the core trainable object in OpenNMT.

    Args:
        encoder (onmt.encoders.EncoderBase): an encoder object
        decoder (onmt.decoders.DecoderBase): a decoder object
    """

    def __init__(self, encoder, decoder):
        super(NMTModel, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, tgt, lengths, bptt=False, with_align=False):
        """Forward propagate a ``src``/``tgt`` pair for training.

        Args:
            src (Tensor): source sequence passed to the encoder, typically
                a padded ``LongTensor`` of size ``(len, batch, features)``
                (may be an image or other input depending on the encoder).
            tgt (LongTensor): target sequence ``(tgt_len, batch, features)``.
            lengths (LongTensor): pre-padding src lengths ``(batch,)``.
            bptt (bool): when True, the decoder state is NOT re-initialized
                (continuation of a truncated-BPTT segment).
            with_align (bool): request output alignment (transformer
                decoder only).

        Returns:
            (FloatTensor, dict[str, FloatTensor]):

            * decoder output ``(tgt_len, batch, hidden)``
            * dictionary of attention dists ``(tgt_len, batch, src_len)``
        """
        # The decoder is fed everything except the final target token.
        dec_in = tgt[:-1]
        enc_state, memory_bank, lengths = self.encoder(src, lengths)
        if bptt is False:
            self.decoder.init_state(src, memory_bank, enc_state)
        return self.decoder(dec_in, memory_bank,
                            memory_lengths=lengths,
                            with_align=with_align)

    def update_dropout(self, dropout):
        """Propagate a new dropout probability to encoder and decoder."""
        self.encoder.update_dropout(dropout)
        self.decoder.update_dropout(dropout)
| 2,218 | 37.929825 | 77 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/models/model_saver.py | import os
import torch
from collections import deque
from onmt.utils.logging import logger
from copy import deepcopy
def build_model_saver(model_opt, opt, model, fields, optim):
    """Construct a :class:`ModelSaver` from training options.

    ``opt.save_model`` gives the checkpoint path prefix and
    ``opt.keep_checkpoint`` the size of the rolling checkpoint window.
    """
    return ModelSaver(opt.save_model, model, model_opt, fields, optim,
                      opt.keep_checkpoint)
class ModelSaverBase(object):
    """Base class for model saving operations.

    Inherited classes must implement private methods:

        * ``_save``
        * ``_rm_checkpoint``
    """

    def __init__(self, base_path, model, model_opt, fields, optim,
                 keep_checkpoint=-1):
        self.base_path = base_path
        self.model = model
        self.model_opt = model_opt
        self.fields = fields
        self.optim = optim
        self.last_saved_step = None
        self.keep_checkpoint = keep_checkpoint
        if keep_checkpoint > 0:
            # Rolling window of checkpoint names; the oldest one is
            # deleted whenever the window overflows on save().
            self.checkpoint_queue = deque([], maxlen=keep_checkpoint)

    def save(self, step, moving_average=None):
        """Main entry point for model saver.

        Wraps the `_save` method with checks and applies
        `keep_checkpoint` related logic.

        Args:
            step (int): current training step
            moving_average (iterable or None): when given, the model is
                temporarily swapped to these averaged parameters for the
                duration of the save.
        """
        # keep_checkpoint == 0 disables saving entirely; also skip
        # duplicate saves for the same step.
        if self.keep_checkpoint == 0 or step == self.last_saved_step:
            return
        save_model = self.model
        if moving_average:
            # Swap in the averaged parameters, remembering the originals.
            model_params_data = []
            for avg, param in zip(moving_average, save_model.parameters()):
                model_params_data.append(param.data)
                param.data = avg.data
        chkpt, chkpt_name = self._save(step, save_model)
        self.last_saved_step = step
        if moving_average:
            # Restore the original (non-averaged) parameters.
            for param_data, param in zip(model_params_data,
                                         save_model.parameters()):
                param.data = param_data
        if self.keep_checkpoint > 0:
            if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
                todel = self.checkpoint_queue.popleft()
                self._rm_checkpoint(todel)
            self.checkpoint_queue.append(chkpt_name)

    # CONSISTENCY FIX: the abstract signature previously was
    # `_save(self, step)` although save() calls `self._save(step, save_model)`
    # and the ModelSaver subclass overrides `_save(self, step, model)`.
    def _save(self, step, model):
        """Save a resumable checkpoint.

        Args:
            step (int): step number
            model (nn.Module): the model to checkpoint

        Returns:
            (object, str):

            * checkpoint: the saved object
            * checkpoint_name: name (or path) of the saved checkpoint
        """
        raise NotImplementedError()

    def _rm_checkpoint(self, name):
        """Remove a checkpoint.

        Args:
            name(str): name that indentifies the checkpoint
                (it may be a filepath)
        """
        raise NotImplementedError()
class ModelSaver(ModelSaverBase):
    """Simple model saver to filesystem: writes
    ``<base_path>_step_<N>.pt`` checkpoints via ``torch.save``."""
    def _save(self, step, model):
        # Generator parameters are checkpointed separately, so strip them
        # from the main model state dict.
        model_state_dict = model.state_dict()
        model_state_dict = {k: v for k, v in model_state_dict.items()
                            if 'generator' not in k}
        generator_state_dict = model.generator.state_dict()
        # NOTE: We need to trim the vocab to remove any unk tokens that
        # were not originally here.
        vocab = deepcopy(self.fields)
        for side in ["src", "tgt"]:
            keys_to_pop = []
            if hasattr(vocab[side], "fields"):
                # Index 0 is reserved for <unk>; any other key mapped to 0
                # is a spurious alias and gets dropped from stoi.
                unk_token = vocab[side].fields[0][1].vocab.itos[0]
                for key, value in vocab[side].fields[0][1].vocab.stoi.items():
                    if value == 0 and key != unk_token:
                        keys_to_pop.append(key)
                for key in keys_to_pop:
                    vocab[side].fields[0][1].vocab.stoi.pop(key, None)
        # Checkpoint payload: weights, trimmed vocab, model options and
        # optimizer state — everything needed to resume training.
        checkpoint = {
            'model': model_state_dict,
            'generator': generator_state_dict,
            'vocab': vocab,
            'opt': self.model_opt,
            'optim': self.optim.state_dict(),
        }
        logger.info("Saving checkpoint %s_step_%d.pt" % (self.base_path, step))
        checkpoint_path = '%s_step_%d.pt' % (self.base_path, step)
        torch.save(checkpoint, checkpoint_path)
        return checkpoint, checkpoint_path
    def _rm_checkpoint(self, name):
        # `name` is the path returned by _save for an earlier step.
        os.remove(name)
| 4,230 | 30.340741 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/models/sru.py | """ SRU Implementation """
# flake8: noqa
import subprocess
import platform
import os
import re
import configargparse
import torch
import torch.nn as nn
from torch.autograd import Function
from collections import namedtuple
# For command-line option parsing
class CheckSRU(configargparse.Action):
    """Argument-parser action: when the parsed value is 'SRU', verify at
    option-parsing time that the SRU runtime requirements (cupy, pynvrtc,
    CUDA-enabled pytorch, CUDA library path) are satisfied."""
    def __init__(self, option_strings, dest, **kwargs):
        super(CheckSRU, self).__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        # Aborts with an explanatory AssertionError if SRU cannot run.
        if values == 'SRU':
            check_sru_requirement(abort=True)
        # Check pass, set the args.
        setattr(namespace, self.dest, values)
# This SRU version implements its own cuda-level optimization,
# so it requires that:
# 1. `cupy` and `pynvrtc` python package installed.
# 2. pytorch is built with cuda support.
# 3. library path set: export LD_LIBRARY_PATH=<cuda lib path>.
def check_sru_requirement(abort=False):
    """Check that the custom-CUDA SRU implementation can run here.

    Three requirements are probed in order: (1) the ``cupy`` and
    ``pynvrtc`` packages are installed, (2) pytorch was built with CUDA
    support, (3) ``LD_LIBRARY_PATH`` contains a cuda lib directory.

    Returns True if all checks pass.  On failure, returns False, or
    raises AssertionError when ``abort`` is True.
    """
    # Check 1: python package requirements, queried through pip.
    try:
        if platform.system() == 'Windows':
            subprocess.check_output('pip freeze | findstr cupy', shell=True)
            subprocess.check_output('pip freeze | findstr pynvrtc',
                                    shell=True)
        else:  # Unix-like systems
            subprocess.check_output('pip freeze | grep -w cupy', shell=True)
            subprocess.check_output('pip freeze | grep -w pynvrtc',
                                    shell=True)
    except subprocess.CalledProcessError:
        if abort:
            raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
                                 "python packages installed.")
        return False
    # Check 2: cuda-enabled pytorch build.
    if not torch.cuda.is_available():
        if abort:
            raise AssertionError("Using SRU requires pytorch built with cuda.")
        return False
    # Check 3: cuda libraries reachable via the loader path.
    ld_path = os.getenv('LD_LIBRARY_PATH', "")
    if re.match(re.compile(".*cuda/lib.*"), ld_path) is None:
        if abort:
            raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
                                 "export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
        return False
    return True
SRU_CODE = """
extern "C" {
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
}
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
}
__global__ void sru_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len,
const int batch, const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len, const int batch,
const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1)?(*(cp-ncols_)):(*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
}
"""
# Handles to the JIT-compiled SRU CUDA kernels; populated lazily by
# load_sru_mod() on first use (None until then).
SRU_FWD_FUNC, SRU_BWD_FUNC = None, None
SRU_BiFWD_FUNC, SRU_BiBWD_FUNC = None, None
# Lightweight wrapper holding the current CUDA stream pointer, passed to
# every kernel launch (set in load_sru_mod).
SRU_STREAM = None
def load_sru_mod():
    """Compile the SRU CUDA source with NVRTC and cache the kernel handles.

    Fills in the module-level SRU_*_FUNC globals and SRU_STREAM. Does
    nothing unless check_sru_requirement() reports that cupy/pynvrtc and
    a CUDA device are available.
    """
    global SRU_FWD_FUNC, SRU_BWD_FUNC, SRU_BiFWD_FUNC, SRU_BiBWD_FUNC
    global SRU_STREAM
    if not check_sru_requirement():
        return
    from cupy.cuda import function
    from pynvrtc.compiler import Program

    # Touch the GPU once so a CUDA context exists before we compile/load.
    device = torch.device("cuda")
    tmp_ = torch.rand(1, 1).to(device)

    program = Program(SRU_CODE.encode('utf-8'),
                      'sru_prog.cu'.encode('utf-8'))
    ptx = program.compile()
    module = function.Module()
    module.load(bytes(ptx.encode()))

    SRU_FWD_FUNC = module.get_function('sru_fwd')
    SRU_BWD_FUNC = module.get_function('sru_bwd')
    SRU_BiFWD_FUNC = module.get_function('sru_bi_fwd')
    SRU_BiBWD_FUNC = module.get_function('sru_bi_bwd')

    Stream = namedtuple('Stream', ['ptr'])
    SRU_STREAM = Stream(ptr=torch.cuda.current_stream().cuda_stream)
class SRU_Compute(Function):
    """Autograd Function that launches the hand-written SRU CUDA kernels.

    NOTE(review): this uses the deprecated *stateful* Function API
    (instance attributes, ``save_for_backward`` on ``self``), so a fresh
    instance must be created per call — as SRUCell.forward does.
    The argument lists passed to ``FUNC(args=[...])`` must match the CUDA
    kernel signatures positionally; do not reorder them.
    """
    def __init__(self, activation_type, d_out, bidirectional=False):
        # Lazily compile the CUDA kernels on first construction.
        SRU_Compute.maybe_load_sru_mod()
        super(SRU_Compute, self).__init__()
        self.activation_type = activation_type  # 0: identity, 1: tanh, 2: relu
        self.d_out = d_out
        self.bidirectional = bidirectional
    @staticmethod
    def maybe_load_sru_mod():
        # SRU_FWD_FUNC doubles as the "kernels are loaded" sentinel.
        global SRU_FWD_FUNC
        if SRU_FWD_FUNC is None:
            load_sru_mod()
    def forward(self, u, x, bias, init=None, mask_h=None):
        """Launch the forward kernel.

        Args:
            u: projected input, last dim = k * d (k in {3, 4} per direction).
            x: raw input, ``(len, batch, n_in)`` or ``(batch, n_in)``.
            bias: gate biases.
            init: initial cell state (zeros when None).
            mask_h: optional dropout mask on the highway output.

        Returns:
            (h, last_hidden) — hidden states and final cell state.
        """
        bidir = 2 if self.bidirectional else 1
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        d = self.d_out
        k = u.size(-1) // d
        # k_ is the per-direction projection count (3 or 4).
        k_ = k // 2 if self.bidirectional else k
        ncols = batch * d * bidir
        # One CUDA thread per output column.
        thread_per_block = min(512, ncols)
        num_block = (ncols - 1) // thread_per_block + 1
        init_ = x.new(ncols).zero_() if init is None else init
        size = (length, batch, d * bidir) if x.dim() == 3 else (batch, d * bidir)
        c = x.new(*size)
        h = x.new(*size)
        FUNC = SRU_FWD_FUNC if not self.bidirectional else SRU_BiFWD_FUNC
        # Positional args mirror the kernel signature exactly;
        # 0 stands in for a NULL pointer.
        FUNC(args=[
            u.contiguous().data_ptr(),
            x.contiguous().data_ptr() if k_ == 3 else 0,
            bias.data_ptr(),
            init_.contiguous().data_ptr(),
            mask_h.data_ptr() if mask_h is not None else 0,
            length,
            batch,
            d,
            k_,
            h.data_ptr(),
            c.data_ptr(),
            self.activation_type],
            block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
            stream=SRU_STREAM
        )
        self.save_for_backward(u, x, bias, init, mask_h)
        # c is needed by backward; stash it on the instance (old-style API).
        self.intermediate = c
        if x.dim() == 2:
            last_hidden = c
        elif self.bidirectional:
            # -> directions x batch x dim
            # forward direction ends at t = len-1, backward at t = 0.
            last_hidden = torch.stack((c[-1, :, :d], c[0, :, d:]))
        else:
            last_hidden = c[-1]
        return h, last_hidden
    def backward(self, grad_h, grad_last):
        """Launch the backward kernel; returns grads for (u, x, bias, init)."""
        if self.bidirectional:
            # Flatten the (2, batch, d) direction-stacked grad to (batch, 2d).
            grad_last = torch.cat((grad_last[0], grad_last[1]), 1)
        bidir = 2 if self.bidirectional else 1
        u, x, bias, init, mask_h = self.saved_tensors
        c = self.intermediate
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        d = self.d_out
        k = u.size(-1) // d
        k_ = k // 2 if self.bidirectional else k
        ncols = batch * d * bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols - 1) // thread_per_block + 1
        init_ = x.new(ncols).zero_() if init is None else init
        grad_u = u.new(*u.size())
        # Per-column bias grads; summed over batch before returning.
        grad_bias = x.new(2, batch, d * bidir)
        grad_init = x.new(batch, d * bidir)
        # For DEBUG
        # size = (length, batch, x.size(-1)) \
        #         if x.dim() == 3 else (batch, x.size(-1))
        # grad_x = x.new(*x.size()) if k_ == 3 else x.new(*size).zero_()
        # Normal use
        # grad_x only exists when x feeds the highway directly (k_ == 3).
        grad_x = x.new(*x.size()) if k_ == 3 else None
        FUNC = SRU_BWD_FUNC if not self.bidirectional else SRU_BiBWD_FUNC
        FUNC(args=[
            u.contiguous().data_ptr(),
            x.contiguous().data_ptr() if k_ == 3 else 0,
            bias.data_ptr(),
            init_.contiguous().data_ptr(),
            mask_h.data_ptr() if mask_h is not None else 0,
            c.data_ptr(),
            grad_h.contiguous().data_ptr(),
            grad_last.contiguous().data_ptr(),
            length,
            batch,
            d,
            k_,
            grad_u.data_ptr(),
            grad_x.data_ptr() if k_ == 3 else 0,
            grad_bias.data_ptr(),
            grad_init.data_ptr(),
            self.activation_type],
            block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
            stream=SRU_STREAM
        )
        return grad_u, grad_x, grad_bias.sum(1).view(-1), grad_init, None
class SRUCell(nn.Module):
    """One SRU layer ("Training RNNs as Fast as CNNs").

    Projects the input with a single matmul, then delegates the
    recurrence to the SRU_Compute CUDA kernels.
    """

    def __init__(self, n_in, n_out, dropout=0, rnn_dropout=0,
                 bidirectional=False, use_tanh=1, use_relu=0):
        super(SRUCell, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.rnn_dropout = rnn_dropout
        self.dropout = dropout
        self.bidirectional = bidirectional
        # 0: identity, 1: tanh, 2: relu (relu takes precedence over tanh).
        self.activation_type = 2 if use_relu else (1 if use_tanh else 0)

        out_size = n_out * 2 if bidirectional else n_out
        # k == 4 adds an extra projection of x for the highway term when
        # input and output widths differ; otherwise x is reused (k == 3).
        k = 4 if n_in != out_size else 3
        self.size_per_dir = n_out * k
        weight_cols = self.size_per_dir * 2 if bidirectional \
            else self.size_per_dir
        self.weight = nn.Parameter(torch.Tensor(n_in, weight_cols))
        bias_len = n_out * 4 if bidirectional else n_out * 2
        self.bias = nn.Parameter(torch.Tensor(bias_len))
        self.init_weight()

    def init_weight(self):
        """Uniform init in [-sqrt(3/n_in), sqrt(3/n_in)]; zero all biases."""
        bound = (3.0 / self.n_in) ** 0.5
        self.weight.data.uniform_(-bound, bound)
        self.bias.data.zero_()

    def set_bias(self, bias_val=0):
        """Reset the second half of the bias vector (gate g2) to bias_val."""
        offset = self.n_out * 2 if self.bidirectional else self.n_out
        self.bias.data[offset:].zero_().add_(bias_val)

    def forward(self, input, c0=None):
        """Run the SRU over `input` ((len, batch, n_in) or (batch, n_in)).

        Returns (h, c): hidden states and the final cell state.
        """
        assert input.dim() in (2, 3)
        n_in, n_out = self.n_in, self.n_out
        batch = input.size(-2)
        if c0 is None:
            state_width = n_out * 2 if self.bidirectional else n_out
            c0 = input.data.new(batch, state_width).zero_()

        if self.training and (self.rnn_dropout > 0):
            # Variational (per-sequence) dropout on the input.
            mask = self.get_dropout_mask_((batch, n_in), self.rnn_dropout)
            x = input * mask.expand_as(input)
        else:
            x = input

        x_2d = x if x.dim() == 2 else x.contiguous().view(-1, n_in)
        u = x_2d.mm(self.weight)

        compute = SRU_Compute(self.activation_type, n_out,
                              self.bidirectional)
        if self.training and (self.dropout > 0):
            bidir = 2 if self.bidirectional else 1
            mask_h = self.get_dropout_mask_((batch, n_out * bidir),
                                            self.dropout)
            h, c = compute(u, input, self.bias, c0, mask_h)
        else:
            h, c = compute(u, input, self.bias, c0)
        return h, c

    def get_dropout_mask_(self, size, p):
        """Inverted-dropout mask on the same device/dtype as the weights."""
        w = self.weight.data
        return w.new(*size).bernoulli_(1 - p).div_(1 - p)
class SRU(nn.Module):
    """Stacked Simple Recurrent Unit, "Training RNNs as Fast as CNNs"
    :cite:`DBLP:journals/corr/abs-1709-02755`
    TODO: turn to pytorch's implementation when it is available.
    Adapted from the author's reference code:
    https://github.com/taolei87/sru/blob/master/cuda_functional.py.
    Args:
        input_size (int): input to model
        hidden_size (int): hidden dimension
        num_layers (int): number of layers
        dropout (float): dropout to use (stacked)
        rnn_dropout (float): dropout to use (recurrent)
        bidirectional (bool): bidirectional
        use_tanh (bool): activation
        use_relu (bool): activation
    """

    def __init__(self, input_size, hidden_size,
                 num_layers=2, dropout=0, rnn_dropout=0,
                 bidirectional=False, use_tanh=1, use_relu=0):
        # Entry check: aborts on both train and translate sides when the
        # CUDA/SRU requirements are not satisfied.
        check_sru_requirement(abort=True)
        super(SRU, self).__init__()
        self.n_in = input_size
        self.n_out = hidden_size
        self.depth = num_layers
        self.dropout = dropout
        self.rnn_dropout = rnn_dropout
        self.bidirectional = bidirectional
        self.out_size = hidden_size * 2 if bidirectional else hidden_size

        self.rnn_lst = nn.ModuleList()
        for layer in range(num_layers):
            is_last = (layer + 1 == num_layers)
            self.rnn_lst.append(SRUCell(
                n_in=self.n_in if layer == 0 else self.out_size,
                n_out=self.n_out,
                dropout=0 if is_last else dropout,
                rnn_dropout=rnn_dropout,
                bidirectional=bidirectional,
                use_tanh=use_tanh,
                use_relu=use_relu,
            ))

    def set_bias(self, bias_val=0):
        """Propagate a highway-gate bias reset to every layer."""
        for cell in self.rnn_lst:
            cell.set_bias(bias_val)

    def forward(self, input, c0=None, return_hidden=True):
        """Run all layers; input is (len, batch, n_in)."""
        assert input.dim() == 3  # (len, batch, n_in)
        dir_ = 2 if self.bidirectional else 1
        if c0 is None:
            zeros = input.data.new(
                input.size(1), self.n_out * dir_
            ).zero_()
            c0 = [zeros] * self.depth
        else:
            if isinstance(c0, tuple):
                # RNNDecoderState wraps hidden as a tuple.
                c0 = c0[0]
            assert c0.dim() == 3  # (depth, batch, dir_*n_out)
            c0 = [layer_c0.squeeze(0)
                  for layer_c0 in c0.chunk(self.depth, 0)]

        prevx = input
        lstc = []
        for i, rnn in enumerate(self.rnn_lst):
            h, c = rnn(prevx, c0[i])
            prevx = h
            lstc.append(c)

        if self.bidirectional:
            # fh -> (layers*directions) x batch x dim
            fh = torch.cat(lstc)
        else:
            fh = torch.stack(lstc)

        return (prevx, fh) if return_hidden else prevx
| 24,302 | 36.27454 | 81 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/bin/average_models.py | #!/usr/bin/env python
import argparse
import torch
def average_models(model_files, fp32=False):
    """Average the parameters of several OpenNMT checkpoints.

    Args:
        model_files: checkpoint paths; the first one supplies the
            returned 'vocab' and 'opt'.
        fp32: when True, cast all weights to float32 before averaging.

    Returns:
        A checkpoint dict with averaged 'model' and 'generator' weights
        and 'optim' reset to None.
    """
    vocab, opt = None, None
    avg_model, avg_generator = None, None

    for idx, path in enumerate(model_files):
        checkpoint = torch.load(path, map_location='cpu')
        model_weights = checkpoint['model']
        generator_weights = checkpoint['generator']

        if fp32:
            for name, tensor in model_weights.items():
                model_weights[name] = tensor.float()
            for name, tensor in generator_weights.items():
                generator_weights[name] = tensor.float()

        if idx == 0:
            # The first checkpoint seeds the running average and metadata.
            vocab, opt = checkpoint['vocab'], checkpoint['opt']
            avg_model = model_weights
            avg_generator = generator_weights
        else:
            # Incremental mean: avg_i = (avg_{i-1} * i + new) / (i + 1).
            for name in avg_model:
                avg_model[name].mul_(idx) \
                    .add_(model_weights[name]).div_(idx + 1)
            for name in avg_generator:
                avg_generator[name].mul_(idx) \
                    .add_(generator_weights[name]).div_(idx + 1)

    return {"vocab": vocab, "opt": opt, "optim": None,
            "generator": avg_generator, "model": avg_model}
def main():
    """CLI entry point: parse args, average the checkpoints, save result."""
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-models", "-m", nargs="+", required=True,
                        help="List of models")
    parser.add_argument("-output", "-o", required=True,
                        help="Output file")
    parser.add_argument("-fp32", "-f", action="store_true",
                        help="Cast params to float32")
    args = parser.parse_args()
    averaged = average_models(args.models, args.fp32)
    torch.save(averaged, args.output)
if __name__ == "__main__":
main()
| 1,665 | 29.290909 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/bin/train.py | #!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from itertools import cycle
def train(opt):
    """Top-level training driver.

    Validates options, loads vocab/fields (from a checkpoint when
    resuming), builds the training iterator, then dispatches to
    multi-GPU (producer + one process per GPU), single-GPU, or CPU
    training.
    """
    ArgumentParser.validate_train_opts(opt)
    ArgumentParser.update_model_opts(opt)
    ArgumentParser.validate_model_opts(opt)
    set_random_seed(opt.seed, False)
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        # map_location keeps tensors on CPU regardless of where they
        # were saved.
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        vocab = checkpoint['vocab']
    else:
        vocab = torch.load(opt.data + '.vocab.pt')
    # check for code where vocab is saved instead of fields
    # (in the future this will be done in a smarter way)
    if old_style_vocab(vocab):
        fields = load_old_vocab(
            vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab
    # Multiple corpora: interleave shards from every id.
    if len(opt.data_ids) > 1:
        train_shards = []
        for train_id in opt.data_ids:
            shard_base = "train_" + train_id
            train_shards.append(shard_base)
        train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
    else:
        if opt.data_ids[0] is not None:
            shard_base = "train_" + opt.data_ids[0]
        else:
            shard_base = "train"
        train_iter = build_dataset_iter(shard_base, fields, opt)
    nb_gpu = len(opt.gpu_ranks)
    if opt.world_size > 1:
        queues = []
        # 'spawn' avoids fork-related CUDA issues in child processes.
        mp = torch.multiprocessing.get_context('spawn')
        # Bounds the number of batches in flight across all workers.
        semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
        # Create a thread to listen for errors in the child processes.
        error_queue = mp.SimpleQueue()
        error_handler = ErrorHandler(error_queue)
        # Train with multiprocessing.
        procs = []
        for device_id in range(nb_gpu):
            q = mp.Queue(opt.queue_size)
            queues += [q]
            procs.append(mp.Process(target=run, args=(
                opt, device_id, error_queue, q, semaphore), daemon=True))
            procs[device_id].start()
            logger.info(" Starting process pid: %d  " % procs[device_id].pid)
            error_handler.add_child(procs[device_id].pid)
        # Single producer feeds batches to every worker queue.
        producer = mp.Process(target=batch_producer,
                              args=(train_iter, queues, semaphore, opt,),
                              daemon=True)
        producer.start()
        error_handler.add_child(producer.pid)
        for p in procs:
            p.join()
        producer.terminate()
    elif nb_gpu == 1:  # case 1 GPU only
        single_main(opt, 0)
    else:   # case only CPU
        single_main(opt, -1)
def batch_producer(generator_to_serve, queues, semaphore, opt):
    """Feed training batches to the per-GPU queues (runs in its own process).

    Round-robins over `queues` (one per GPU), moving each batch's tensors
    to the target device before enqueueing. `semaphore` bounds how many
    batches are in flight at once.
    """
    init_logger(opt.log_file)
    set_random_seed(opt.seed, False)
    # generator_to_serve = iter(generator_to_serve)
    def pred(x):
        """
        Filters batches that belong only
        to gpu_ranks of current node
        """
        for rank in opt.gpu_ranks:
            if x[0] % opt.world_size == rank:
                return True
    generator_to_serve = filter(
        pred, enumerate(generator_to_serve))
    def next_batch(device_id):
        # Blocks on the semaphore until a consumer frees a slot.
        new_batch = next(generator_to_serve)
        semaphore.acquire()
        return new_batch[1]
    b = next_batch(0)
    for device_id, q in cycle(enumerate(queues)):
        # Drop the dataset reference so the batch stays picklable/small.
        b.dataset = None
        if isinstance(b.src, tuple):
            b.src = tuple([_.to(torch.device(device_id))
                           for _ in b.src])
        else:
            b.src = b.src.to(torch.device(device_id))
        b.tgt = b.tgt.to(torch.device(device_id))
        b.indices = b.indices.to(torch.device(device_id))
        # Optional fields: move if present, otherwise clear to None.
        b.alignment = b.alignment.to(torch.device(device_id)) \
            if hasattr(b, 'alignment') else None
        b.src_map = b.src_map.to(torch.device(device_id)) \
            if hasattr(b, 'src_map') else None
        b.align = b.align.to(torch.device(device_id)) \
            if hasattr(b, 'align') else None
        # hack to dodge unpicklable `dict_keys`
        b.fields = list(b.fields)
        q.put(b)
        b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
    """ run process """
    try:
        gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
        expected_rank = opt.gpu_ranks[device_id]
        if gpu_rank != expected_rank:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        single_main(opt, device_id, batch_queue, semaphore)
    except KeyboardInterrupt:
        # Killed by the parent process; exit quietly.
        pass
    except Exception:
        # Propagate the exception to the parent, keeping the original
        # traceback text.
        import traceback
        error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""
    def __init__(self, error_queue):
        """ init error handler """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread that blocks until a child reports an error.
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        # SIGUSR1 is raised by error_listener to interrupt the main thread.
        signal.signal(signal.SIGUSR1, self.signal_handler)
    def add_child(self, pid):
        """ error handler """
        self.children_pids.append(pid)
    def error_listener(self):
        """ error listener """
        (rank, original_trace) = self.error_queue.get()
        # Re-queue the error so signal_handler can read it from the
        # main thread.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)
    def signal_handler(self, signalnum, stackframe):
        """ signal handler """
        # Terminate every registered child before re-raising.
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def _get_parser():
    """Assemble the command-line parser for train.py."""
    parser = ArgumentParser(description='train.py')
    for add_group in (opts.config_opts, opts.model_opts, opts.train_opts):
        add_group(parser)
    return parser
def main():
    """Parse command-line options and launch training."""
    opt = _get_parser().parse_args()
    train(opt)
if __name__ == "__main__":
main()
| 6,849 | 31.77512 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/bin/preprocess.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pre-process Data / features files and build vocabulary
"""
import codecs
import glob
import gc
import torch
from collections import Counter, defaultdict
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import split_corpus
import onmt.inputters as inputters
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import _build_fields_vocab,\
_load_vocab
from functools import partial
from multiprocessing import Pool
def check_existing_pt_files(opt, corpus_type, ids, existing_fields):
    """ Check if there are existing .pt files to avoid overwriting them """
    existing_shards = []
    for maybe_id in ids:
        shard_base = corpus_type + "_" + maybe_id if maybe_id \
            else corpus_type
        pattern = opt.save_data + '.{}.*.pt'.format(shard_base)
        if not glob.glob(pattern):
            continue
        # Shards already on disk: warn about what will happen to them.
        if opt.overwrite:
            maybe_overwrite = ("will be overwritten because "
                               "`-overwrite` option is set.")
        else:
            maybe_overwrite = ("won't be overwritten, pass the "
                               "`-overwrite` option if you want to.")
        logger.warning("Shards for corpus {} already exist, {}"
                       .format(shard_base, maybe_overwrite))
        existing_shards += [maybe_id]
    return existing_shards
def process_one_shard(corpus_params, params):
    """Build, (optionally) count vocab for, and save one dataset shard.

    Designed for multiprocessing.Pool.imap: `corpus_params` is shared
    across shards, `params` identifies this shard. Returns a
    dict-of-Counters with token counts (empty unless counting applies).
    """
    corpus_type, fields, src_reader, tgt_reader, align_reader, opt,\
        existing_fields, src_vocab, tgt_vocab = corpus_params
    i, (src_shard, tgt_shard, align_shard, maybe_id, filter_pred) = params
    # create one counter per shard
    sub_sub_counter = defaultdict(Counter)
    assert len(src_shard) == len(tgt_shard)
    logger.info("Building shard %d." % i)
    src_data = {"reader": src_reader, "data": src_shard, "dir": opt.src_dir}
    tgt_data = {"reader": tgt_reader, "data": tgt_shard, "dir": None}
    align_data = {"reader": align_reader, "data": align_shard, "dir": None}
    _readers, _data, _dir = inputters.Dataset.config(
        [('src', src_data), ('tgt', tgt_data), ('align', align_data)])
    dataset = inputters.Dataset(
        fields, readers=_readers, data=_data, dirs=_dir,
        sort_key=inputters.str2sortkey[opt.data_type],
        filter_pred=filter_pred
    )
    # Token counting only applies to training data and only when no
    # pre-built fields were supplied.
    if corpus_type == "train" and existing_fields is None:
        for ex in dataset.examples:
            for name, field in fields.items():
                if ((opt.data_type == "audio") and (name == "src")):
                    continue
                try:
                    # Multi-field (e.g. word + feature) entries iterate.
                    f_iter = iter(field)
                except TypeError:
                    # Plain field: wrap to match the multi-field shape.
                    f_iter = [(name, field)]
                    all_data = [getattr(ex, name, None)]
                else:
                    all_data = getattr(ex, name)
                for (sub_n, sub_f), fd in zip(
                        f_iter, all_data):
                    # Skip counting when a vocab was already loaded for
                    # this side.
                    has_vocab = (sub_n == 'src' and
                                 src_vocab is not None) or \
                                (sub_n == 'tgt' and
                                 tgt_vocab is not None)
                    if (hasattr(sub_f, 'sequential')
                            and sub_f.sequential and not has_vocab):
                        val = fd
                        sub_sub_counter[sub_n].update(val)
    if maybe_id:
        shard_base = corpus_type + "_" + maybe_id
    else:
        shard_base = corpus_type
    data_path = "{:s}.{:s}.{:d}.pt".\
        format(opt.save_data, shard_base, i)
    logger.info(" * saving %sth %s data shard to %s."
                % (i, shard_base, data_path))
    dataset.save(data_path)
    # Free the (potentially large) examples eagerly before returning.
    del dataset.examples
    gc.collect()
    del dataset
    gc.collect()
    return sub_sub_counter
def maybe_load_vocab(corpus_type, counters, opt):
    """Load a pre-existing vocabulary for the training corpus, if any.

    For "train", -src_vocab may point at a saved fields file (returned
    as `existing_fields`) or a plain-text vocab (loaded into `counters`).
    Returns (src_vocab, tgt_vocab, existing_fields), each possibly None.
    """
    src_vocab, tgt_vocab, existing_fields = None, None, None
    if corpus_type != "train":
        return src_vocab, tgt_vocab, existing_fields

    if opt.src_vocab != "":
        try:
            logger.info("Using existing vocabulary...")
            existing_fields = torch.load(opt.src_vocab)
        except torch.serialization.pickle.UnpicklingError:
            # Not a torch-serialized fields file: treat it as text.
            logger.info("Building vocab from text file...")
            src_vocab, src_vocab_size = _load_vocab(
                opt.src_vocab, "src", counters,
                opt.src_words_min_frequency)
    if opt.tgt_vocab != "":
        tgt_vocab, tgt_vocab_size = _load_vocab(
            opt.tgt_vocab, "tgt", counters,
            opt.tgt_words_min_frequency)
    return src_vocab, tgt_vocab, existing_fields
def build_save_dataset(corpus_type, fields, src_reader, tgt_reader,
                       align_reader, opt):
    """Shard, build, and save one split ('train' or 'valid') to disk.

    Shards are processed in parallel with a worker pool; for 'train',
    token counters from every shard are merged and the vocab/fields are
    built and saved at the end.
    """
    assert corpus_type in ['train', 'valid']
    if corpus_type == 'train':
        counters = defaultdict(Counter)
        srcs = opt.train_src
        tgts = opt.train_tgt
        ids = opt.train_ids
        aligns = opt.train_align
    elif corpus_type == 'valid':
        counters = None
        srcs = [opt.valid_src]
        tgts = [opt.valid_tgt]
        ids = [None]
        aligns = [opt.valid_align]
    src_vocab, tgt_vocab, existing_fields = maybe_load_vocab(
        corpus_type, counters, opt)
    existing_shards = check_existing_pt_files(
        opt, corpus_type, ids, existing_fields)
    # every corpus has shards, no new one
    if existing_shards == ids and not opt.overwrite:
        return
    def shard_iterator(srcs, tgts, ids, aligns, existing_shards,
                       existing_fields, corpus_type, opt):
        """
        Builds a single iterator yielding every shard of every corpus.
        """
        for src, tgt, maybe_id, maybe_align in zip(srcs, tgts, ids, aligns):
            if maybe_id in existing_shards:
                if opt.overwrite:
                    logger.warning("Overwrite shards for corpus {}"
                                   .format(maybe_id))
                else:
                    if corpus_type == "train":
                        assert existing_fields is not None,\
                            ("A 'vocab.pt' file should be passed to "
                             "`-src_vocab` when adding a corpus to "
                             "a set of already existing shards.")
                    logger.warning("Ignore corpus {} because "
                                   "shards already exist"
                                   .format(maybe_id))
                    continue
            # Length filtering applies to train always; to valid only
            # when -filter_valid is set.
            if ((corpus_type == "train" or opt.filter_valid)
                    and tgt is not None):
                filter_pred = partial(
                    inputters.filter_example,
                    use_src_len=opt.data_type == "text",
                    max_src_len=opt.src_seq_length,
                    max_tgt_len=opt.tgt_seq_length)
            else:
                filter_pred = None
            src_shards = split_corpus(src, opt.shard_size)
            tgt_shards = split_corpus(tgt, opt.shard_size)
            align_shards = split_corpus(maybe_align, opt.shard_size)
            for i, (ss, ts, a_s) in enumerate(
                    zip(src_shards, tgt_shards, align_shards)):
                yield (i, (ss, ts, a_s, maybe_id, filter_pred))
    shard_iter = shard_iterator(srcs, tgts, ids, aligns, existing_shards,
                                existing_fields, corpus_type, opt)
    with Pool(opt.num_threads) as p:
        dataset_params = (corpus_type, fields, src_reader, tgt_reader,
                          align_reader, opt, existing_fields,
                          src_vocab, tgt_vocab)
        func = partial(process_one_shard, dataset_params)
        # Merge the per-shard counters as workers finish.
        for sub_counter in p.imap(func, shard_iter):
            if sub_counter is not None:
                for key, value in sub_counter.items():
                    counters[key].update(value)
    if corpus_type == "train":
        vocab_path = opt.save_data + '.vocab.pt'
        if existing_fields is None:
            fields = _build_fields_vocab(
                fields, counters, opt.data_type,
                opt.share_vocab, opt.vocab_size_multiple,
                opt.src_vocab_size, opt.src_words_min_frequency,
                opt.tgt_vocab_size, opt.tgt_words_min_frequency)
        else:
            fields = existing_fields
        torch.save(fields, vocab_path)
def build_save_vocab(train_dataset, fields, opt):
    """Build the vocabulary from the training data and save it to disk."""
    fields = inputters.build_vocab(
        train_dataset, fields, opt.data_type, opt.share_vocab,
        opt.src_vocab, opt.src_vocab_size, opt.src_words_min_frequency,
        opt.tgt_vocab, opt.tgt_vocab_size, opt.tgt_words_min_frequency,
        vocab_size_multiple=opt.vocab_size_multiple
    )
    torch.save(fields, opt.save_data + '.vocab.pt')
def count_features(path):
    """
    path: location of a corpus file with whitespace-delimited tokens and
        │-delimited features within the token
    returns: the number of features in the dataset
    """
    with codecs.open(path, "r", "utf-8") as corpus:
        first_token = corpus.readline().split(None, 1)[0]
    # n separators per token => n features beyond the surface word.
    return first_token.count(u"│")
def preprocess(opt):
    """Run the whole preprocessing pipeline for the given options.

    Counts side features, builds the Fields object and readers, then
    shards/saves the training split and (when provided) the validation
    split.
    """
    ArgumentParser.validate_preprocess_args(opt)
    torch.manual_seed(opt.seed)
    init_logger(opt.log_file)
    logger.info("Extracting features...")
    src_nfeats = 0
    tgt_nfeats = 0
    for src, tgt in zip(opt.train_src, opt.train_tgt):
        # Only text sources carry │-delimited features.
        src_nfeats += count_features(src) if opt.data_type == 'text' \
            else 0
        tgt_nfeats += count_features(tgt)  # tgt always text so far
    logger.info(" * number of source features: %d." % src_nfeats)
    logger.info(" * number of target features: %d." % tgt_nfeats)
    logger.info("Building `Fields` object...")
    fields = inputters.get_fields(
        opt.data_type,
        src_nfeats,
        tgt_nfeats,
        dynamic_dict=opt.dynamic_dict,
        with_align=opt.train_align[0] is not None,
        src_truncate=opt.src_seq_length_trunc,
        tgt_truncate=opt.tgt_seq_length_trunc)
    src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
    tgt_reader = inputters.str2reader["text"].from_opt(opt)
    align_reader = inputters.str2reader["text"].from_opt(opt)
    logger.info("Building & saving training data...")
    build_save_dataset(
        'train', fields, src_reader, tgt_reader, align_reader, opt)
    if opt.valid_src and opt.valid_tgt:
        logger.info("Building & saving validation data...")
        build_save_dataset(
            'valid', fields, src_reader, tgt_reader, align_reader, opt)
def _get_parser():
    """Assemble the command-line parser for preprocess.py."""
    parser = ArgumentParser(description='preprocess.py')
    for add_group in (opts.config_opts, opts.preprocess_opts):
        add_group(parser)
    return parser
def main():
    """Parse command-line options and run preprocessing."""
    preprocess(_get_parser().parse_args())
if __name__ == "__main__":
main()
| 11,018 | 35.97651 | 76 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/decoders/transformer.py | """
Implementation of "Attention is All You Need"
"""
import torch
import torch.nn as nn
from onmt.decoders.decoder import DecoderBase
from onmt.modules import MultiHeadedAttention, AverageAttention
from onmt.modules.position_ffn import PositionwiseFeedForward
from onmt.utils.misc import sequence_mask
class TransformerDecoderLayer(nn.Module):
"""
Args:
d_model (int): the dimension of keys/values/queries in
:class:`MultiHeadedAttention`, also the input size of
the first-layer of the :class:`PositionwiseFeedForward`.
heads (int): the number of heads for MultiHeadedAttention.
d_ff (int): the second-layer of the :class:`PositionwiseFeedForward`.
dropout (float): dropout probability.
self_attn_type (string): type of self-attention scaled-dot, average
"""
def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
self_attn_type="scaled-dot", max_relative_positions=0,
aan_useffn=False, full_context_alignment=False,
alignment_heads=None):
super(TransformerDecoderLayer, self).__init__()
if self_attn_type == "scaled-dot":
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=dropout,
max_relative_positions=max_relative_positions)
elif self_attn_type == "average":
self.self_attn = AverageAttention(d_model,
dropout=attention_dropout,
aan_useffn=aan_useffn)
self.context_attn = MultiHeadedAttention(
heads, d_model, dropout=attention_dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
self.drop = nn.Dropout(dropout)
self.full_context_alignment = full_context_alignment
self.alignment_heads = alignment_heads
def forward(self, *args, **kwargs):
""" Extend _forward for (possibly) multiple decoder pass:
1. Always a default (future masked) decoder forward pass,
2. Possibly a second future aware decoder pass for joint learn
full context alignement.
Args:
* All arguments of _forward.
with_align (bool): whether return alignment attention.
Returns:
(FloatTensor, FloatTensor, FloatTensor or None):
* output ``(batch_size, 1, model_dim)``
* top_attn ``(batch_size, 1, src_len)``
* attn_align ``(batch_size, 1, src_len)`` or None
"""
with_align = kwargs.pop('with_align', False)
output, attns = self._forward(*args, **kwargs)
top_attn = attns[:, 0, :, :].contiguous()
attn_align = None
if with_align:
if self.full_context_alignment:
# return _, (B, Q_len, K_len)
_, attns = self._forward(*args, **kwargs, future=True)
if self.alignment_heads is not None:
attns = attns[:, :self.alignment_heads, :, :].contiguous()
# layer average attention across heads, get ``(B, Q, K)``
# Case 1: no full_context, no align heads -> layer avg baseline
# Case 2: no full_context, 1 align heads -> guided align
# Case 3: full_context, 1 align heads -> full cte guided align
attn_align = attns.mean(dim=1)
return output, top_attn, attn_align
def _forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
             layer_cache=None, step=None, future=False):
    """A naive forward pass for transformer decoder.

    # TODO: change 1 to T as T could be 1 or tgt_len

    Args:
        inputs (FloatTensor): ``(batch_size, 1, model_dim)``
        memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
        src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
        tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``
        layer_cache (dict or None): per-layer cached keys/values for
            stepwise (inference-time) decoding.
        step (int or None): current decoding step; ``None`` while training.
        future (bool): if True, do NOT apply the causal (future) mask.

    Returns:
        (FloatTensor, FloatTensor):

        * output ``(batch_size, 1, model_dim)``
        * attns ``(batch_size, head, 1, src_len)``
    """
    dec_mask = None
    if step is None:
        tgt_len = tgt_pad_mask.size(-1)
        if not future:  # apply future_mask, result mask in (B, T, T)
            future_mask = torch.ones(
                [tgt_len, tgt_len],
                device=tgt_pad_mask.device,
                dtype=torch.uint8)
            # Upper triangle (excluding diagonal) marks future positions.
            future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
            # BoolTensor was introduced in pytorch 1.2
            try:
                future_mask = future_mask.bool()
            except AttributeError:
                pass
            # Combine padding and future masks: masked where either is set.
            dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
        else:  # only mask padding, result mask in (B, 1, T)
            dec_mask = tgt_pad_mask
    # Pre-norm residual block 1: masked self-attention.
    input_norm = self.layer_norm_1(inputs)
    if isinstance(self.self_attn, MultiHeadedAttention):
        query, _ = self.self_attn(input_norm, input_norm, input_norm,
                                  mask=dec_mask,
                                  layer_cache=layer_cache,
                                  attn_type="self")
    elif isinstance(self.self_attn, AverageAttention):
        query, _ = self.self_attn(input_norm, mask=dec_mask,
                                  layer_cache=layer_cache, step=step)
    query = self.drop(query) + inputs
    # Pre-norm residual block 2: encoder-decoder ("context") attention.
    query_norm = self.layer_norm_2(query)
    mid, attns = self.context_attn(memory_bank, memory_bank, query_norm,
                                   mask=src_pad_mask,
                                   layer_cache=layer_cache,
                                   attn_type="context")
    # Residual block 3: position-wise feed-forward (norm applied inside).
    output = self.feed_forward(self.drop(mid) + query)
    return output, attns
def update_dropout(self, dropout, attention_dropout):
    """Propagate updated dropout rates to every sub-module of the layer."""
    for attn_module in (self.self_attn, self.context_attn):
        attn_module.update_dropout(attention_dropout)
    self.feed_forward.update_dropout(dropout)
    self.drop.p = dropout
class TransformerDecoder(DecoderBase):
    """The Transformer decoder from "Attention is All You Need".
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`

    .. mermaid::

       graph BT
          A[input]
          B[multi-head self-attn]
          BB[multi-head src-attn]
          C[feed forward]
          O[output]
          A --> B
          B --> BB
          BB --> C
          C --> O

    Args:
        num_layers (int): number of decoder layers.
        d_model (int): size of the model
        heads (int): number of heads
        d_ff (int): size of the inner FF layer
        copy_attn (bool): if using a separate copy attention
        self_attn_type (str): type of self-attention scaled-dot, average
        dropout (float): dropout for residual/FF sub-layers
        attention_dropout (float): dropout applied inside attention
        embeddings (onmt.modules.Embeddings):
            embeddings to use, should have positional encodings
        max_relative_positions (int): max distance for relative positions
        aan_useffn (bool): add an FFN after AverageAttention
        full_context_alignment (bool): run an extra future-aware pass for
            alignment (see TransformerDecoderLayer.forward)
        alignment_layer (int): index of the layer whose alignment
            attention is exposed in ``attns["align"]``
        alignment_heads (int or None): number of heads averaged to build
            the alignment attention
    """

    def __init__(self, num_layers, d_model, heads, d_ff,
                 copy_attn, self_attn_type, dropout, attention_dropout,
                 embeddings, max_relative_positions, aan_useffn,
                 full_context_alignment, alignment_layer,
                 alignment_heads=None):
        super(TransformerDecoder, self).__init__()

        self.embeddings = embeddings

        # Decoder State
        self.state = {}

        self.transformer_layers = nn.ModuleList(
            [TransformerDecoderLayer(d_model, heads, d_ff, dropout,
             attention_dropout, self_attn_type=self_attn_type,
             max_relative_positions=max_relative_positions,
             aan_useffn=aan_useffn,
             full_context_alignment=full_context_alignment,
             alignment_heads=alignment_heads)
             for i in range(num_layers)])

        # previously, there was a GlobalAttention module here for copy
        # attention. But it was never actually used -- the "copy" attention
        # just reuses the context attention.
        self._copy = copy_attn
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

        self.alignment_layer = alignment_layer

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.dec_layers,
            opt.dec_rnn_size,
            opt.heads,
            opt.transformer_ff,
            opt.copy_attn,
            opt.self_attn_type,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            # BUGFIX: the scalar fallback must be opt.attention_dropout,
            # not opt.dropout (the two options can legitimately differ).
            opt.attention_dropout[0] if type(opt.attention_dropout)
            is list else opt.attention_dropout,
            embeddings,
            opt.max_relative_positions,
            opt.aan_useffn,
            opt.full_context_alignment,
            opt.alignment_layer,
            alignment_heads=opt.alignment_heads)

    def init_state(self, src, memory_bank, enc_hidden):
        """Initialize decoder state."""
        self.state["src"] = src
        self.state["cache"] = None

    def map_state(self, fn):
        """Apply ``fn`` to every tensor in the decoder state (e.g. for
        beam-search tiling/reordering)."""
        def _recursive_map(struct, batch_dim=0):
            for k, v in struct.items():
                if v is not None:
                    if isinstance(v, dict):
                        _recursive_map(v)
                    else:
                        struct[k] = fn(v, batch_dim)

        # src is (len, batch, nfeat): batch dim is 1 here.
        self.state["src"] = fn(self.state["src"], 1)
        if self.state["cache"] is not None:
            _recursive_map(self.state["cache"])

    def detach_state(self):
        self.state["src"] = self.state["src"].detach()

    def forward(self, tgt, memory_bank, step=None, **kwargs):
        """Decode, possibly stepwise."""
        if step == 0:
            self._init_cache(memory_bank)

        tgt_words = tgt[:, :, 0].transpose(0, 1)

        emb = self.embeddings(tgt, step=step)
        assert emb.dim() == 3  # len x batch x embedding_dim

        # Switch to batch-first layout for the attention layers.
        output = emb.transpose(0, 1).contiguous()
        src_memory_bank = memory_bank.transpose(0, 1).contiguous()

        pad_idx = self.embeddings.word_padding_idx
        src_lens = kwargs["memory_lengths"]
        src_max_len = self.state["src"].shape[0]
        src_pad_mask = ~sequence_mask(src_lens, src_max_len).unsqueeze(1)
        tgt_pad_mask = tgt_words.data.eq(pad_idx).unsqueeze(1)  # [B, 1, T_tgt]

        with_align = kwargs.pop('with_align', False)
        attn_aligns = []

        for i, layer in enumerate(self.transformer_layers):
            layer_cache = self.state["cache"]["layer_{}".format(i)] \
                if step is not None else None
            output, attn, attn_align = layer(
                output,
                src_memory_bank,
                src_pad_mask,
                tgt_pad_mask,
                layer_cache=layer_cache,
                step=step,
                with_align=with_align)
            if attn_align is not None:
                attn_aligns.append(attn_align)

        output = self.layer_norm(output)
        dec_outs = output.transpose(0, 1).contiguous()
        attn = attn.transpose(0, 1).contiguous()

        attns = {"std": attn}
        if self._copy:
            # Copy attention reuses the context attention (see __init__).
            attns["copy"] = attn
        if with_align:
            attns["align"] = attn_aligns[self.alignment_layer]  # `(B, Q, K)`
            # attns["align"] = torch.stack(attn_aligns, 0).mean(0)  # All avg

        # TODO change the way attns is returned dict => list or tuple (onnx)
        return dec_outs, attns

    def _init_cache(self, memory_bank):
        """Create an empty per-layer key/value cache for stepwise decoding."""
        self.state["cache"] = {}
        batch_size = memory_bank.size(1)
        depth = memory_bank.size(-1)

        for i, layer in enumerate(self.transformer_layers):
            layer_cache = {"memory_keys": None, "memory_values": None}
            if isinstance(layer.self_attn, AverageAttention):
                layer_cache["prev_g"] = torch.zeros((batch_size, 1, depth),
                                                    device=memory_bank.device)
            else:
                layer_cache["self_keys"] = None
                layer_cache["self_values"] = None
            self.state["cache"]["layer_{}".format(i)] = layer_cache

    def update_dropout(self, dropout, attention_dropout):
        self.embeddings.update_dropout(dropout)
        for layer in self.transformer_layers:
            layer.update_dropout(dropout, attention_dropout)
| 12,530 | 38.282132 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/decoders/decoder.py | import torch
import torch.nn as nn
from onmt.models.stacked_rnn import StackedLSTM, StackedGRU
from onmt.modules import context_gate_factory, GlobalAttention
from onmt.utils.rnn_factory import rnn_factory
from onmt.utils.misc import aeq
class DecoderBase(nn.Module):
    """Base class that every decoder derives from.

    Args:
        attentional (bool): whether the decoder returns a non-empty
            attention distribution.
    """

    def __init__(self, attentional=True):
        super(DecoderBase, self).__init__()
        self.attentional = attentional

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor; concrete subclasses must override it."""
        raise NotImplementedError
class RNNDecoderBase(DecoderBase):
    """Base recurrent attention-based decoder class.

    Specifies the interface used by different decoder types
    and required by :class:`~onmt.models.NMTModel`.

    .. mermaid::

       graph BT
          A[Input]
          subgraph RNN
             C[Pos 1]
             D[Pos 2]
             E[Pos N]
          end
          G[Decoder State]
          H[Decoder State]
          I[Outputs]
          F[memory_bank]
          A--emb-->C
          A--emb-->D
          A--emb-->E
          H-->C
          C-- attn --- F
          D-- attn --- F
          E-- attn --- F
          C-->I
          D-->I
          E-->I
          E-->G
          F---I

    Args:
        rnn_type (str):
           style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
        bidirectional_encoder (bool) : use with a bidirectional encoder
        num_layers (int) : number of stacked layers
        hidden_size (int) : hidden size of each layer
        attn_type (str) : see :class:`~onmt.modules.GlobalAttention`
        attn_func (str) : see :class:`~onmt.modules.GlobalAttention`
        coverage_attn (str): see :class:`~onmt.modules.GlobalAttention`
        context_gate (str): see :class:`~onmt.modules.ContextGate`
        copy_attn (bool): setup a separate copy attention mechanism
        dropout (float) : dropout value for :class:`torch.nn.Dropout`
        embeddings (onmt.modules.Embeddings): embedding module to use
        reuse_copy_attn (bool): reuse the attention for copying
        copy_attn_type (str): The copy attention style. See
            :class:`~onmt.modules.GlobalAttention`.
    """

    def __init__(self, rnn_type, bidirectional_encoder, num_layers,
                 hidden_size, attn_type="general", attn_func="softmax",
                 coverage_attn=False, context_gate=None,
                 copy_attn=False, dropout=0.0, embeddings=None,
                 reuse_copy_attn=False, copy_attn_type="general"):
        # A decoder is "attentional" unless attn_type is "none"/None.
        super(RNNDecoderBase, self).__init__(
            attentional=attn_type != "none" and attn_type is not None)

        self.bidirectional_encoder = bidirectional_encoder
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embeddings = embeddings
        self.dropout = nn.Dropout(dropout)

        # Decoder state (hidden, input_feed, coverage); see init_state().
        self.state = {}

        # Build the RNN. _build_rnn and _input_size are subclass hooks.
        self.rnn = self._build_rnn(rnn_type,
                                   input_size=self._input_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   dropout=dropout)

        # Set up the context gate.
        self.context_gate = None
        if context_gate is not None:
            self.context_gate = context_gate_factory(
                context_gate, self._input_size,
                hidden_size, hidden_size, hidden_size
            )

        # Set up the standard attention.
        self._coverage = coverage_attn
        if not self.attentional:
            if self._coverage:
                raise ValueError("Cannot use coverage term with no attention.")
            self.attn = None
        else:
            self.attn = GlobalAttention(
                hidden_size, coverage=coverage_attn,
                attn_type=attn_type, attn_func=attn_func
            )

        # Separate copy attention only when it is not reused from self.attn.
        if copy_attn and not reuse_copy_attn:
            if copy_attn_type == "none" or copy_attn_type is None:
                raise ValueError(
                    "Cannot use copy_attn with copy_attn_type none")
            self.copy_attn = GlobalAttention(
                hidden_size, attn_type=copy_attn_type, attn_func=attn_func
            )
        else:
            self.copy_attn = None

        self._reuse_copy_attn = reuse_copy_attn and copy_attn
        if self._reuse_copy_attn and not self.attentional:
            raise ValueError("Cannot reuse copy attention with no attention.")

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.rnn_type,
            opt.brnn,
            opt.dec_layers,
            opt.dec_rnn_size,
            opt.global_attention,
            opt.global_attention_function,
            opt.coverage_attn,
            opt.context_gate,
            opt.copy_attn,
            opt.dropout[0] if type(opt.dropout) is list
            else opt.dropout,
            embeddings,
            opt.reuse_copy_attn,
            opt.copy_attn_type)

    def init_state(self, src, memory_bank, encoder_final):
        """Initialize decoder state with last state of the encoder."""
        def _fix_enc_hidden(hidden):
            # The encoder hidden is (layers*directions) x batch x dim.
            # We need to convert it to layers x batch x (directions*dim).
            if self.bidirectional_encoder:
                hidden = torch.cat([hidden[0:hidden.size(0):2],
                                    hidden[1:hidden.size(0):2]], 2)
            return hidden

        if isinstance(encoder_final, tuple):  # LSTM
            self.state["hidden"] = tuple(_fix_enc_hidden(enc_hid)
                                         for enc_hid in encoder_final)
        else:  # GRU
            self.state["hidden"] = (_fix_enc_hidden(encoder_final), )

        # Init the input feed with zeros of shape (1, batch, hidden).
        batch_size = self.state["hidden"][0].size(1)
        h_size = (batch_size, self.hidden_size)
        self.state["input_feed"] = \
            self.state["hidden"][0].data.new(*h_size).zero_().unsqueeze(0)
        self.state["coverage"] = None

    def map_state(self, fn):
        # Apply fn along the batch dimension (1) of every state tensor,
        # e.g. for beam-search tiling/reordering.
        self.state["hidden"] = tuple(fn(h, 1) for h in self.state["hidden"])
        self.state["input_feed"] = fn(self.state["input_feed"], 1)
        if self._coverage and self.state["coverage"] is not None:
            self.state["coverage"] = fn(self.state["coverage"], 1)

    def detach_state(self):
        # Cut the backprop graph at the current state (truncated BPTT).
        self.state["hidden"] = tuple(h.detach() for h in self.state["hidden"])
        self.state["input_feed"] = self.state["input_feed"].detach()

    def forward(self, tgt, memory_bank, memory_lengths=None, step=None,
                **kwargs):
        """
        Args:
            tgt (LongTensor): sequences of padded tokens
                 ``(tgt_len, batch, nfeats)``.
            memory_bank (FloatTensor): vectors from the encoder
                 ``(src_len, batch, hidden)``.
            memory_lengths (LongTensor): the padded source lengths
                ``(batch,)``.

        Returns:
            (FloatTensor, dict[str, FloatTensor]):

            * dec_outs: output from the decoder (after attn)
              ``(tgt_len, batch, hidden)``.
            * attns: distribution over src at each tgt
              ``(tgt_len, batch, src_len)``.
        """
        dec_state, dec_outs, attns = self._run_forward_pass(
            tgt, memory_bank, memory_lengths=memory_lengths)

        # Update the state with the result.
        if not isinstance(dec_state, tuple):
            dec_state = (dec_state,)
        self.state["hidden"] = dec_state
        # Last decoder output becomes the next input feed.
        self.state["input_feed"] = dec_outs[-1].unsqueeze(0)
        self.state["coverage"] = None
        if "coverage" in attns:
            self.state["coverage"] = attns["coverage"][-1].unsqueeze(0)

        # Concatenates sequence of tensors along a new dimension.
        # NOTE: v0.3 to 0.4: dec_outs / attns[*] may not be list
        #       (in particular in case of SRU) it was not raising error in 0.3
        #       since stack(Variable) was allowed.
        #       In 0.4, SRU returns a tensor that shouldn't be stacke
        if type(dec_outs) == list:
            dec_outs = torch.stack(dec_outs)

            for k in attns:
                if type(attns[k]) == list:
                    attns[k] = torch.stack(attns[k])
        return dec_outs, attns

    def update_dropout(self, dropout):
        self.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
class StdRNNDecoder(RNNDecoderBase):
    """Standard fully batched RNN decoder with attention.

    Faster implementation, uses CuDNN for implementation.
    See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.

    Based around the approach from
    "Neural Machine Translation By Jointly Learning To Align and Translate"
    :cite:`Bahdanau2015`

    Implemented without input_feeding and currently with no `coverage_attn`
    or `copy_attn` support.
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        """
        Private helper for running the specific RNN forward pass.
        Must be overriden by all subclasses.

        Args:
            tgt (LongTensor): a sequence of input tokens tensors
                ``(len, batch, nfeats)``.
            memory_bank (FloatTensor): output(tensor sequence) from the
                encoder RNN of size ``(src_len, batch, hidden_size)``.
            memory_lengths (LongTensor): the source memory_bank lengths.

        Returns:
            (Tensor, List[FloatTensor], Dict[str, List[FloatTensor]):

            * dec_state: final hidden state from the decoder.
            * dec_outs: an array of output of every time
              step from the decoder.
            * attns: a dictionary of different
              type of attention Tensor array of every time
              step from the decoder.
        """
        assert self.copy_attn is None  # TODO, no support yet.
        assert not self._coverage  # TODO, no support yet.

        attns = {}
        emb = self.embeddings(tgt)

        # Run the whole sequence through the (CuDNN-backed) RNN in one call.
        if isinstance(self.rnn, nn.GRU):
            rnn_output, dec_state = self.rnn(emb, self.state["hidden"][0])
        else:
            rnn_output, dec_state = self.rnn(emb, self.state["hidden"])

        # Check that the RNN preserved length and batch dims.
        tgt_len, tgt_batch, _ = tgt.size()
        output_len, output_batch, _ = rnn_output.size()
        aeq(tgt_len, output_len)
        aeq(tgt_batch, output_batch)

        # Calculate the attention (batch-first inside self.attn).
        if not self.attentional:
            dec_outs = rnn_output
        else:
            dec_outs, p_attn = self.attn(
                rnn_output.transpose(0, 1).contiguous(),
                memory_bank.transpose(0, 1),
                memory_lengths=memory_lengths
            )
            attns["std"] = p_attn

        # Calculate the context gate (operates on flattened time*batch).
        if self.context_gate is not None:
            dec_outs = self.context_gate(
                emb.view(-1, emb.size(2)),
                rnn_output.view(-1, rnn_output.size(2)),
                dec_outs.view(-1, dec_outs.size(2))
            )
            dec_outs = dec_outs.view(tgt_len, tgt_batch, self.hidden_size)

        dec_outs = self.dropout(dec_outs)
        return dec_state, dec_outs, attns

    def _build_rnn(self, rnn_type, **kwargs):
        # Plain (non-stacked-cell) RNN; the factory returns (rnn, no_pack).
        rnn, _ = rnn_factory(rnn_type, **kwargs)
        return rnn

    @property
    def _input_size(self):
        # No input feeding: the RNN consumes only the token embeddings.
        return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
    """Input feeding based decoder.

    See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.

    Based around the input feeding approach from
    "Effective Approaches to Attention-based Neural Machine Translation"
    :cite:`Luong2015`

    .. mermaid::

       graph BT
          A[Input n-1]
          AB[Input n]
          subgraph RNN
            E[Pos n-1]
            F[Pos n]
            E --> F
          end
          G[Encoder]
          H[memory_bank n-1]
          A --> E
          AB --> F
          E --> H
          G --> H
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        """
        See StdRNNDecoder._run_forward_pass() for description
        of arguments and return values.
        """
        # Additional args check.
        input_feed = self.state["input_feed"].squeeze(0)
        input_feed_batch, _ = input_feed.size()
        _, tgt_batch, _ = tgt.size()
        aeq(tgt_batch, input_feed_batch)
        # END Additional args check.

        dec_outs = []
        attns = {}
        if self.attn is not None:
            attns["std"] = []
        if self.copy_attn is not None or self._reuse_copy_attn:
            attns["copy"] = []
        if self._coverage:
            attns["coverage"] = []

        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim

        dec_state = self.state["hidden"]
        coverage = self.state["coverage"].squeeze(0) \
            if self.state["coverage"] is not None else None

        # Input feed concatenates hidden state with
        # input at every time step. Must therefore loop step by step.
        for emb_t in emb.split(1):
            decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
            rnn_output, dec_state = self.rnn(decoder_input, dec_state)
            if self.attentional:
                decoder_output, p_attn = self.attn(
                    rnn_output,
                    memory_bank.transpose(0, 1),
                    memory_lengths=memory_lengths)
                attns["std"].append(p_attn)
            else:
                decoder_output = rnn_output
            if self.context_gate is not None:
                # TODO: context gate should be employed
                # instead of second RNN transform.
                decoder_output = self.context_gate(
                    decoder_input, rnn_output, decoder_output
                )
            decoder_output = self.dropout(decoder_output)
            # The attended output feeds the next step's input.
            input_feed = decoder_output

            dec_outs += [decoder_output]

            # Update the coverage attention.
            # NOTE(review): p_attn is only bound when self.attentional —
            # coverage with attn disabled would NameError; __init__ forbids
            # that combination upstream.
            if self._coverage:
                coverage = p_attn if coverage is None else p_attn + coverage
                attns["coverage"] += [coverage]

            if self.copy_attn is not None:
                _, copy_attn = self.copy_attn(
                    decoder_output, memory_bank.transpose(0, 1))
                attns["copy"] += [copy_attn]
            elif self._reuse_copy_attn:
                attns["copy"] = attns["std"]

        return dec_state, dec_outs, attns

    def _build_rnn(self, rnn_type, input_size,
                   hidden_size, num_layers, dropout):
        assert rnn_type != "SRU", "SRU doesn't support input feed! " \
            "Please set -input_feed 0!"
        # Stacked cells are needed because the loop advances one step at a
        # time (CuDNN multi-step kernels can't be used with input feeding).
        stacked_cell = StackedLSTM if rnn_type == "LSTM" else StackedGRU
        return stacked_cell(num_layers, input_size, hidden_size, dropout)

    @property
    def _input_size(self):
        """Using input feed by concatenating input with attention vectors."""
        return self.embeddings.embedding_size + self.hidden_size

    def update_dropout(self, dropout):
        self.dropout.p = dropout
        self.rnn.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
| 15,510 | 34.172336 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/decoders/ensemble.py | """Ensemble decoding.
Decodes using multiple models simultaneously,
combining their prediction distributions by averaging.
All models in the ensemble must share a target vocabulary.
"""
import torch
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.decoders.decoder import DecoderBase
from onmt.models import NMTModel
import onmt.model_builder
class EnsembleDecoderOutput(object):
    """Bundle of per-model decoder outputs that mimics a single tensor."""

    def __init__(self, model_dec_outs):
        self.model_dec_outs = tuple(model_dec_outs)

    def squeeze(self, dim=None):
        """Apply ``squeeze`` to every wrapped output and re-wrap.

        Keeps :func:`onmt.translate.translator.Translator.translate_batch()`
        oblivious to the ensemble.
        """
        squeezed = [out.squeeze(dim) for out in self.model_dec_outs]
        return EnsembleDecoderOutput(squeezed)

    def __getitem__(self, index):
        return self.model_dec_outs[index]
class EnsembleEncoder(EncoderBase):
    """Dummy Encoder that delegates to individual real Encoders."""

    def __init__(self, model_encoders):
        super(EnsembleEncoder, self).__init__()
        self.model_encoders = nn.ModuleList(model_encoders)

    def forward(self, src, lengths=None):
        """Run every wrapped encoder on the same input.

        Returns tuples of per-model hidden states and memory banks,
        plus the (shared) lengths.
        """
        hiddens = []
        banks = []
        for model_encoder in self.model_encoders:
            hidden, bank, _ = model_encoder(src, lengths)
            hiddens.append(hidden)
            banks.append(bank)
        return tuple(hiddens), tuple(banks), lengths
class EnsembleDecoder(DecoderBase):
    """Dummy Decoder that delegates to individual real Decoders."""

    def __init__(self, model_decoders):
        model_decoders = nn.ModuleList(model_decoders)
        attentional = any([dec.attentional for dec in model_decoders])
        super(EnsembleDecoder, self).__init__(attentional)
        self.model_decoders = model_decoders

    def forward(self, tgt, memory_bank, memory_lengths=None, step=None,
                **kwargs):
        """See :func:`onmt.decoders.decoder.DecoderBase.forward()`."""
        # Memory_lengths is a single tensor shared between all models.
        # This assumption will not hold if Translator is modified
        # to calculate memory_lengths as something other than the length
        # of the input.
        dec_outs = []
        attns = []
        for i, model_decoder in enumerate(self.model_decoders):
            out, attn = model_decoder(
                tgt, memory_bank[i],
                memory_lengths=memory_lengths, step=step)
            dec_outs.append(out)
            attns.append(attn)
        return EnsembleDecoderOutput(dec_outs), self.combine_attns(attns)

    def combine_attns(self, attns):
        """Average each attention type over the ensemble, skipping None."""
        return {
            key: torch.stack([attn[key] for attn in attns
                              if attn[key] is not None]).mean(0)
            for key in attns[0].keys()
        }

    def init_state(self, src, memory_bank, enc_hidden):
        """ See :obj:`RNNDecoderBase.init_state()` """
        for i, model_decoder in enumerate(self.model_decoders):
            model_decoder.init_state(src, memory_bank[i], enc_hidden[i])

    def map_state(self, fn):
        for model_decoder in self.model_decoders:
            model_decoder.map_state(fn)
class EnsembleGenerator(nn.Module):
    """
    Dummy Generator that delegates to individual real Generators,
    and then averages the resulting target distributions.
    """

    def __init__(self, model_generators, raw_probs=False):
        super(EnsembleGenerator, self).__init__()
        self.model_generators = nn.ModuleList(model_generators)
        self._raw_probs = raw_probs

    def forward(self, hidden, attn=None, src_map=None):
        """
        Compute a distribution over the target dictionary
        by averaging distributions from models in the ensemble.

        All models in the ensemble must share a target vocabulary.
        """
        per_model = []
        for h, mg in zip(hidden, self.model_generators):
            if attn is None:
                per_model.append(mg(h))
            else:
                per_model.append(mg(h, attn, src_map))
        distributions = torch.stack(per_model)
        if not self._raw_probs:
            # Generators emit log-probs: average in log space.
            return distributions.mean(0)
        # Average in probability space, then go back to log space.
        return torch.log(torch.exp(distributions).mean(0))
class EnsembleModel(NMTModel):
    """Dummy NMTModel wrapping individual real NMTModels."""

    def __init__(self, models, raw_probs=False):
        encoders = [model.encoder for model in models]
        decoders = [model.decoder for model in models]
        super(EnsembleModel, self).__init__(EnsembleEncoder(encoders),
                                            EnsembleDecoder(decoders))
        generators = [model.generator for model in models]
        self.generator = EnsembleGenerator(generators, raw_probs)
        self.models = nn.ModuleList(models)
def load_test_model(opt):
    """Read in multiple models for ensemble.

    Loads every checkpoint listed in ``opt.models``, checks that all
    models share the same target vocabulary, and wraps them in a single
    :class:`EnsembleModel`.

    Returns:
        (fields, EnsembleModel, model_opt): the (shared) fields, the
        ensemble wrapper, and the opts of the first loaded model.
    """
    shared_fields = None
    shared_model_opt = None
    models = []
    for model_path in opt.models:
        fields, model, model_opt = \
            onmt.model_builder.load_test_model(opt, model_path=model_path)
        if shared_fields is None:
            # First model: its fields become the reference.
            shared_fields = fields
        else:
            # Subsequent models: every vocab-bearing (sub)field must match
            # the reference vocab exactly.
            for key, field in fields.items():
                # A field may be iterable (multi-field) or a single field;
                # normalize both cases to (name, field) pairs.
                try:
                    f_iter = iter(field)
                except TypeError:
                    f_iter = [(key, field)]
                for sn, sf in f_iter:
                    if sf is not None and 'vocab' in sf.__dict__:
                        sh_field = shared_fields[key]
                        try:
                            sh_f_iter = iter(sh_field)
                        except TypeError:
                            sh_f_iter = [(key, sh_field)]
                        sh_f_dict = dict(sh_f_iter)
                        assert sf.vocab.stoi == sh_f_dict[sn].vocab.stoi, \
                            "Ensemble models must use the same " \
                            "preprocessed data"
        models.append(model)
        if shared_model_opt is None:
            shared_model_opt = model_opt
    ensemble_model = EnsembleModel(models, opt.avg_raw_probs)
    return shared_fields, ensemble_model, shared_model_opt
| 5,956 | 37.432258 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/decoders/cnn_decoder.py | """Implementation of the CNN Decoder part of
"Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
from onmt.modules import ConvMultiStepAttention, GlobalAttention
from onmt.utils.cnn_factory import shape_transform, GatedConv
from onmt.decoders.decoder import DecoderBase
SCALE_WEIGHT = 0.5 ** 0.5
class CNNDecoder(DecoderBase):
    """Decoder based on "Convolutional Sequence to Sequence Learning"
    :cite:`DBLP:journals/corr/GehringAGYD17`.

    Consists of residual convolutional layers, with ConvMultiStepAttention.
    """

    def __init__(self, num_layers, hidden_size, attn_type,
                 copy_attn, cnn_kernel_width, dropout, embeddings,
                 copy_attn_type):
        super(CNNDecoder, self).__init__()

        self.cnn_kernel_width = cnn_kernel_width
        self.embeddings = embeddings

        # Decoder State (src representation + previously decoded input).
        self.state = {}

        input_size = self.embeddings.embedding_size
        self.linear = nn.Linear(input_size, hidden_size)
        self.conv_layers = nn.ModuleList(
            [GatedConv(hidden_size, cnn_kernel_width, dropout, True)
             for i in range(num_layers)]
        )
        self.attn_layers = nn.ModuleList(
            [ConvMultiStepAttention(hidden_size) for i in range(num_layers)]
        )

        # CNNDecoder has its own attention mechanism.
        # Set up a separate copy attention layer if needed.
        assert not copy_attn, "Copy mechanism not yet tested in conv2conv"
        if copy_attn:
            self.copy_attn = GlobalAttention(
                hidden_size, attn_type=copy_attn_type)
        else:
            self.copy_attn = None

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.dec_layers,
            opt.dec_rnn_size,
            opt.global_attention,
            opt.copy_attn,
            opt.cnn_kernel_width,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            embeddings,
            opt.copy_attn_type)

    def init_state(self, _, memory_bank, enc_hidden):
        """Init decoder state."""
        self.state["src"] = (memory_bank + enc_hidden) * SCALE_WEIGHT
        self.state["previous_input"] = None

    def map_state(self, fn):
        # Apply fn along the batch dimension (1), e.g. for beam tiling.
        self.state["src"] = fn(self.state["src"], 1)
        if self.state["previous_input"] is not None:
            self.state["previous_input"] = fn(self.state["previous_input"], 1)

    def detach_state(self):
        self.state["previous_input"] = self.state["previous_input"].detach()

    def forward(self, tgt, memory_bank, step=None, **kwargs):
        """ See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
        # Stepwise decoding: re-decode the full prefix each step, then
        # slice off the part that was already returned (see below).
        if self.state["previous_input"] is not None:
            tgt = torch.cat([self.state["previous_input"], tgt], 0)

        dec_outs = []
        attns = {"std": []}
        if self.copy_attn is not None:
            attns["copy"] = []

        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim

        tgt_emb = emb.transpose(0, 1).contiguous()
        # The output of CNNEncoder.
        src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()
        # The combination of output of CNNEncoder and source embeddings.
        src_memory_bank_c = self.state["src"].transpose(0, 1).contiguous()

        emb_reshape = tgt_emb.contiguous().view(
            tgt_emb.size(0) * tgt_emb.size(1), -1)
        linear_out = self.linear(emb_reshape)
        x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
        x = shape_transform(x)

        # Left-pad with kernel_width-1 zeros so convolutions stay causal.
        pad = torch.zeros(x.size(0), x.size(1), self.cnn_kernel_width - 1, 1)

        pad = pad.type_as(x)
        base_target_emb = x

        for conv, attention in zip(self.conv_layers, self.attn_layers):
            new_target_input = torch.cat([pad, x], 2)
            out = conv(new_target_input)
            c, attn = attention(base_target_emb, out,
                                src_memory_bank_t, src_memory_bank_c)
            # Residual combination, rescaled to keep variance stable.
            x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
        output = x.squeeze(3).transpose(1, 2)

        # Process the result and update the attentions.
        dec_outs = output.transpose(0, 1).contiguous()
        if self.state["previous_input"] is not None:
            # Drop positions belonging to the already-returned prefix.
            dec_outs = dec_outs[self.state["previous_input"].size(0):]
            attn = attn[:, self.state["previous_input"].size(0):].squeeze()
            attn = torch.stack([attn])
        # `attn` is the last conv layer's attention (loop variable).
        attns["std"] = attn
        if self.copy_attn is not None:
            attns["copy"] = attn

        # Update the state.
        self.state["previous_input"] = tgt
        # TODO change the way attns is returned dict => list or tuple (onnx)
        return dec_outs, attns

    def update_dropout(self, dropout):
        for layer in self.conv_layers:
            layer.dropout.p = dropout
| 4,890 | 35.5 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/decoders/hierarchical_decoder.py | """Same as normal RNNDecoder but using hierarchical attention"""
import torch
from .decoder import RNNDecoderBase
from ..modules import HierarchicalAttention
from ..models.stacked_rnn import StackedLSTM, StackedGRU
from ..utils.rnn_factory import rnn_factory
from ..utils.misc import aeq, nwise, sequence_mask
from torch.nn.utils.rnn import pad_sequence
import onmt
class ContainsNaN(Exception):
    """Raised when a tensor is found to contain NaN entries."""
def _check_for_nan(tensor):
if (tensor!=tensor).any():
raise ContainsNaN
class HierarchicalRNNDecoder(RNNDecoderBase):
"""Input feeding based decoder.
See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.
Based around the input feeding approach from
"Effective Approaches to Attention-based Neural Machine Translation"
:cite:`Luong2015`
.. mermaid::
graph BT
A[Input n-1]
AB[Input n]
subgraph RNN
E[Pos n-1]
F[Pos n]
E --> F
end
G[Encoder]
H[memory_bank n-1]
A --> E
AB --> F
E --> H
G --> H
"""
def __init__(self, rnn_type, bidirectional_encoder, num_layers,
             hidden_size, attn_type="general", attn_func="softmax",
             coverage_attn=False, context_gate=None,
             copy_attn=False, dropout=0.0, embeddings=None,
             reuse_copy_attn=False, copy_attn_type="general", use_pos=True):
    """Build a hierarchical-attention RNN decoder.

    Same options as :class:`RNNDecoderBase`, plus:
        use_pos (bool): forwarded to :class:`HierarchicalAttention`.
    ``hidden_size`` may be an int, or a ``(hidden, units)`` tuple giving
    separate sizes for the decoder state and the table-unit embeddings.
    """
    # Intentionally skip RNNDecoderBase.__init__ and call DecoderBase's
    # (grandparent) initializer: the attention modules built below differ.
    super(RNNDecoderBase, self).__init__(
        attentional=attn_type != "none" and attn_type is not None)

    assert not coverage_attn  # coverage is not supported here
    self.ent_size = onmt.ENT_SIZE
    self.bidirectional_encoder = bidirectional_encoder
    self.num_layers = num_layers
    if isinstance(hidden_size, tuple):
        self.hidden_size = hidden_size[0]
        self.units_size = hidden_size[1]
    elif isinstance(hidden_size, int):
        self.hidden_size = hidden_size
        self.units_size = hidden_size
    else:
        raise ValueError('hidden_size should be one or two ints')
    self.embeddings = embeddings
    self.dropout = torch.nn.Dropout(dropout)

    # Decoder state (hidden, input_feed, coverage); see init_state().
    self.state = {}

    # Build the RNN.
    self.rnn_type = rnn_type
    self.num_layers = num_layers
    self.rnn = self._build_rnn(rnn_type=self.rnn_type,
                               input_size=self._input_size,
                               hidden_size=self.hidden_size,
                               num_layers=self.num_layers,
                               dropout=dropout)

    # Set up the context gate.
    # NOTE(review): context_gate_factory is not imported in this module;
    # a non-None context_gate would raise NameError — confirm upstream.
    self.context_gate = None
    if context_gate is not None:
        self.context_gate = context_gate_factory(
            context_gate, self._input_size,
            self.hidden_size, self.hidden_size, self.hidden_size
        )

    # Set up the standard (hierarchical) attention.
    self._coverage = coverage_attn
    if not self.attentional:
        if self._coverage:
            raise ValueError("Cannot use coverage term with no attention.")
        self.attn = None
    else:
        self.attn = HierarchicalAttention(
            (self.hidden_size, self.units_size),
            coverage=coverage_attn, use_pos=use_pos,
            attn_type=attn_type, attn_func=attn_func)

    # Separate copy attention only when it is not reused from self.attn.
    if copy_attn and not reuse_copy_attn:
        if copy_attn_type == "none" or copy_attn_type is None:
            raise ValueError(
                "Cannot use copy_attn with copy_attn_type none")
        self.copy_attn = HierarchicalAttention(
            (self.hidden_size, self.units_size),
            attn_type=copy_attn_type, attn_func=attn_func,
            use_pos=use_pos)
    else:
        self.copy_attn = None

    self._reuse_copy_attn = reuse_copy_attn and copy_attn
    if self._reuse_copy_attn and not self.attentional:
        raise ValueError("Cannot reuse copy attention with no attention.")
def init_state(self, src, memory_bank, encoder_final):
    """Initialize the decoder's hidden state.

    This function only works with the hierarchical transformer encoder:
    ``encoder_final`` is ``[1, bsz, dim]``. We need to:

        - duplicate it to mimic a multi-layer encoder state, and
        - convert it to a tuple if the decoder RNN is an LSTM.
    """
    hidden = encoder_final.repeat(self.num_layers, 1, 1)
    self.state["hidden"] = (hidden, hidden) if self.rnn_type == "LSTM" else (hidden, )

    # Init the input feed with zeros of shape (1, batch, hidden).
    batch_size = self.state["hidden"][0].size(1)
    h_size = (batch_size, self.hidden_size)
    self.state["input_feed"] = \
        self.state["hidden"][0].data.new(*h_size).zero_().unsqueeze(0)
    self.state["coverage"] = None
@classmethod
def from_opt(cls, opt, embeddings, dims=None):
"""Alternate constructor."""
"""
dims are the dimention of the table embeddings
It is a tuple of size two (dim_value, dim_pos)
"""
if dims is None:
dims = opt.dec_rnn_size
return cls(
rnn_type=opt.rnn_type,
bidirectional_encoder=opt.brnn,
num_layers=opt.dec_layers,
hidden_size=dims,
attn_type=opt.global_attention,
attn_func=opt.global_attention_function,
coverage_attn=opt.coverage_attn,
context_gate=opt.context_gate,
copy_attn=opt.copy_attn,
dropout=opt.dropout[0] if type(opt.dropout) is list
else opt.dropout,
embeddings=embeddings,
reuse_copy_attn=opt.reuse_copy_attn,
copy_attn_type=opt.copy_attn_type,
use_pos=opt.use_pos)
    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        """Run the input-feed RNN over the whole target sequence.

        memory_bank is a tuple (chunks, units, pos_embs, unit_mask, chunk_mask)

        Args:
            tgt: target tensor, ``[tgt_len, batch, ...]`` (fed through
                ``self.embeddings``).
            memory_bank: the hierarchical-encoder tuple above.
            memory_lengths: unused here; kept for interface compatibility.

        Returns:
            ``(dec_state, dec_outs, attns)`` — final RNN state, list of
            per-step decoder outputs, and a dict mapping attention names
            ('std*', 'copy*', 'coverage') to lists (or stacked tensors for
            'std*') of attention weights.
        """
        # Additional args check.
        input_feed = self.state["input_feed"].squeeze(0)
        input_feed_batch, _ = input_feed.size()
        tgt_len, tgt_batch, _ = tgt.size()
        aeq(tgt_batch, input_feed_batch)
        # END Additional args check.
        dec_outs = []
        attns = dict()
        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim
        dec_state = self.state["hidden"]
        coverage = self.state["coverage"].squeeze(0) \
            if self.state["coverage"] is not None else None
        # Input feed concatenates hidden state with
        # input at every time step.
        for emb_t in emb.split(1):
            decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
            rnn_output, dec_state = self.rnn(decoder_input, dec_state)
            # If the RNN has several layers, we only use the last one to compute
            # the attention scores. In pytorch, the outs of the rnn are:
            # - rnn_output [seq_len, bsz, n-directions * hidden_size]
            # - dec_state [n-layers * n-directions, bsz, hidden_size] * 2
            # We unpack the rnn_output on dim 2 and keep the last layer
            if self.attentional:
                # ret maps a postfix (attention level name) to its weights;
                # each is accumulated under a 'std<postfix>' key.
                decoder_output, ret = self.attn(
                    rnn_output,
                    memory_bank)
                for postfix, tensor in ret.items():
                    key = 'std' + postfix
                    attns.setdefault(key, list())
                    attns[key].append(tensor)
            else:
                decoder_output = rnn_output
            if self.context_gate is not None:
                # TODO: context gate should be employed
                # instead of second RNN transform.
                decoder_output = self.context_gate(
                    decoder_input, rnn_output, decoder_output
                )
            decoder_output = self.dropout(decoder_output)
            # The (dropped-out) output becomes the next step's input feed.
            input_feed = decoder_output
            dec_outs += [decoder_output]
            # Update the coverage attention.
            # NOTE(review): `p_attn` is never assigned anywhere in this
            # method, so this branch raises NameError whenever
            # self._coverage is True. It presumably should use the
            # attention weights returned by self.attn — confirm and fix.
            if self._coverage:
                coverage = p_attn if coverage is None else p_attn + coverage
                attns.setdefault('coverage', list())
                attns['coverage'].append(coverage)
            if self.copy_attn is not None:
                _, copy_attn = self.copy_attn(
                    decoder_output, memory_bank)
                for postfix, tensor in copy_attn.items():
                    key = 'copy' + postfix
                    attns.setdefault(key, list())
                    attns[key].append(tensor)
        # this trick should save memory because torch.stack creates a new
        # object.
        for key in list(attns):
            if key.startswith('std'):
                attns[key] = torch.stack(attns[key])
                if self._reuse_copy_attn:
                    # Mirror the stacked standard attention as copy attention.
                    attns[key.replace('std', 'copy')] = attns[key]
        return dec_state, dec_outs, attns
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
assert rnn_type != "SRU", "SRU doesn't support input feed! " \
"Please set -input_feed 0!"
stacked_cell = StackedLSTM if rnn_type == "LSTM" else StackedGRU
return stacked_cell(num_layers, input_size, hidden_size, dropout)
@property
def _input_size(self):
"""Using input feed by concatenating input with attention vectors."""
return self.embeddings.embedding_size + self.hidden_size
def update_dropout(self, dropout):
self.dropout.p = dropout
self.rnn.dropout.p = dropout
self.embeddings.update_dropout(dropout) | 10,819 | 36.439446 | 90 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.