import torch
from torch.nn import functional as F
import torch.jit
|
|
|
|
def script_method(fn, _rcb=None):
    # No-op stand-in for torch.jit.script_method: returns the function unchanged
    # so the model runs in plain eager mode.
    return fn
|
|
|
|
def script(obj, optimize=True, _frames_up=0, _rcb=None):
    # No-op stand-in for torch.jit.script (matching signature): returns the object unchanged.
    return obj
|
|
|
|
# Monkey-patch torch.jit so any @torch.jit.script / script_method decorators below
# become no-ops and TorchScript compilation is disabled.
torch.jit.script_method = script_method
torch.jit.script = script
|
|
|
|
def init_weights(m, mean=0.0, std=0.01):
    """Initialize the weights of convolutional layers from N(mean, std)."""
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)
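# Usage sketch (assumed, not shown in this file): applied recursively over a module
# tree, e.g. `some_generator.apply(init_weights)`; only layers whose class name
# contains "Conv" are re-initialized, everything else is left untouched.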
|
|
|
|
def get_padding(kernel_size, dilation=1):
    """Padding that keeps a stride-1 (dilated) convolution's output the same length."""
    return int((kernel_size * dilation - dilation) / 2)
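# Example: get_padding(3) == 1 and get_padding(5, dilation=2) == 4,
# i.e. dilation * (kernel_size - 1) // 2 for odd kernel sizes.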
|
|
|
|
def intersperse(lst, item):
    """Insert `item` before, after, and between every element of `lst`."""
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result
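# Example: intersperse([1, 2, 3], 0) -> [0, 1, 0, 2, 0, 3, 0]; typically used to
# place a blank/pad token id between symbol ids before the text encoder.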
|
|
|
|
def slice_segments(x, ids_str, segment_size=4):
    """Slice a fixed-size segment from each batch item.

    x: [b, d, t], ids_str: [b] per-item start indices.
    """
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret
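# Example: for x of shape [b, d, t] and ids_str of shape [b], the result has shape
# [b, d, segment_size]; each batch item is cropped at its own start index.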
|
|
|
|
def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """Randomly slice a `segment_size`-frame segment from each batch item."""
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str
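# Usage sketch (assumed): x has shape [b, d, t] and x_lengths holds the valid lengths;
# the sampled start indices are returned so callers can, e.g., slice the matching
# target signal at the same offsets.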
|
|
|
|
def subsequent_mask(length):
    """Lower-triangular (causal) attention mask of shape [1, 1, length, length]."""
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask
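# Example: subsequent_mask(3)[0, 0] is the lower-triangular matrix
# [[1, 0, 0],
#  [1, 1, 0],
#  [1, 1, 1]], i.e. position i may only attend to positions <= i.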
|
|
|
|
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """WaveNet-style gated activation: tanh and sigmoid halves of (a + b), multiplied."""
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts
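# Usage note (assumed from typical callers): n_channels is passed as a one-element
# IntTensor, e.g. torch.IntTensor([hidden_channels]), rather than a plain int, which
# keeps the signature TorchScript-friendly even though torch.jit.script is patched
# to a no-op above.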
|
|
|
|
def convert_pad_shape(pad_shape):
    """Convert a [[before, after], ...] pad spec (outermost dim first) into the flat, reversed list F.pad expects."""
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape
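# Example: convert_pad_shape([[0, 0], [1, 0], [0, 0]]) -> [0, 0, 1, 0, 0, 0],
# i.e. the last dimension's padding comes first, as F.pad requires.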
|
|
|
|
def sequence_mask(length, max_length=None):
    """Boolean mask of shape [b, max_length], True at valid (non-padded) positions."""
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)
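# Example: sequence_mask(torch.tensor([2, 3]), max_length=4) ->
# [[True, True, False, False],
#  [True, True, True,  False]]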
|
|
|
|
def generate_path(duration, mask):
    """Expand per-token durations into a hard monotonic alignment path.

    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    returns: path of shape [b, 1, t_y, t_x]
    """
    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Subtract the mask shifted by one token so each token keeps only its own duration span.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path
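# Example: with duration = [[[2, 1, 3]]] (t_x = 3, t_y = 6) and an all-ones mask, the
# resulting path assigns output frames 0-1 to token 0, frame 2 to token 1, and
# frames 3-5 to token 2, i.e. a hard monotonic alignment derived from the durations.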
|
|