repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
TTS
|
TTS-master/TTS/vocoder/models/wavegrad.py
|
import numpy as np
import torch
from torch import nn
from torch.nn.utils import weight_norm
from ..layers.wavegrad import DBlock, FiLM, UBlock, Conv1d
class Wavegrad(nn.Module):
    """WaveGrad score-based (diffusion) vocoder.

    Downsamples the noisy waveform through DBlocks, upsamples the input
    spectrogram through UBlocks, and modulates the upsampling path with FiLM
    layers conditioned on the current noise level.
    """
    # pylint: disable=dangerous-default-value
    def __init__(self,
                 in_channels=80,
                 out_channels=1,
                 use_weight_norm=False,
                 y_conv_channels=32,
                 x_conv_channels=768,
                 dblock_out_channels=[128, 128, 256, 512],
                 ublock_out_channels=[512, 512, 256, 128, 128],
                 upsample_factors=[5, 5, 3, 2, 2],
                 upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 4, 8],
                                     [1, 2, 4, 8], [1, 2, 4, 8]]):
        super().__init__()

        self.use_weight_norm = use_weight_norm
        # total upsampling rate == hop length between feature frames
        self.hop_len = np.prod(upsample_factors)

        # noise-schedule tensors; filled by compute_noise_level()
        # (the original initialized self.noise_level twice — once is enough)
        self.num_steps = None
        self.beta = None
        self.alpha = None
        self.alpha_hat = None
        self.noise_level = None
        self.c1 = None
        self.c2 = None
        self.sigma = None

        # dblocks: downsample the (noisy) waveform branch
        self.y_conv = Conv1d(1, y_conv_channels, 5, padding=2)
        self.dblocks = nn.ModuleList([])
        ic = y_conv_channels
        for oc, df in zip(dblock_out_channels, reversed(upsample_factors)):
            self.dblocks.append(DBlock(ic, oc, df))
            ic = oc

        # film: noise-level conditioning for each scale
        self.film = nn.ModuleList([])
        ic = y_conv_channels
        for oc in reversed(ublock_out_channels):
            self.film.append(FiLM(ic, oc))
            ic = oc

        # ublocks: upsample the spectrogram branch
        self.ublocks = nn.ModuleList([])
        ic = x_conv_channels
        for oc, uf, ud in zip(ublock_out_channels, upsample_factors, upsample_dilations):
            self.ublocks.append(UBlock(ic, oc, uf, ud))
            ic = oc

        self.x_conv = Conv1d(in_channels, x_conv_channels, 3, padding=1)
        self.out_conv = Conv1d(oc, out_channels, 3, padding=1)

        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, spectrogram, noise_scale):
        """Predict the noise component of `x` given the spectrogram.

        Args:
            x: noisy waveform, B x 1 x T.
            spectrogram: conditioning features, B x C x T_frames.
            noise_scale: per-sample noise level, shape (B,).
        """
        shift_and_scale = []

        x = self.y_conv(x)
        shift_and_scale.append(self.film[0](x, noise_scale))

        for film, layer in zip(self.film[1:], self.dblocks):
            x = layer(x)
            shift_and_scale.append(film(x, noise_scale))

        x = self.x_conv(spectrogram)
        # FiLM outputs are consumed coarsest-first by the upsampling path
        for layer, (film_shift, film_scale) in zip(self.ublocks,
                                                   reversed(shift_and_scale)):
            x = layer(x, film_shift, film_scale)
        x = self.out_conv(x)
        return x

    def load_noise_schedule(self, path):
        """Load a precomputed `beta` schedule from a .npy pickle file."""
        beta = np.load(path, allow_pickle=True).item()['beta']
        self.compute_noise_level(beta)

    @torch.no_grad()
    def inference(self, x, y_n=None):
        """Iteratively denoise from Gaussian noise conditioned on `x`.

        Args:
            x: spectrogram, B x D x T.
            y_n: optional initial noise waveform; drawn randomly if None.
        """
        if y_n is None:
            y_n = torch.randn(x.shape[0], 1, self.hop_len * x.shape[-1], dtype=torch.float32).to(x)
        else:
            y_n = torch.FloatTensor(y_n).unsqueeze(0).unsqueeze(0).to(x)
        sqrt_alpha_hat = self.noise_level.to(x)
        # reverse diffusion: step from the last noise level back to 0
        for n in range(len(self.alpha) - 1, -1, -1):
            y_n = self.c1[n] * (y_n -
                                self.c2[n] * self.forward(y_n, x, sqrt_alpha_hat[n].repeat(x.shape[0])))
            if n > 0:
                z = torch.randn_like(y_n)
                y_n += self.sigma[n - 1] * z
            y_n.clamp_(-1.0, 1.0)
        return y_n

    def compute_y_n(self, y_0):
        """Compute noisy audio based on noise schedule.

        Returns (noise, noisy_audio, noise_scale) with channel dims restored.
        """
        self.noise_level = self.noise_level.to(y_0)
        if len(y_0.shape) == 3:
            y_0 = y_0.squeeze(1)
        # sample a continuous noise level between two discrete schedule steps
        s = torch.randint(0, self.num_steps - 1, [y_0.shape[0]])
        l_a, l_b = self.noise_level[s], self.noise_level[s+1]
        noise_scale = l_a + torch.rand(y_0.shape[0]).to(y_0) * (l_b - l_a)
        noise_scale = noise_scale.unsqueeze(1)
        noise = torch.randn_like(y_0)
        noisy_audio = noise_scale * y_0 + (1.0 - noise_scale**2)**0.5 * noise
        return noise.unsqueeze(1), noisy_audio.unsqueeze(1), noise_scale[:, 0]

    def compute_noise_level(self, beta):
        """Precompute noise-schedule parameters from `beta` values."""
        beta = np.asarray(beta)
        self.num_steps = len(beta)
        alpha = 1 - beta
        alpha_hat = np.cumprod(alpha)
        # NOTE: the original also built np.concatenate([[1.0], alpha_hat**0.5])
        # and immediately overwrote it — the dead assignment is removed here.
        noise_level = alpha_hat ** 0.5

        # pylint: disable=not-callable
        self.beta = torch.tensor(beta.astype(np.float32))
        self.alpha = torch.tensor(alpha.astype(np.float32))
        self.alpha_hat = torch.tensor(alpha_hat.astype(np.float32))
        self.noise_level = torch.tensor(noise_level.astype(np.float32))

        # constants of the reverse-diffusion update rule
        self.c1 = 1 / self.alpha**0.5
        self.c2 = (1 - self.alpha) / (1 - self.alpha_hat)**0.5
        self.sigma = ((1.0 - self.alpha_hat[:-1]) / (1.0 - self.alpha_hat[1:]) * self.beta[1:])**0.5

    def remove_weight_norm(self):
        """Strip weight normalization from all layers (for inference)."""
        for _, layer in enumerate(self.dblocks):
            if len(layer.state_dict()) != 0:
                try:
                    nn.utils.remove_weight_norm(layer)
                except ValueError:
                    layer.remove_weight_norm()
        for _, layer in enumerate(self.film):
            if len(layer.state_dict()) != 0:
                try:
                    nn.utils.remove_weight_norm(layer)
                except ValueError:
                    layer.remove_weight_norm()
        for _, layer in enumerate(self.ublocks):
            if len(layer.state_dict()) != 0:
                try:
                    nn.utils.remove_weight_norm(layer)
                except ValueError:
                    layer.remove_weight_norm()
        nn.utils.remove_weight_norm(self.x_conv)
        nn.utils.remove_weight_norm(self.out_conv)
        nn.utils.remove_weight_norm(self.y_conv)

    def apply_weight_norm(self):
        """Apply weight normalization to all layers (for training)."""
        for _, layer in enumerate(self.dblocks):
            if len(layer.state_dict()) != 0:
                layer.apply_weight_norm()
        for _, layer in enumerate(self.film):
            if len(layer.state_dict()) != 0:
                layer.apply_weight_norm()
        for _, layer in enumerate(self.ublocks):
            if len(layer.state_dict()) != 0:
                layer.apply_weight_norm()
        self.x_conv = weight_norm(self.x_conv)
        self.out_conv = weight_norm(self.out_conv)
        self.y_conv = weight_norm(self.y_conv)

    def load_checkpoint(self, config, checkpoint_path, eval=False):  # pylint: disable=unused-argument, redefined-builtin
        """Load model weights and (re)build the matching noise schedule."""
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        self.load_state_dict(state['model'])
        if eval:
            self.eval()
            assert not self.training
            if self.use_weight_norm:
                self.remove_weight_norm()
            betas = np.linspace(config['test_noise_schedule']['min_val'],
                                config['test_noise_schedule']['max_val'],
                                config['test_noise_schedule']['num_steps'])
            self.compute_noise_level(betas)
        else:
            betas = np.linspace(config['train_noise_schedule']['min_val'],
                                config['train_noise_schedule']['max_val'],
                                config['train_noise_schedule']['num_steps'])
            self.compute_noise_level(betas)
| 7,448
| 36.812183
| 121
|
py
|
TTS
|
TTS-master/TTS/vocoder/models/fullband_melgan_generator.py
|
import torch
from TTS.vocoder.models.melgan_generator import MelganGenerator
class FullbandMelganGenerator(MelganGenerator):
    """Full-band MelGAN generator: a MelganGenerator preset with deeper
    residual stacks (4 blocks) and (2, 8, 2, 2) upsampling."""

    def __init__(self,
                 in_channels=80,
                 out_channels=1,
                 proj_kernel=7,
                 base_channels=512,
                 upsample_factors=(2, 8, 2, 2),
                 res_kernel=3,
                 num_res_blocks=4):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         proj_kernel=proj_kernel,
                         base_channels=base_channels,
                         upsample_factors=upsample_factors,
                         res_kernel=res_kernel,
                         num_res_blocks=num_res_blocks)

    @torch.no_grad()
    def inference(self, cond_features):
        """Generate a waveform from conditioning features with edge padding."""
        device = self.layers[1].weight.device
        padded = torch.nn.functional.pad(
            cond_features.to(device),
            (self.inference_padding, self.inference_padding),
            'replicate')
        return self.layers(padded)
| 1,108
| 34.774194
| 70
|
py
|
TTS
|
TTS-master/TTS/vocoder/models/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/vocoder/models/melgan_generator.py
|
import torch
from torch import nn
from torch.nn.utils import weight_norm
from TTS.vocoder.layers.melgan import ResidualStack
class MelganGenerator(nn.Module):
    """MelGAN vocoder generator.

    Projects input features to ``base_channels``, upsamples to the audio rate
    with transposed convolutions interleaved with residual stacks, and maps to
    a tanh-bounded waveform with a final convolution.
    """
    def __init__(self,
                 in_channels=80,
                 out_channels=1,
                 proj_kernel=7,
                 base_channels=512,
                 upsample_factors=(8, 8, 2, 2),
                 res_kernel=3,
                 num_res_blocks=3):
        """
        Args:
            in_channels: input feature channels (mel bands).
            out_channels: output waveform channels.
            proj_kernel: kernel size of the first/last projection convs (odd).
            base_channels: channels after the first projection; halved at
                every upsampling stage.
            upsample_factors: per-stage upsampling rates.
            res_kernel: kernel size inside the residual stacks.
            num_res_blocks: residual blocks per stack.
        """
        super(MelganGenerator, self).__init__()

        # assert model parameters
        assert (proj_kernel -
                1) % 2 == 0, " [!] proj_kernel should be an odd number."

        # setup additional model parameters
        base_padding = (proj_kernel - 1) // 2
        act_slope = 0.2
        # frames of context added on each side at inference time
        self.inference_padding = 2

        # initial layer
        layers = []
        layers += [
            nn.ReflectionPad1d(base_padding),
            weight_norm(
                nn.Conv1d(in_channels,
                          base_channels,
                          kernel_size=proj_kernel,
                          stride=1,
                          bias=True))
        ]

        # upsampling layers and residual stacks
        for idx, upsample_factor in enumerate(upsample_factors):
            layer_in_channels = base_channels // (2**idx)
            layer_out_channels = base_channels // (2**(idx + 1))
            layer_filter_size = upsample_factor * 2
            layer_stride = upsample_factor
            # padding/output_padding chosen so the transposed conv yields
            # exactly input_length * upsample_factor for even and odd factors
            layer_output_padding = upsample_factor % 2
            layer_padding = upsample_factor // 2 + layer_output_padding
            layers += [
                nn.LeakyReLU(act_slope),
                weight_norm(
                    nn.ConvTranspose1d(layer_in_channels,
                                       layer_out_channels,
                                       layer_filter_size,
                                       stride=layer_stride,
                                       padding=layer_padding,
                                       output_padding=layer_output_padding,
                                       bias=True)),
                ResidualStack(
                    channels=layer_out_channels,
                    num_res_blocks=num_res_blocks,
                    kernel_size=res_kernel
                )
            ]

        layers += [nn.LeakyReLU(act_slope)]

        # final layer
        layers += [
            nn.ReflectionPad1d(base_padding),
            weight_norm(
                nn.Conv1d(layer_out_channels,
                          out_channels,
                          proj_kernel,
                          stride=1,
                          bias=True)),
            nn.Tanh()
        ]
        self.layers = nn.Sequential(*layers)

    def forward(self, c):
        """Map conditioning features ``c`` (B x C x T) to a waveform."""
        return self.layers(c)

    def inference(self, c):
        """Run the generator after replicate-padding the input features."""
        c = c.to(self.layers[1].weight.device)
        c = torch.nn.functional.pad(
            c,
            (self.inference_padding, self.inference_padding),
            'replicate')
        return self.layers(c)

    def remove_weight_norm(self):
        """Strip weight normalization from every layer (for inference)."""
        for _, layer in enumerate(self.layers):
            if len(layer.state_dict()) != 0:
                try:
                    nn.utils.remove_weight_norm(layer)
                except ValueError:
                    # container layers expose their own removal helper
                    layer.remove_weight_norm()

    def load_checkpoint(self, config, checkpoint_path, eval=False):  # pylint: disable=unused-argument, redefined-builtin
        """Load weights from a checkpoint; strip weight norm in eval mode."""
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        self.load_state_dict(state['model'])
        if eval:
            self.eval()
            assert not self.training
            self.remove_weight_norm()
| 3,691
| 33.830189
| 121
|
py
|
TTS
|
TTS-master/TTS/vocoder/datasets/wavegrad_dataset.py
|
import os
import glob
import torch
import random
import numpy as np
from torch.utils.data import Dataset
from multiprocessing import Manager
class WaveGradDataset(Dataset):
    """
    WaveGrad Dataset searchs for all the wav files under root path
    and converts them to acoustic features on the fly and returns
    random segments of (audio, feature) couples.
    """
    def __init__(self,
                 ap,
                 items,
                 seq_len,
                 hop_len,
                 pad_short,
                 conv_pad=2,
                 is_training=True,
                 return_segments=True,
                 use_noise_augment=False,
                 use_cache=False,
                 verbose=False):
        """
        Args:
            ap: audio processor providing load_wav()/melspectrogram().
            items: list of wav file paths.
            seq_len: audio segment length in samples (segments mode only).
            hop_len: feature frame hop length in samples.
            pad_short: extra samples of padding for too-short clips.
            conv_pad: extra context frames per side of a feature segment.
            is_training: enables training-only behavior (noise augment).
            return_segments: return random segments instead of full clips.
            use_noise_augment: add 1-LSB dither noise while training.
            use_cache: cache loaded audio in shared memory across workers.
            verbose: verbosity flag (kept for API compatibility).
        """
        self.ap = ap
        self.item_list = items
        self.seq_len = seq_len if return_segments else None
        self.hop_len = hop_len
        self.pad_short = pad_short
        self.conv_pad = conv_pad
        self.is_training = is_training
        self.return_segments = return_segments
        self.use_cache = use_cache
        self.use_noise_augment = use_noise_augment
        self.verbose = verbose

        if return_segments:
            assert seq_len % hop_len == 0, " [!] seq_len has to be a multiple of hop_len."
            self.feat_frame_len = seq_len // hop_len + (2 * conv_pad)

        # cache acoustic features
        if use_cache:
            self.create_feature_cache()

    def create_feature_cache(self):
        """Create a multiprocessing-shared cache slot per item."""
        self.manager = Manager()
        self.cache = self.manager.list()
        self.cache += [None for _ in range(len(self.item_list))]

    @staticmethod
    def find_wav_files(path):
        """Recursively collect all wav files under `path`."""
        return glob.glob(os.path.join(path, '**', '*.wav'), recursive=True)

    def __len__(self):
        return len(self.item_list)

    def __getitem__(self, idx):
        item = self.load_item(idx)
        return item

    def load_test_samples(self, num_samples):
        """Return the first `num_samples` items as full (mel, audio) clips."""
        samples = []
        return_segments = self.return_segments
        self.return_segments = False
        for idx in range(num_samples):
            mel, audio = self.load_item(idx)
            samples.append([mel, audio])
        self.return_segments = return_segments
        return samples

    def load_item(self, idx):
        """ load (audio, feat) couple """
        # compute features from wav
        wavpath = self.item_list[idx]

        if self.use_cache and self.cache[idx] is not None:
            audio = self.cache[idx]
        else:
            audio = self.ap.load_wav(wavpath)

            if self.return_segments:
                # correct audio length wrt segment length
                if audio.shape[-1] < self.seq_len + self.pad_short:
                    audio = np.pad(audio, (0, self.seq_len + self.pad_short - len(audio)),
                                   mode='constant', constant_values=0.0)
                assert audio.shape[-1] >= self.seq_len + self.pad_short, f"{audio.shape[-1]} vs {self.seq_len + self.pad_short}"

            # correct the audio length wrt hop length
            p = (audio.shape[-1] // self.hop_len + 1) * self.hop_len - audio.shape[-1]
            audio = np.pad(audio, (0, p), mode='constant', constant_values=0.0)

        if self.use_cache:
            self.cache[idx] = audio

        if self.return_segments:
            max_start = len(audio) - self.seq_len
            start = random.randint(0, max_start)
            end = start + self.seq_len
            audio = audio[start:end]

        if self.use_noise_augment and self.is_training and self.return_segments:
            # BUGFIX: `audio` is still a numpy array here (it is converted to
            # a torch tensor below), so torch.randn_like() would raise.
            # Draw the 1-LSB dither noise with numpy instead.
            audio = audio + (1 / 32768) * np.random.randn(*audio.shape).astype(audio.dtype)

        mel = self.ap.melspectrogram(audio)
        mel = mel[..., :-1]  # ignore the padding

        audio = torch.from_numpy(audio).float()
        mel = torch.from_numpy(mel).float().squeeze(0)
        return (mel, audio)

    @staticmethod
    def collate_full_clips(batch):
        """This is used in tune_wavegrad.py.
        It pads sequences to the max length."""
        max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1]
        max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0]

        mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length])
        audios = torch.zeros([len(batch), max_audio_length])

        for idx, b in enumerate(batch):
            mel = b[0]
            audio = b[1]
            mels[idx, :, :mel.shape[1]] = mel
            audios[idx, :audio.shape[0]] = audio
        return mels, audios
| 4,572
| 33.643939
| 128
|
py
|
TTS
|
TTS-master/TTS/vocoder/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/vocoder/datasets/gan_dataset.py
|
import os
import glob
import torch
import random
import numpy as np
from torch.utils.data import Dataset
from multiprocessing import Manager
class GANDataset(Dataset):
    """
    GAN Dataset searchs for all the wav files under root path
    and converts them to acoustic features on the fly and returns
    random segments of (audio, feature) couples.
    """
    def __init__(self,
                 ap,
                 items,
                 seq_len,
                 hop_len,
                 pad_short,
                 conv_pad=2,
                 is_training=True,
                 return_segments=True,
                 use_noise_augment=False,
                 use_cache=False,
                 verbose=False):
        # ap: audio processor providing load_wav()/melspectrogram()
        self.ap = ap
        self.item_list = items
        # bare wav paths -> compute features on the fly;
        # (wav, feat) tuples -> load precomputed features from disk
        self.compute_feat = not isinstance(items[0], (tuple, list))
        self.seq_len = seq_len
        self.hop_len = hop_len
        self.pad_short = pad_short
        self.conv_pad = conv_pad
        self.is_training = is_training
        self.return_segments = return_segments
        self.use_cache = use_cache
        self.use_noise_augment = use_noise_augment
        self.verbose = verbose

        assert seq_len % hop_len == 0, " [!] seq_len has to be a multiple of hop_len."
        # segment length in feature frames incl. conv context on both sides
        self.feat_frame_len = seq_len // hop_len + (2 * conv_pad)

        # map G and D instances
        self.G_to_D_mappings = list(range(len(self.item_list)))
        self.shuffle_mapping()

        # cache acoustic features
        if use_cache:
            self.create_feature_cache()

    def create_feature_cache(self):
        # one multiprocessing-shared cache slot per item
        self.manager = Manager()
        self.cache = self.manager.list()
        self.cache += [None for _ in range(len(self.item_list))]

    @staticmethod
    def find_wav_files(path):
        # recursive search for wav files under `path`
        return glob.glob(os.path.join(path, '**', '*.wav'), recursive=True)

    def __len__(self):
        return len(self.item_list)

    def __getitem__(self, idx):
        """ Return different items for Generator and Discriminator and
        cache acoustic features """
        if self.return_segments:
            # discriminator gets an independently shuffled item
            idx2 = self.G_to_D_mappings[idx]
            item1 = self.load_item(idx)
            item2 = self.load_item(idx2)
            return item1, item2
        item1 = self.load_item(idx)
        return item1

    def shuffle_mapping(self):
        # reshuffle the generator -> discriminator item pairing
        random.shuffle(self.G_to_D_mappings)

    def load_item(self, idx):
        """ load (audio, feat) couple """
        if self.compute_feat:
            # compute features from wav
            wavpath = self.item_list[idx]
            # print(wavpath)

            if self.use_cache and self.cache[idx] is not None:
                audio, mel = self.cache[idx]
            else:
                audio = self.ap.load_wav(wavpath)

                if len(audio) < self.seq_len + self.pad_short:
                    audio = np.pad(audio, (0, self.seq_len + self.pad_short - len(audio)), \
                                   mode='constant', constant_values=0.0)

                mel = self.ap.melspectrogram(audio)
        else:

            # load precomputed features
            wavpath, feat_path = self.item_list[idx]

            if self.use_cache and self.cache[idx] is not None:
                audio, mel = self.cache[idx]
            else:
                audio = self.ap.load_wav(wavpath)
                mel = np.load(feat_path)

        # correct the audio length wrt padding applied in stft
        audio = np.pad(audio, (0, self.hop_len), mode="edge")
        audio = audio[:mel.shape[-1] * self.hop_len]
        assert mel.shape[-1] * self.hop_len == audio.shape[-1], f' [!] {mel.shape[-1] * self.hop_len} vs {audio.shape[-1]}'

        audio = torch.from_numpy(audio).float().unsqueeze(0)
        mel = torch.from_numpy(mel).float().squeeze(0)

        if self.return_segments:
            # random segment aligned between feature frames and samples
            max_mel_start = mel.shape[1] - self.feat_frame_len
            mel_start = random.randint(0, max_mel_start)
            mel_end = mel_start + self.feat_frame_len
            mel = mel[:, mel_start:mel_end]

            audio_start = mel_start * self.hop_len
            audio = audio[:, audio_start:audio_start +
                          self.seq_len]

        if self.use_noise_augment and self.is_training and self.return_segments:
            # 1-LSB dither noise for training robustness
            audio = audio + (1 / 32768) * torch.randn_like(audio)
        return (mel, audio)
| 4,369
| 33.140625
| 123
|
py
|
TTS
|
TTS-master/TTS/vocoder/datasets/wavernn_dataset.py
|
import torch
import numpy as np
from torch.utils.data import Dataset
class WaveRNNDataset(Dataset):
    """
    WaveRNN Dataset searchs for all the wav files under root path
    and converts them to acoustic features on the fly.
    """
    def __init__(self,
                 ap,
                 items,
                 seq_len,
                 hop_len,
                 pad,
                 mode,
                 mulaw,
                 is_training=True,
                 verbose=False,
                 ):
        """
        Args:
            ap: audio processor with load_wav()/melspectrogram()/quantize().
            items: wav paths, or (wav, feat) path tuples for precomputed feats.
            seq_len: audio segment length in samples.
            hop_len: feature hop length in samples.
            pad: extra feature frames of context per side.
            mode: "gauss"/"mold" for continuous output, or an int bit depth.
            mulaw: use mu-law companding when quantizing.
            is_training: training flag (kept for API compatibility).
            verbose: verbosity flag (kept for API compatibility).
        """
        self.ap = ap
        self.compute_feat = not isinstance(items[0], (tuple, list))
        self.item_list = items
        self.seq_len = seq_len
        self.hop_len = hop_len
        self.mel_len = seq_len // hop_len
        self.pad = pad
        self.mode = mode
        self.mulaw = mulaw
        self.is_training = is_training
        self.verbose = verbose

        assert self.seq_len % self.hop_len == 0

    def __len__(self):
        return len(self.item_list)

    def __getitem__(self, index):
        item = self.load_item(index)
        return item

    def load_item(self, index):
        """
        load (audio, feat) couple if feature_path is set
        else compute it on the fly
        """
        if self.compute_feat:
            wavpath = self.item_list[index]
            audio = self.ap.load_wav(wavpath)
            # need two segments plus context so collate() can always crop
            min_audio_len = 2 * self.seq_len + (2 * self.pad * self.hop_len)
            if audio.shape[0] < min_audio_len:
                print(" [!] Instance is too short! : {}".format(wavpath))
                audio = np.pad(audio, [0, min_audio_len - audio.shape[0] + self.hop_len])
            mel = self.ap.melspectrogram(audio)

            if self.mode in ["gauss", "mold"]:
                x_input = audio
            elif isinstance(self.mode, int):
                x_input = (self.ap.mulaw_encode(audio, qc=self.mode)
                           if self.mulaw else self.ap.quantize(audio, bits=self.mode))
            else:
                raise RuntimeError("Unknown dataset mode - ", self.mode)
        else:
            wavpath, feat_path = self.item_list[index]
            mel = np.load(feat_path.replace("/quant/", "/mel/"))

            if mel.shape[-1] < self.mel_len + 2 * self.pad:
                # too short: replace this item with the next one and reload
                print(" [!] Instance is too short! : {}".format(wavpath))
                self.item_list[index] = self.item_list[index + 1]
                # BUGFIX: entries are (wav, feat) tuples — the original
                # assigned the whole tuple to `feat_path`, which crashed on
                # the .replace() below. Unpack it instead.
                wavpath, feat_path = self.item_list[index]
                mel = np.load(feat_path.replace("/quant/", "/mel/"))
            if self.mode in ["gauss", "mold"]:
                x_input = self.ap.load_wav(wavpath)
            elif isinstance(self.mode, int):
                x_input = np.load(feat_path.replace("/mel/", "/quant/"))
            else:
                raise RuntimeError("Unknown dataset mode - ", self.mode)

        return mel, x_input, wavpath

    def collate(self, batch):
        """Crop aligned random (mel, audio) windows and batch them.

        Returns (x_input, mels, y_coarse) where y_coarse is x shifted by one
        sample (teacher forcing target).
        """
        mel_win = self.seq_len // self.hop_len + 2 * self.pad
        max_offsets = [x[0].shape[-1] -
                       (mel_win + 2 * self.pad) for x in batch]

        mel_offsets = [np.random.randint(0, offset) for offset in max_offsets]
        sig_offsets = [(offset + self.pad) *
                       self.hop_len for offset in mel_offsets]

        mels = [
            x[0][:, mel_offsets[i]: mel_offsets[i] + mel_win]
            for i, x in enumerate(batch)
        ]

        coarse = [
            x[1][sig_offsets[i]: sig_offsets[i] + self.seq_len + 1]
            for i, x in enumerate(batch)
        ]

        mels = np.stack(mels).astype(np.float32)
        if self.mode in ["gauss", "mold"]:
            coarse = np.stack(coarse).astype(np.float32)
            coarse = torch.FloatTensor(coarse)
            x_input = coarse[:, : self.seq_len]
        elif isinstance(self.mode, int):
            coarse = np.stack(coarse).astype(np.int64)
            coarse = torch.LongTensor(coarse)
            # rescale quantized input to [-1, 1]
            x_input = (2 * coarse[:, : self.seq_len].float() /
                       (2 ** self.mode - 1.0) - 1.0)
        y_coarse = coarse[:, 1:]
        mels = torch.FloatTensor(mels)
        return x_input, mels, y_coarse
| 4,136
| 33.764706
| 89
|
py
|
TTS
|
TTS-master/TTS/vocoder/datasets/preprocess.py
|
import glob
import os
from pathlib import Path
from tqdm import tqdm
import numpy as np
def preprocess_wav_files(out_path, config, ap):
    """Precompute acoustic features for every wav under ``config.data_path``.

    Writes ``<out_path>/mel/<name>.npy`` for each clip and, when
    ``config.mode`` is an integer bit depth, a matching quantized waveform
    under ``<out_path>/quant/<name>.npy``.
    """
    quant_dir = os.path.join(out_path, "quant")
    mel_dir = os.path.join(out_path, "mel")
    os.makedirs(quant_dir, exist_ok=True)
    os.makedirs(mel_dir, exist_ok=True)

    for wav_file in tqdm(find_wav_files(config.data_path)):
        stem = Path(wav_file).stem
        wav = ap.load_wav(wav_file)
        np.save(os.path.join(mel_dir, stem + ".npy"), ap.melspectrogram(wav))
        if isinstance(config.mode, int):
            # mu-law companding or plain uniform quantization
            quant = (
                ap.mulaw_encode(wav, qc=config.mode)
                if config.mulaw
                else ap.quantize(wav, bits=config.mode)
            )
            np.save(os.path.join(quant_dir, stem + ".npy"), quant)
def find_wav_files(data_path):
    """Recursively collect every ``*.wav`` file under ``data_path``."""
    pattern = os.path.join(data_path, "**", "*.wav")
    return glob.glob(pattern, recursive=True)
def find_feat_files(data_path):
    """Recursively collect every ``*.npy`` feature file under ``data_path``."""
    pattern = os.path.join(data_path, "**", "*.npy")
    return glob.glob(pattern, recursive=True)
def load_wav_data(data_path, eval_split_size):
    """Collect wav paths and split them into (eval, train) lists.

    The shuffle is seeded with 0 so the split is reproducible across runs.
    """
    paths = find_wav_files(data_path)
    np.random.seed(0)
    np.random.shuffle(paths)
    return paths[:eval_split_size], paths[eval_split_size:]
def load_wav_feat_data(data_path, feat_path, eval_split_size):
    """Pair each wav with its precomputed feature file and split eval/train.

    Files are matched by stem after sorting both lists; a mismatch in count
    or naming raises an AssertionError. The shuffle is seeded for a
    reproducible split.
    """
    wav_paths = find_wav_files(data_path)
    feat_paths = find_feat_files(feat_path)

    wav_paths.sort(key=lambda p: Path(p).stem)
    feat_paths.sort(key=lambda p: Path(p).stem)

    assert len(wav_paths) == len(feat_paths)
    for wav, feat in zip(wav_paths, feat_paths):
        assert Path(wav).stem == Path(feat).stem

    items = list(zip(wav_paths, feat_paths))
    np.random.seed(0)
    np.random.shuffle(items)
    return items[:eval_split_size], items[eval_split_size:]
| 1,966
| 30.222222
| 82
|
py
|
TTS
|
TTS-master/TTS/vocoder/layers/losses.py
|
import torch
from torch import nn
from torch.nn import functional as F
class TorchSTFT(nn.Module):
    """Magnitude spectrogram via ``torch.stft``."""
    def __init__(self, n_fft, hop_length, win_length, window='hann_window'):
        """ Torch based STFT operation """
        super().__init__()
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        # stored as a frozen Parameter so it follows the module device/dtype
        self.window = nn.Parameter(getattr(torch, window)(win_length),
                                   requires_grad=False)

    def __call__(self, x):
        """Return |STFT(x)| with shape B x (n_fft/2 + 1) x frames."""
        spec = torch.stft(x,
                          self.n_fft,
                          self.hop_length,
                          self.win_length,
                          self.window,
                          center=True,
                          pad_mode="reflect",  # compatible with audio.py
                          normalized=False,
                          onesided=True,
                          return_complex=False)
        real = spec[:, :, :, 0]
        imag = spec[:, :, :, 1]
        # clamp keeps sqrt() differentiable around zero-energy bins
        return torch.sqrt(torch.clamp(real * real + imag * imag, min=1e-8))
#################################
# GENERATOR LOSSES
#################################
class STFTLoss(nn.Module):
    """Single-resolution STFT loss: log-magnitude L1 + spectral convergence."""
    def __init__(self, n_fft, hop_length, win_length):
        super().__init__()
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.stft = TorchSTFT(n_fft, hop_length, win_length)

    def forward(self, y_hat, y):
        """Return (magnitude_loss, spectral_convergence_loss)."""
        mag_hat = self.stft(y_hat)
        mag_ref = self.stft(y)
        # L1 distance between log magnitudes
        loss_mag = F.l1_loss(torch.log(mag_ref), torch.log(mag_hat))
        # relative Frobenius error of the raw magnitudes
        loss_sc = torch.norm(mag_ref - mag_hat, p="fro") / torch.norm(mag_ref, p="fro")
        return loss_mag, loss_sc
class MultiScaleSTFTLoss(torch.nn.Module):
    """STFT loss averaged over several analysis resolutions."""
    def __init__(self,
                 n_ffts=(1024, 2048, 512),
                 hop_lengths=(120, 240, 50),
                 win_lengths=(600, 1200, 240)):
        super().__init__()
        self.loss_funcs = torch.nn.ModuleList(
            STFTLoss(n_fft, hop_length, win_length)
            for n_fft, hop_length, win_length in zip(n_ffts, hop_lengths, win_lengths))

    def forward(self, y_hat, y):
        """Return per-scale-averaged (magnitude_loss, sc_loss)."""
        num_scales = len(self.loss_funcs)
        loss_mag = 0
        loss_sc = 0
        for loss_func in self.loss_funcs:
            mag, sc = loss_func(y_hat, y)
            loss_mag = loss_mag + mag
            loss_sc = loss_sc + sc
        return loss_mag / num_scales, loss_sc / num_scales
class MultiScaleSubbandSTFTLoss(MultiScaleSTFTLoss):
    """ Multiscale STFT loss for multi band model outputs """
    # pylint: disable=no-self-use
    def forward(self, y_hat, y):
        # fold the band dimension into the batch dimension before scoring
        flat_hat = y_hat.view(-1, 1, y_hat.shape[2]).squeeze(1)
        flat_ref = y.view(-1, 1, y.shape[2]).squeeze(1)
        return super().forward(flat_hat, flat_ref)
class MSEGLoss(nn.Module):
    """Least-squares generator loss: push fake scores toward 1."""
    # pylint: disable=no-self-use
    def forward(self, score_real):
        target = score_real.new_ones(score_real.shape)
        return F.mse_loss(score_real, target)
class HingeGLoss(nn.Module):
    """Hinge generator loss: penalize fake scores below 1."""
    # pylint: disable=no-self-use
    def forward(self, score_real):
        # TODO: this might be wrong (kept from original implementation)
        hinge = F.relu(1. - score_real)
        return torch.mean(hinge)
##################################
# DISCRIMINATOR LOSSES
##################################
class MSEDLoss(nn.Module):
    """Least-squares discriminator loss: real scores -> 1, fake -> 0."""
    def __init__(self,):
        super().__init__()
        self.loss_func = nn.MSELoss()

    # pylint: disable=no-self-use
    def forward(self, score_fake, score_real):
        """Return (total, real_loss, fake_loss)."""
        loss_real = self.loss_func(score_real, score_real.new_ones(score_real.shape))
        loss_fake = self.loss_func(score_fake, score_fake.new_zeros(score_fake.shape))
        return loss_real + loss_fake, loss_real, loss_fake
class HingeDLoss(nn.Module):
    """Hinge discriminator loss: real scores above +1, fake below -1."""
    # pylint: disable=no-self-use
    def forward(self, score_fake, score_real):
        """Return (total, real_loss, fake_loss)."""
        loss_real = torch.mean(F.relu(1. - score_real))
        loss_fake = torch.mean(F.relu(1. + score_fake))
        return loss_real + loss_fake, loss_real, loss_fake
class MelganFeatureLoss(nn.Module):
    """L1 feature-matching loss over discriminator feature maps."""
    def __init__(self,):
        super().__init__()
        self.loss_func = nn.L1Loss()

    # pylint: disable=no-self-use
    def forward(self, fake_feats, real_feats):
        total = 0
        for fake_feat, real_feat in zip(fake_feats, real_feats):
            total = total + self.loss_func(fake_feat, real_feat)
        # NOTE(review): normalizes by len(fake)+len(real) == 2N for N pairs,
        # i.e. half the per-pair mean — kept as-is for training parity.
        return total / (len(fake_feats) + len(real_feats))
#####################################
# LOSS WRAPPERS
#####################################
def _apply_G_adv_loss(scores_fake, loss_func):
""" Compute G adversarial loss function
and normalize values """
adv_loss = 0
if isinstance(scores_fake, list):
for score_fake in scores_fake:
fake_loss = loss_func(score_fake)
adv_loss += fake_loss
adv_loss /= len(scores_fake)
else:
fake_loss = loss_func(scores_fake)
adv_loss = fake_loss
return adv_loss
def _apply_D_loss(scores_fake, scores_real, loss_func):
""" Compute D loss func and normalize loss values """
loss = 0
real_loss = 0
fake_loss = 0
if isinstance(scores_fake, list):
# multi-scale loss
for score_fake, score_real in zip(scores_fake, scores_real):
total_loss, real_loss, fake_loss = loss_func(score_fake=score_fake, score_real=score_real)
loss += total_loss
real_loss += real_loss
fake_loss += fake_loss
# normalize loss values with number of scales
loss /= len(scores_fake)
real_loss /= len(scores_real)
fake_loss /= len(scores_fake)
else:
# single scale loss
total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real)
loss = total_loss
return loss, real_loss, fake_loss
##################################
# MODEL LOSSES
##################################
class GeneratorLoss(nn.Module):
    def __init__(self, C):
        """ Compute Generator Loss values depending on training
        configuration

        Args:
            C: config object exposing use_* flags, *_weight values and the
               per-loss parameter dicts.
        """
        super().__init__()
        assert not(C.use_mse_gan_loss and C.use_hinge_gan_loss),\
            " [!] Cannot use HingeGANLoss and MSEGANLoss together."

        self.use_stft_loss = C.use_stft_loss
        self.use_subband_stft_loss = C.use_subband_stft_loss
        self.use_mse_gan_loss = C.use_mse_gan_loss
        self.use_hinge_gan_loss = C.use_hinge_gan_loss
        self.use_feat_match_loss = C.use_feat_match_loss

        self.stft_loss_weight = C.stft_loss_weight
        self.subband_stft_loss_weight = C.subband_stft_loss_weight
        self.mse_gan_loss_weight = C.mse_G_loss_weight
        self.hinge_gan_loss_weight = C.hinge_G_loss_weight
        self.feat_match_loss_weight = C.feat_match_loss_weight

        if C.use_stft_loss:
            self.stft_loss = MultiScaleSTFTLoss(**C.stft_loss_params)
        if C.use_subband_stft_loss:
            self.subband_stft_loss = MultiScaleSubbandSTFTLoss(**C.subband_stft_loss_params)
        if C.use_mse_gan_loss:
            self.mse_loss = MSEGLoss()
        if C.use_hinge_gan_loss:
            self.hinge_loss = HingeGLoss()
        if C.use_feat_match_loss:
            self.feat_match_loss = MelganFeatureLoss()

    def forward(self, y_hat=None, y=None, scores_fake=None, feats_fake=None, feats_real=None, y_hat_sub=None, y_sub=None):
        """Return a dict with all enabled loss terms plus
        ``G_loss``/``G_gen_loss``/``G_adv_loss`` aggregates."""
        gen_loss = 0
        adv_loss = 0
        return_dict = {}

        # STFT Loss
        if self.use_stft_loss:
            stft_loss_mg, stft_loss_sc = self.stft_loss(y_hat.squeeze(1), y.squeeze(1))
            return_dict['G_stft_loss_mg'] = stft_loss_mg
            return_dict['G_stft_loss_sc'] = stft_loss_sc
            gen_loss += self.stft_loss_weight * (stft_loss_mg + stft_loss_sc)

        # subband STFT Loss
        if self.use_subband_stft_loss:
            subband_stft_loss_mg, subband_stft_loss_sc = self.subband_stft_loss(y_hat_sub, y_sub)
            return_dict['G_subband_stft_loss_mg'] = subband_stft_loss_mg
            return_dict['G_subband_stft_loss_sc'] = subband_stft_loss_sc
            gen_loss += self.subband_stft_loss_weight * (subband_stft_loss_mg + subband_stft_loss_sc)

        # multiscale MSE adversarial loss
        if self.use_mse_gan_loss and scores_fake is not None:
            mse_fake_loss = _apply_G_adv_loss(scores_fake, self.mse_loss)
            return_dict['G_mse_fake_loss'] = mse_fake_loss
            adv_loss += self.mse_gan_loss_weight * mse_fake_loss

        # multiscale Hinge adversarial loss
        # BUGFIX: original guard was `not scores_fake is not None`, which ran
        # the loss exactly when the scores were missing (and crashed).
        if self.use_hinge_gan_loss and scores_fake is not None:
            hinge_fake_loss = _apply_G_adv_loss(scores_fake, self.hinge_loss)
            return_dict['G_hinge_fake_loss'] = hinge_fake_loss
            adv_loss += self.hinge_gan_loss_weight * hinge_fake_loss

        # Feature Matching Loss
        # BUGFIX: original guard was `not feats_fake`, computing the loss only
        # when the features were absent.
        if self.use_feat_match_loss and feats_fake is not None:
            feat_match_loss = self.feat_match_loss(feats_fake, feats_real)
            return_dict['G_feat_match_loss'] = feat_match_loss
            adv_loss += self.feat_match_loss_weight * feat_match_loss

        return_dict['G_loss'] = gen_loss + adv_loss
        return_dict['G_gen_loss'] = gen_loss
        return_dict['G_adv_loss'] = adv_loss
        return return_dict
class DiscriminatorLoss(nn.Module):
    """ Compute Discriminator Loss values depending on training
    configuration """
    def __init__(self, C):
        super().__init__()
        assert not(C.use_mse_gan_loss and C.use_hinge_gan_loss),\
            " [!] Cannot use HingeGANLoss and MSEGANLoss together."

        self.use_mse_gan_loss = C.use_mse_gan_loss
        self.use_hinge_gan_loss = C.use_hinge_gan_loss

        if C.use_mse_gan_loss:
            self.mse_loss = MSEDLoss()
        if C.use_hinge_gan_loss:
            self.hinge_loss = HingeDLoss()

    def forward(self, scores_fake, scores_real):
        """Return a dict with the enabled D loss terms plus ``D_loss``."""
        return_dict = {}
        total = 0

        if self.use_mse_gan_loss:
            loss, loss_real, loss_fake = _apply_D_loss(
                scores_fake=scores_fake,
                scores_real=scores_real,
                loss_func=self.mse_loss)
            return_dict['D_mse_gan_loss'] = loss
            return_dict['D_mse_gan_real_loss'] = loss_real
            return_dict['D_mse_gan_fake_loss'] = loss_fake
            total = total + loss

        if self.use_hinge_gan_loss:
            loss, loss_real, loss_fake = _apply_D_loss(
                scores_fake=scores_fake,
                scores_real=scores_real,
                loss_func=self.hinge_loss)
            return_dict['D_hinge_gan_loss'] = loss
            return_dict['D_hinge_gan_real_loss'] = loss_real
            return_dict['D_hinge_gan_fake_loss'] = loss_fake
            total = total + loss

        return_dict['D_loss'] = total
        return return_dict
| 11,390
| 35.392971
| 122
|
py
|
TTS
|
TTS-master/TTS/vocoder/layers/parallel_wavegan.py
|
import torch
from torch.nn import functional as F
class ResidualBlock(torch.nn.Module):
    """Residual block module in WaveNet: gated dilated conv with optional
    local conditioning, producing a residual output and a skip output."""
    def __init__(self,
                 kernel_size=3,
                 res_channels=64,
                 gate_channels=128,
                 skip_channels=64,
                 aux_channels=80,
                 dropout=0.0,
                 dilation=1,
                 bias=True,
                 use_causal_conv=False):
        super().__init__()
        self.dropout = dropout

        # no future time stamps available
        if use_causal_conv:
            padding = (kernel_size - 1) * dilation
        else:
            assert (kernel_size -
                    1) % 2 == 0, "Not support even number kernel size."
            padding = (kernel_size - 1) // 2 * dilation
        self.use_causal_conv = use_causal_conv

        # dilated conv producing both halves of the gate at once
        self.conv = torch.nn.Conv1d(res_channels,
                                    gate_channels,
                                    kernel_size,
                                    padding=padding,
                                    dilation=dilation,
                                    bias=bias)

        # local conditioning projection (disabled when aux_channels == 0)
        if aux_channels > 0:
            self.conv1x1_aux = torch.nn.Conv1d(aux_channels,
                                               gate_channels,
                                               1,
                                               bias=False)
        else:
            self.conv1x1_aux = None

        # conv output is split into two groups
        gate_out_channels = gate_channels // 2
        self.conv1x1_out = torch.nn.Conv1d(gate_out_channels,
                                           res_channels,
                                           1,
                                           bias=bias)
        self.conv1x1_skip = torch.nn.Conv1d(gate_out_channels,
                                            skip_channels,
                                            1,
                                            bias=bias)

    def forward(self, x, c):
        """
        x: B x D_res x T
        c: B x D_aux x T (or None to skip local conditioning)
        """
        residual = x
        h = F.dropout(x, p=self.dropout, training=self.training)
        h = self.conv(h)

        # drop the look-ahead frames introduced by causal padding
        if self.use_causal_conv:
            h = h[:, :, :residual.size(-1)]

        # split into two part for gated activation
        half = h.size(1) // 2
        tanh_in, sig_in = h.split(half, dim=1)

        # local conditioning
        if c is not None:
            assert self.conv1x1_aux is not None
            cond = self.conv1x1_aux(c)
            cond_a, cond_b = cond.split(cond.size(1) // 2, dim=1)
            tanh_in = tanh_in + cond_a
            sig_in = sig_in + cond_b

        gated = torch.tanh(tanh_in) * torch.sigmoid(sig_in)

        # for skip connection
        s = self.conv1x1_skip(gated)

        # for residual connection
        # NOTE(review): the scale here is 0.5**2 (= 0.25); upstream WaveNet
        # implementations use sqrt(0.5) — kept as-is for checkpoint parity.
        out = (self.conv1x1_out(gated) + residual) * (0.5**2)
        return out, s
| 3,035
| 33.5
| 71
|
py
|
TTS
|
TTS-master/TTS/vocoder/layers/pqmf.py
|
import numpy as np
import torch
import torch.nn.functional as F
from scipy import signal as sig
# adapted from
# https://github.com/kan-bayashi/ParallelWaveGAN/tree/master/parallel_wavegan
class PQMF(torch.nn.Module):
    """Pseudo-QMF analysis/synthesis filter bank with N subbands.

    Adapted from kan-bayashi/ParallelWaveGAN.
    """
    def __init__(self, N=4, taps=62, cutoff=0.15, beta=9.0):
        super(PQMF, self).__init__()
        self.N = N
        self.taps = taps
        self.cutoff = cutoff
        self.beta = beta
        # prototype lowpass filter (Kaiser window design)
        QMF = sig.firwin(taps + 1, cutoff, window=('kaiser', beta))
        H = np.zeros((N, len(QMF)))
        G = np.zeros((N, len(QMF)))
        for band in range(N):
            constant_factor = (2 * band + 1) * (np.pi / (2 * N)) * (
                np.arange(taps + 1) -
                ((taps - 1) / 2))  # TODO: (taps - 1) -> taps
            phase = (-1)**band * np.pi / 4
            H[band] = 2 * QMF * np.cos(constant_factor + phase)
            G[band] = 2 * QMF * np.cos(constant_factor - phase)
        self.register_buffer("H", torch.from_numpy(H[:, None, :]).float())
        self.register_buffer("G", torch.from_numpy(G[None, :, :]).float())
        # identity kernel used for the up/down-sampling (transposed) conv
        updown_filter = torch.zeros((N, N, N)).float()
        for band in range(N):
            updown_filter[band, band, 0] = 1.0
        self.register_buffer("updown_filter", updown_filter)
        self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)

    def forward(self, x):
        """Alias for :meth:`analysis`."""
        return self.analysis(x)

    def analysis(self, x):
        """Split a waveform (B, 1, T) into N subbands (B, N, T // N)."""
        return F.conv1d(x, self.H, padding=self.taps // 2, stride=self.N)

    def synthesis(self, x):
        """Merge N subbands back into a single waveform."""
        x = F.conv_transpose1d(x, self.updown_filter * self.N, stride=self.N)
        return F.conv1d(x, self.G, padding=self.taps // 2)
| 1,833
| 31.175439
| 102
|
py
|
TTS
|
TTS-master/TTS/vocoder/layers/melgan.py
|
from torch import nn
from torch.nn.utils import weight_norm
class ResidualStack(nn.Module):
    """Stack of dilated residual blocks used by the MelGAN generator."""
    def __init__(self, channels, num_res_blocks, kernel_size):
        super(ResidualStack, self).__init__()
        assert (kernel_size - 1) % 2 == 0, " [!] kernel_size has to be odd."
        base_padding = (kernel_size - 1) // 2
        self.blocks = nn.ModuleList()
        for idx in range(num_res_blocks):
            # dilation grows exponentially with depth; pad to keep T fixed
            dilation = kernel_size**idx
            padding = base_padding * dilation
            self.blocks.append(
                nn.Sequential(
                    nn.LeakyReLU(0.2),
                    nn.ReflectionPad1d(padding),
                    weight_norm(
                        nn.Conv1d(channels,
                                  channels,
                                  kernel_size=kernel_size,
                                  dilation=dilation,
                                  bias=True)),
                    nn.LeakyReLU(0.2),
                    weight_norm(
                        nn.Conv1d(channels, channels, kernel_size=1,
                                  bias=True)),
                ))
        # 1x1 projections for the residual shortcuts
        self.shortcuts = nn.ModuleList([
            weight_norm(nn.Conv1d(channels, channels, kernel_size=1,
                                  bias=True)) for _ in range(num_res_blocks)
        ])

    def forward(self, x):
        for block, shortcut in zip(self.blocks, self.shortcuts):
            x = shortcut(x) + block(x)
        return x

    def remove_weight_norm(self):
        for block, shortcut in zip(self.blocks, self.shortcuts):
            # indices 2 and 4 are the weight-normed convolutions
            nn.utils.remove_weight_norm(block[2])
            nn.utils.remove_weight_norm(block[4])
            nn.utils.remove_weight_norm(shortcut)
| 1,707
| 36.130435
| 77
|
py
|
TTS
|
TTS-master/TTS/vocoder/layers/wavegrad.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
class Conv1d(nn.Conv1d):
    """``nn.Conv1d`` with orthogonal weight and zero bias initialization."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        nn.init.zeros_(self.bias)
        nn.init.orthogonal_(self.weight)
class PositionalEncoding(nn.Module):
    """Positional encoding with noise level conditioning"""
    def __init__(self, n_channels, max_len=10000):
        super().__init__()
        self.n_channels = n_channels
        self.max_len = max_len
        # scaling constant applied to the positional table
        self.C = 5000
        # cached encoding table, grown lazily in forward()
        self.pe = torch.zeros(0, 0)

    def forward(self, x, noise_level):
        # rebuild the table if the input is longer than the cached one
        if x.shape[2] > self.pe.shape[1]:
            self.init_pe_matrix(x.shape[1], x.shape[2], x)
        pos = self.pe[:, :x.size(2)].repeat(x.shape[0], 1, 1) / self.C
        return x + noise_level[..., None, None] + pos

    def init_pe_matrix(self, n_channels, max_len, x):
        """Build a (n_channels, max_len) sinusoidal table on x's device/dtype."""
        table = torch.zeros(max_len, n_channels)
        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.pow(10000, torch.arange(0, n_channels, 2).float() / n_channels)
        table[:, 0::2] = torch.sin(pos / div_term)
        table[:, 1::2] = torch.cos(pos / div_term)
        self.pe = table.transpose(0, 1).to(x)
class FiLM(nn.Module):
    """Produce per-channel (shift, scale) conditioning pairs.

    The input features are projected, conditioned on the noise level via
    :class:`PositionalEncoding`, and projected again to twice the output
    channels, which are split into the shift and scale tensors.
    """
    def __init__(self, input_size, output_size):
        super().__init__()
        self.encoding = PositionalEncoding(input_size)
        self.input_conv = nn.Conv1d(input_size, input_size, 3, padding=1)
        self.output_conv = nn.Conv1d(input_size, output_size * 2, 3, padding=1)
        nn.init.xavier_uniform_(self.input_conv.weight)
        nn.init.xavier_uniform_(self.output_conv.weight)
        nn.init.zeros_(self.input_conv.bias)
        nn.init.zeros_(self.output_conv.bias)

    def forward(self, x, noise_scale):
        h = self.input_conv(x)
        h = F.leaky_relu(h, 0.2)
        h = self.encoding(h, noise_scale)
        # the doubled output channels carry the (shift, scale) pair
        shift, scale = torch.chunk(self.output_conv(h), 2, dim=1)
        return shift, scale

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.input_conv)
        nn.utils.remove_weight_norm(self.output_conv)

    def apply_weight_norm(self):
        self.input_conv = weight_norm(self.input_conv)
        self.output_conv = weight_norm(self.output_conv)
@torch.jit.script
def shif_and_scale(x, scale, shift):
    """Apply a FiLM affine transform: ``shift + scale * x``."""
    return shift + scale * x
class UBlock(nn.Module):
    """Upsampling block with FiLM shift/scale modulated residual branches."""
    def __init__(self, input_size, hidden_size, factor, dilation):
        super().__init__()
        assert isinstance(dilation, (list, tuple))
        assert len(dilation) == 4
        self.factor = factor
        self.res_block = Conv1d(input_size, hidden_size, 1)
        self.main_block = nn.ModuleList([
            Conv1d(input_size, hidden_size, 3,
                   dilation=dilation[0], padding=dilation[0]),
            Conv1d(hidden_size, hidden_size, 3,
                   dilation=dilation[1], padding=dilation[1]),
        ])
        self.out_block = nn.ModuleList([
            Conv1d(hidden_size, hidden_size, 3,
                   dilation=dilation[2], padding=dilation[2]),
            Conv1d(hidden_size, hidden_size, 3,
                   dilation=dilation[3], padding=dilation[3]),
        ])

    def forward(self, x, shift, scale):
        # both the residual and the main path are upsampled by `factor`
        target_len = x.shape[-1] * self.factor
        x_up = F.interpolate(x, size=target_len)
        res = self.res_block(x_up)
        h = F.leaky_relu(x_up, 0.2)
        # NOTE(review): h already has target_len, so this second interpolate
        # is presumably a no-op kept from the original — confirm.
        h = F.interpolate(h, size=target_len)
        h = self.main_block[0](h)
        h = shif_and_scale(h, scale, shift)
        h = F.leaky_relu(h, 0.2)
        h = self.main_block[1](h)
        res2 = res + h
        h = shif_and_scale(res2, scale, shift)
        h = F.leaky_relu(h, 0.2)
        h = self.out_block[0](h)
        h = shif_and_scale(h, scale, shift)
        h = F.leaky_relu(h, 0.2)
        h = self.out_block[1](h)
        return h + res2

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.res_block)
        for layer in self.main_block:
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)
        for layer in self.out_block:
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)

    def apply_weight_norm(self):
        self.res_block = weight_norm(self.res_block)
        for idx, layer in enumerate(self.main_block):
            if len(layer.state_dict()) != 0:
                self.main_block[idx] = weight_norm(layer)
        for idx, layer in enumerate(self.out_block):
            if len(layer.state_dict()) != 0:
                self.out_block[idx] = weight_norm(layer)
class DBlock(nn.Module):
    """Downsampling block: residual 1x1 path plus a dilated conv stack."""
    def __init__(self, input_size, hidden_size, factor):
        super().__init__()
        self.factor = factor
        self.res_block = Conv1d(input_size, hidden_size, 1)
        self.main_block = nn.ModuleList([
            Conv1d(input_size, hidden_size, 3, dilation=1, padding=1),
            Conv1d(hidden_size, hidden_size, 3, dilation=2, padding=2),
            Conv1d(hidden_size, hidden_size, 3, dilation=4, padding=4),
        ])

    def forward(self, x):
        # both paths are downsampled to T // factor
        out_len = x.shape[-1] // self.factor
        res = F.interpolate(self.res_block(x), size=out_len)
        h = F.interpolate(x, size=out_len)
        for conv in self.main_block:
            h = F.leaky_relu(h, 0.2)
            h = conv(h)
        return h + res

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.res_block)
        for layer in self.main_block:
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)

    def apply_weight_norm(self):
        self.res_block = weight_norm(self.res_block)
        for idx, layer in enumerate(self.main_block):
            if len(layer.state_dict()) != 0:
                self.main_block[idx] = weight_norm(layer)
| 6,178
| 34.107955
| 106
|
py
|
TTS
|
TTS-master/TTS/vocoder/layers/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/vocoder/layers/upsample.py
|
import torch
from torch.nn import functional as F
class Stretch2d(torch.nn.Module):
    """Stretch a 4D tensor along its last two axes by fixed scale factors."""
    def __init__(self, x_scale, y_scale, mode="nearest"):
        super(Stretch2d, self).__init__()
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.mode = mode

    def forward(self, x):
        """
        x (Tensor): Input tensor (B, C, F, T).
        Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale),
        """
        scale = (self.y_scale, self.x_scale)
        return F.interpolate(x, scale_factor=scale, mode=self.mode)
class UpsampleNetwork(torch.nn.Module):
    """Upsample conditioning features in time by a chain of scale factors.

    Each stage stretches the time axis by one factor (interpolation) and
    smooths the result with a 2D convolution; an optional activation follows
    every stage.

    Args:
        upsample_factors (list): per-stage time upsampling factors.
        nonlinear_activation (str): name of a ``torch.nn`` activation class
            applied after each conv stage, or ``None``.
        nonlinear_activation_params (dict): kwargs for that activation.
            ``None`` (the default) means no extra kwargs.
        interpolate_mode (str): interpolation mode for the stretch stage.
        freq_axis_kernel_size (int): odd conv kernel size along the
            frequency axis.
        use_causal_conv (bool): pad so that no future frames are used.
    """
    def __init__(self,
                 upsample_factors,
                 nonlinear_activation=None,
                 nonlinear_activation_params=None,
                 interpolate_mode="nearest",
                 freq_axis_kernel_size=1,
                 use_causal_conv=False,
                 ):
        super(UpsampleNetwork, self).__init__()
        # avoid the shared mutable-default-argument pitfall for the dict
        if nonlinear_activation_params is None:
            nonlinear_activation_params = {}
        self.use_causal_conv = use_causal_conv
        self.up_layers = torch.nn.ModuleList()
        for scale in upsample_factors:
            # interpolation layer
            stretch = Stretch2d(scale, 1, interpolate_mode)
            self.up_layers += [stretch]
            # conv layer
            assert (freq_axis_kernel_size - 1) % 2 == 0, "Not support even number freq axis kernel size."
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
            if use_causal_conv:
                # asymmetric padding is trimmed by the caller for causality
                padding = (freq_axis_padding, scale * 2)
            else:
                padding = (freq_axis_padding, scale)
            conv = torch.nn.Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
            self.up_layers += [conv]
            # nonlinear
            if nonlinear_activation is not None:
                nonlinear = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
                self.up_layers += [nonlinear]

    def forward(self, c):
        """
        c : (B, C, T_in).
        Tensor: (B, C, T_upsample)
        """
        c = c.unsqueeze(1)  # (B, 1, C, T)
        for f in self.up_layers:
            c = f(c)
        return c.squeeze(1)  # (B, C, T')
class ConvUpsample(torch.nn.Module):
    """Context convolution followed by an :class:`UpsampleNetwork`.

    A 1D conv first widens the receptive field over the (already padded)
    conditioning features, then the result is upsampled in time.

    Args:
        upsample_factors (list): per-stage time upsampling factors.
        nonlinear_activation (str): optional ``torch.nn`` activation name.
        nonlinear_activation_params (dict): kwargs for that activation.
            ``None`` (the default) means no extra kwargs.
        interpolate_mode (str): interpolation mode for the stretch stages.
        freq_axis_kernel_size (int): odd conv kernel size on the freq axis.
        aux_channels (int): number of conditioning feature channels.
        aux_context_window (int): context frames captured by the input conv.
        use_causal_conv (bool): use only past context frames.
    """
    def __init__(self,
                 upsample_factors,
                 nonlinear_activation=None,
                 nonlinear_activation_params=None,
                 interpolate_mode="nearest",
                 freq_axis_kernel_size=1,
                 aux_channels=80,
                 aux_context_window=0,
                 use_causal_conv=False
                 ):
        super(ConvUpsample, self).__init__()
        # avoid the shared mutable-default-argument pitfall for the dict
        if nonlinear_activation_params is None:
            nonlinear_activation_params = {}
        self.aux_context_window = aux_context_window
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        # To capture wide-context information in conditional features
        kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
        # NOTE(kan-bayashi): Here do not use padding because the input is already padded
        self.conv_in = torch.nn.Conv1d(aux_channels, aux_channels, kernel_size=kernel_size, bias=False)
        self.upsample = UpsampleNetwork(
            upsample_factors=upsample_factors,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv,
        )

    def forward(self, c):
        """
        c : (B, C, T_in).
        Tensor: (B, C, T_upsampled),
        """
        c_ = self.conv_in(c)
        # drop the trailing frames so causal output aligns with the input
        c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_
        return self.upsample(c)
| 3,899
| 37.235294
| 105
|
py
|
TTS
|
TTS-master/TTS/vocoder/utils/generic_utils.py
|
import re
import torch
import importlib
import numpy as np
from matplotlib import pyplot as plt
from TTS.tts.utils.visual import plot_spectrogram
def interpolate_vocoder_input(scale_factor, spec):
    """Interpolate spectrogram by the scale factor.

    It is mainly used to match the sampling rates of
    the tts and vocoder models.

    Args:
        scale_factor (float): scale factor to interpolate the spectrogram
        spec (np.array): spectrogram to be interpolated

    Returns:
        torch.tensor: interpolated spectrogram.
    """
    print(" > before interpolation :", spec.shape)
    # add batch and channel dims for 2D interpolation
    spec_t = torch.tensor(spec).unsqueeze(0).unsqueeze(0)  # pylint: disable=not-callable
    spec_t = torch.nn.functional.interpolate(
        spec_t,
        scale_factor=scale_factor,
        recompute_scale_factor=True,
        mode='bilinear',
        align_corners=False)
    spec_t = spec_t.squeeze(0)
    print(" > after interpolation :", spec_t.shape)
    return spec_t
def plot_results(y_hat, y, ap, global_step, name_prefix):
    """Plot vocoder model results for one sample.

    Args:
        y_hat: batch of generated waveforms; the first item is plotted.
        y: batch of ground-truth waveforms; the first item is plotted.
        ap: audio processor providing ``melspectrogram``.
        global_step (int): current training step, shown in the plot title.
        name_prefix (str): prefix for the returned figure keys.

    Returns:
        dict: figure name -> matplotlib figure (fake/real/diff spectrograms
        plus a waveform comparison plot).
    """
    # select an instance from batch
    y_hat = y_hat[0].squeeze(0).detach().cpu().numpy()
    y = y[0].squeeze(0).detach().cpu().numpy()
    spec_fake = ap.melspectrogram(y_hat).T
    spec_real = ap.melspectrogram(y).T
    spec_diff = np.abs(spec_fake - spec_real)
    # plot figure and save it
    fig_wave = plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(y)
    plt.title("groundtruth speech")
    plt.subplot(2, 1, 2)
    plt.plot(y_hat)
    plt.title(f"generated speech @ {global_step} steps")
    plt.tight_layout()
    # close so the figure is not displayed; the object is still returned
    plt.close()
    figures = {
        name_prefix + "spectrogram/fake": plot_spectrogram(spec_fake),
        name_prefix + "spectrogram/real": plot_spectrogram(spec_real),
        name_prefix + "spectrogram/diff": plot_spectrogram(spec_diff),
        name_prefix + "speech_comparison": fig_wave,
    }
    return figures
def to_camel(text):
    """Convert a snake_case name to CamelCase, e.g. ``melgan_generator`` -> ``MelganGenerator``."""
    capitalized = text.capitalize()
    return re.sub(r'(?!^)_([a-zA-Z])', lambda match: match.group(1).upper(), capitalized)
def setup_wavernn(c):
    """Instantiate a WaveRNN vocoder model from the config ``c``."""
    print(" > Model: WaveRNN")
    module = importlib.import_module("TTS.vocoder.models.wavernn")
    MyModel = getattr(module, "WaveRNN")
    wavernn_params = c.wavernn_model_params
    model = MyModel(
        rnn_dims=wavernn_params['rnn_dims'],
        fc_dims=wavernn_params['fc_dims'],
        mode=c.mode,
        mulaw=c.mulaw,
        pad=c.padding,
        use_aux_net=wavernn_params['use_aux_net'],
        use_upsample_net=wavernn_params['use_upsample_net'],
        upsample_factors=wavernn_params['upsample_factors'],
        feat_dims=c.audio['num_mels'],
        compute_dims=wavernn_params['compute_dims'],
        res_out_dims=wavernn_params['res_out_dims'],
        num_res_blocks=wavernn_params['num_res_blocks'],
        hop_length=c.audio["hop_length"],
        sample_rate=c.audio["sample_rate"],
    )
    return model
def setup_generator(c):
    """Instantiate the vocoder generator selected by ``c.generator_model``.

    The previous implementation compared model names with the substring
    operator (``name in 'multiband_melgan_generator'``), which made several
    branches match at once (e.g. ``melgan_generator`` is a substring of both
    the multiband and fullband names) and constructed the model repeatedly.
    Exact ``==`` comparison keeps a single, unambiguous branch per model
    while producing the same final model for every valid config value.
    """
    print(" > Generator Model: {}".format(c.generator_model))
    MyModel = importlib.import_module('TTS.vocoder.models.' +
                                      c.generator_model.lower())
    MyModel = getattr(MyModel, to_camel(c.generator_model))
    if c.generator_model.lower() == 'melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=1,
            proj_kernel=7,
            base_channels=512,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if c.generator_model == 'melgan_fb_generator':
        pass
    if c.generator_model.lower() == 'multiband_melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=4,
            proj_kernel=7,
            base_channels=384,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if c.generator_model.lower() == 'fullband_melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=1,
            proj_kernel=7,
            base_channels=512,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if c.generator_model.lower() == 'parallel_wavegan_generator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'],
            stacks=c.generator_model_params['stacks'],
            res_channels=64,
            gate_channels=128,
            skip_channels=64,
            aux_channels=c.audio['num_mels'],
            dropout=0.0,
            bias=True,
            use_weight_norm=True,
            upsample_factors=c.generator_model_params['upsample_factors'])
    if c.generator_model.lower() == 'wavegrad':
        model = MyModel(
            in_channels=c['audio']['num_mels'],
            out_channels=1,
            use_weight_norm=c['model_params']['use_weight_norm'],
            x_conv_channels=c['model_params']['x_conv_channels'],
            y_conv_channels=c['model_params']['y_conv_channels'],
            dblock_out_channels=c['model_params']['dblock_out_channels'],
            ublock_out_channels=c['model_params']['ublock_out_channels'],
            upsample_factors=c['model_params']['upsample_factors'],
            upsample_dilations=c['model_params']['upsample_dilations'])
    return model
def setup_discriminator(c):
    """Instantiate the vocoder discriminator selected by
    ``c.discriminator_model``.

    The first two branches used the substring operator (``name in '...'``)
    where equality was intended, unlike the last two which already use
    ``==``. All branches now compare with ``==`` for correctness and
    consistency; the outcome is unchanged for every valid model name.
    """
    print(" > Discriminator Model: {}".format(c.discriminator_model))
    if 'parallel_wavegan' in c.discriminator_model:
        MyModel = importlib.import_module(
            'TTS.vocoder.models.parallel_wavegan_discriminator')
    else:
        MyModel = importlib.import_module('TTS.vocoder.models.' +
                                          c.discriminator_model.lower())
    MyModel = getattr(MyModel, to_camel(c.discriminator_model.lower()))
    if c.discriminator_model == 'random_window_discriminator':
        model = MyModel(
            cond_channels=c.audio['num_mels'],
            hop_length=c.audio['hop_length'],
            uncond_disc_donwsample_factors=c.
            discriminator_model_params['uncond_disc_donwsample_factors'],
            cond_disc_downsample_factors=c.
            discriminator_model_params['cond_disc_downsample_factors'],
            cond_disc_out_channels=c.
            discriminator_model_params['cond_disc_out_channels'],
            window_sizes=c.discriminator_model_params['window_sizes'])
    if c.discriminator_model == 'melgan_multiscale_discriminator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_sizes=(5, 3),
            base_channels=c.discriminator_model_params['base_channels'],
            max_channels=c.discriminator_model_params['max_channels'],
            downsample_factors=c.
            discriminator_model_params['downsample_factors'])
    if c.discriminator_model == 'residual_parallel_wavegan_discriminator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            num_layers=c.discriminator_model_params['num_layers'],
            stacks=c.discriminator_model_params['stacks'],
            res_channels=64,
            gate_channels=128,
            skip_channels=64,
            dropout=0.0,
            bias=True,
            nonlinear_activation="LeakyReLU",
            nonlinear_activation_params={"negative_slope": 0.2},
        )
    if c.discriminator_model == 'parallel_wavegan_discriminator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            num_layers=c.discriminator_model_params['num_layers'],
            conv_channels=64,
            dilation_factor=1,
            nonlinear_activation="LeakyReLU",
            nonlinear_activation_params={"negative_slope": 0.2},
            bias=True
        )
    return model
# def check_config(c):
# c = None
# pass
| 8,372
| 37.585253
| 87
|
py
|
TTS
|
TTS-master/TTS/vocoder/utils/distribution.py
|
import numpy as np
import math
import torch
from torch.distributions.normal import Normal
import torch.nn.functional as F
def gaussian_loss(y_hat, y, log_std_min=-7.0):
    """Mean negative log-likelihood of ``y`` under a diagonal Gaussian.

    ``y_hat`` packs (mean, log_std) in its last dimension; the log std is
    clamped from below for numerical stability.
    """
    assert y_hat.dim() == 3
    assert y_hat.size(2) == 2
    mean = y_hat[:, :, :1]
    log_std = torch.clamp(y_hat[:, :, 1:], min=log_std_min)
    # TODO: replace with pytorch dist
    squared_err = torch.pow(y - mean, 2) * torch.exp((-2.0 * log_std))
    log_probs = -0.5 * (-math.log(2.0 * math.pi) - 2.0 * log_std - squared_err)
    return log_probs.squeeze().mean()
def sample_from_gaussian(y_hat, log_std_min=-7.0, scale_factor=1.0):
    """Draw a sample from the Gaussian parameterized by ``y_hat``.

    ``y_hat`` packs (mean, log_std) in its last dimension; the sample is
    clipped to ``[-scale_factor, scale_factor]``.
    """
    assert y_hat.size(2) == 2
    mean = y_hat[:, :, :1]
    log_std = torch.clamp(y_hat[:, :, 1:], min=log_std_min)
    dist = Normal(mean, torch.exp(log_std))
    sample = dist.sample()
    sample = sample.clamp(min=-scale_factor).clamp(max=scale_factor)
    del dist
    return sample
def log_sum_exp(x):
    """ numerically stable log_sum_exp implementation that prevents overflow """
    # reduce over the trailing axis (TF ordering)
    axis = x.dim() - 1
    m, _ = torch.max(x, dim=axis)
    m_keep, _ = torch.max(x, dim=axis, keepdim=True)
    return m + torch.log(torch.exp(x - m_keep).sum(dim=axis))
# It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py
def discretized_mix_logistic_loss(
    y_hat, y, num_classes=65536, log_scale_min=None, reduce=True
):
    """Discretized mixture-of-logistics negative log-likelihood.

    Adapted from r9y9/wavenet_vocoder. The channel axis of ``y_hat`` packs
    ``nr_mix`` mixture logits, ``nr_mix`` means and ``nr_mix`` log scales.
    ``y`` is expanded against the means (presumably shape (B, T, 1) — TODO
    confirm against callers). Note the initial ``permute``/``transpose``
    pair cancels out, leaving ``y_hat`` as it arrived.

    Returns the mean NLL as a scalar when ``reduce``, otherwise per-element
    NLL with a trailing singleton axis.
    """
    if log_scale_min is None:
        log_scale_min = float(np.log(1e-14))
    y_hat = y_hat.permute(0, 2, 1)
    assert y_hat.dim() == 3
    assert y_hat.size(1) % 3 == 0
    nr_mix = y_hat.size(1) // 3
    # (B x T x C)
    y_hat = y_hat.transpose(1, 2)
    # unpack parameters. (B, T, num_mixtures) x 3
    logit_probs = y_hat[:, :, :nr_mix]
    means = y_hat[:, :, nr_mix: 2 * nr_mix]
    log_scales = torch.clamp(
        y_hat[:, :, 2 * nr_mix: 3 * nr_mix], min=log_scale_min)
    # B x T x 1 -> B x T x num_mixtures
    y = y.expand_as(means)
    centered_y = y - means
    inv_stdv = torch.exp(-log_scales)
    # CDF evaluated at the upper and lower edges of the quantization bin
    plus_in = inv_stdv * (centered_y + 1.0 / (num_classes - 1))
    cdf_plus = torch.sigmoid(plus_in)
    min_in = inv_stdv * (centered_y - 1.0 / (num_classes - 1))
    cdf_min = torch.sigmoid(min_in)
    # log probability for edge case of 0 (before scaling)
    # equivalent: torch.log(F.sigmoid(plus_in))
    log_cdf_plus = plus_in - F.softplus(plus_in)
    # log probability for edge case of 255 (before scaling)
    # equivalent: (1 - F.sigmoid(min_in)).log()
    log_one_minus_cdf_min = -F.softplus(min_in)
    # probability for all other cases
    cdf_delta = cdf_plus - cdf_min
    mid_in = inv_stdv * centered_y
    # log probability in the center of the bin, to be used in extreme cases
    # (not actually used in our code)
    log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
    # tf equivalent
    # log_probs = tf.where(x < -0.999, log_cdf_plus,
    #                      tf.where(x > 0.999, log_one_minus_cdf_min,
    #                               tf.where(cdf_delta > 1e-5,
    #                                        tf.log(tf.maximum(cdf_delta, 1e-12)),
    #                                        log_pdf_mid - np.log(127.5))))
    # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value
    # for num_classes=65536 case? 1e-7? not sure..
    inner_inner_cond = (cdf_delta > 1e-5).float()
    inner_inner_out = inner_inner_cond * torch.log(
        torch.clamp(cdf_delta, min=1e-12)
    ) + (1.0 - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2))
    inner_cond = (y > 0.999).float()
    inner_out = (
        inner_cond * log_one_minus_cdf_min +
        (1.0 - inner_cond) * inner_inner_out
    )
    cond = (y < -0.999).float()
    log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
    # add the mixture weights (log-softmax of the logits)
    log_probs = log_probs + F.log_softmax(logit_probs, -1)
    if reduce:
        return -torch.mean(log_sum_exp(log_probs))
    return -log_sum_exp(log_probs).unsqueeze(-1)
def sample_from_discretized_mix_logistic(y, log_scale_min=None):
    """
    Sample from discretized mixture of logistic distributions
    Args:
        y (Tensor): B x C x T
        log_scale_min (float): Log scale minimum value
    Returns:
        Tensor: sample in range of [-1, 1].
    """
    if log_scale_min is None:
        log_scale_min = float(np.log(1e-14))
    assert y.size(1) % 3 == 0
    nr_mix = y.size(1) // 3
    # B x T x C
    y = y.transpose(1, 2)
    logit_probs = y[:, :, :nr_mix]
    # sample mixture indicator from softmax
    # (Gumbel-max trick: argmax of logits plus -log(-log(U)) noise)
    temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
    temp = logit_probs.data - torch.log(-torch.log(temp))
    _, argmax = temp.max(dim=-1)
    # (B, T) -> (B, T, nr_mix)
    one_hot = to_one_hot(argmax, nr_mix)
    # select logistic parameters
    means = torch.sum(y[:, :, nr_mix: 2 * nr_mix] * one_hot, dim=-1)
    log_scales = torch.clamp(
        torch.sum(y[:, :, 2 * nr_mix: 3 * nr_mix] * one_hot, dim=-1), min=log_scale_min
    )
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    # (inverse-CDF sampling of the logistic distribution)
    u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)
    x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))
    x = torch.clamp(torch.clamp(x, min=-1.0), max=1.0)
    return x
def to_one_hot(tensor, n, fill_with=1.0):
    """One-hot encode ``tensor`` along a new trailing axis of size ``n``."""
    shape = tensor.size() + (n,)
    one_hot = torch.FloatTensor(*shape).zero_()
    if tensor.is_cuda:
        one_hot = one_hot.cuda()
    one_hot.scatter_(tensor.dim(), tensor.unsqueeze(-1), fill_with)
    return one_hot
| 5,684
| 32.639053
| 99
|
py
|
TTS
|
TTS-master/TTS/vocoder/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/vocoder/utils/io.py
|
import os
import torch
import datetime
import pickle as pickle_tts
from TTS.utils.io import RenamingUnpickler
# NOTE(review): the `eval` parameter shadows the builtin; kept for API compatibility.
def load_checkpoint(model, checkpoint_path, use_cuda=False, eval=False):
    """Load a vocoder checkpoint into ``model``.

    Args:
        model: model instance to receive the checkpoint weights.
        checkpoint_path (str): path of the saved checkpoint.
        use_cuda (bool): move the model to GPU after loading.
        eval (bool): switch the model to eval mode after loading.

    Returns:
        tuple: (model, full checkpoint state dict).
    """
    try:
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    except ModuleNotFoundError:
        # old checkpoints may reference renamed modules; retry with the
        # renaming unpickler (NOTE: this monkey-patches pickle_tts globally)
        pickle_tts.Unpickler = RenamingUnpickler
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'), pickle_module=pickle_tts)
    model.load_state_dict(state['model'])
    if use_cuda:
        model.cuda()
    if eval:
        model.eval()
    return model, state
def save_model(model, optimizer, scheduler, model_disc, optimizer_disc,
               scheduler_disc, current_step, epoch, output_path, **kwargs):
    """Serialize the full GAN training state to ``output_path``.

    Any component passed as ``None`` is stored as ``None``; extra kwargs
    are merged into the saved state dict.
    """
    def _state_dict(component):
        # optional components (e.g. no discriminator) are stored as None
        return component.state_dict() if component is not None else None

    # unwrap DataParallel/DistributedDataParallel wrappers
    if hasattr(model, 'module'):
        model_state = model.module.state_dict()
    else:
        model_state = model.state_dict()
    state = {
        'model': model_state,
        'optimizer': _state_dict(optimizer),
        'scheduler': _state_dict(scheduler),
        'model_disc': _state_dict(model_disc),
        'optimizer_disc': _state_dict(optimizer_disc),
        'scheduler_disc': _state_dict(scheduler_disc),
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    state.update(kwargs)
    torch.save(state, output_path)
def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
                    scheduler_disc, current_step, epoch, output_folder,
                    **kwargs):
    """Save a step-numbered training checkpoint into ``output_folder``."""
    checkpoint_path = os.path.join(
        output_folder, 'checkpoint_{}.pth.tar'.format(current_step))
    print(" > CHECKPOINT : {}".format(checkpoint_path))
    save_model(model, optimizer, scheduler, model_disc, optimizer_disc,
               scheduler_disc, current_step, epoch, checkpoint_path, **kwargs)
def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
                    model_disc, optimizer_disc, scheduler_disc, current_step,
                    epoch, output_folder, **kwargs):
    """Overwrite ``best_model.pth.tar`` when ``target_loss`` improves.

    Returns the new best loss (``target_loss`` when improved, otherwise
    the unchanged ``best_loss``).
    """
    # guard clause: nothing to do unless the loss improved
    if target_loss >= best_loss:
        return best_loss
    checkpoint_path = os.path.join(output_folder, 'best_model.pth.tar')
    print(" > BEST MODEL : {}".format(checkpoint_path))
    save_model(model,
               optimizer,
               scheduler,
               model_disc,
               optimizer_disc,
               scheduler_disc,
               current_step,
               epoch,
               checkpoint_path,
               model_loss=target_loss,
               **kwargs)
    return target_loss
| 3,117
| 36.119048
| 103
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/models/multiband_melgan_generator.py
|
import tensorflow as tf
from TTS.vocoder.tf.models.melgan_generator import MelganGenerator
from TTS.vocoder.tf.layers.pqmf import PQMF
#pylint: disable=too-many-ancestors
#pylint: disable=abstract-method
class MultibandMelganGenerator(MelganGenerator):
    """Multiband MelGAN generator (TF): MelGAN backbone producing 4 subbands
    that are merged to a waveform with a PQMF synthesis filter bank."""
    def __init__(self,
                 in_channels=80,
                 out_channels=4,
                 proj_kernel=7,
                 base_channels=384,
                 upsample_factors=(2, 8, 2, 2),
                 res_kernel=3,
                 num_res_blocks=3):
        super(MultibandMelganGenerator,
              self).__init__(in_channels=in_channels,
                             out_channels=out_channels,
                             proj_kernel=proj_kernel,
                             base_channels=base_channels,
                             upsample_factors=upsample_factors,
                             res_kernel=res_kernel,
                             num_res_blocks=num_res_blocks)
        # PQMF merges the out_channels subbands into one waveform
        self.pqmf_layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0)
    def pqmf_analysis(self, x):
        # split a waveform into subbands
        return self.pqmf_layer.analysis(x)
    def pqmf_synthesis(self, x):
        # merge subbands back into a waveform
        return self.pqmf_layer.synthesis(x)
    def inference(self, c):
        """Run the generator on features ``c`` (B, C, T) and merge subbands."""
        c = tf.transpose(c, perm=[0, 2, 1])
        c = tf.expand_dims(c, 2)
        # FIXME: TF had no replicate padding as in Torch
        # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT")
        o = c
        for layer in self.model_layers:
            o = layer(o)
        o = tf.transpose(o, perm=[0, 3, 2, 1])
        o = self.pqmf_layer.synthesis(o[:, :, 0, :])
        return o
    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([1, 80, None], dtype=tf.float32),
        ],)
    def inference_tflite(self, c):
        """Fixed-signature variant of :meth:`inference` for TFLite export."""
        c = tf.transpose(c, perm=[0, 2, 1])
        c = tf.expand_dims(c, 2)
        # FIXME: TF had no replicate padding as in Torch
        # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT")
        o = c
        for layer in self.model_layers:
            o = layer(o)
        o = tf.transpose(o, perm=[0, 3, 2, 1])
        o = self.pqmf_layer.synthesis(o[:, :, 0, :])
        return o
| 2,290
| 36.557377
| 110
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/models/melgan_generator.py
|
import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
from TTS.vocoder.tf.layers.melgan import ResidualStack, ReflectionPad1d
#pylint: disable=too-many-ancestors
#pylint: disable=abstract-method
class MelganGenerator(tf.keras.models.Model):
    """ Melgan Generator TF implementation dedicated for inference with no
    weight norm """
    def __init__(self,
                 in_channels=80,
                 out_channels=1,
                 proj_kernel=7,
                 base_channels=512,
                 upsample_factors=(8, 8, 2, 2),
                 res_kernel=3,
                 num_res_blocks=3):
        super(MelganGenerator, self).__init__()
        self.in_channels = in_channels
        # assert model parameters
        assert (proj_kernel -
                1) % 2 == 0, " [!] proj_kernel should be an odd number."
        # setup additional model parameters
        base_padding = (proj_kernel - 1) // 2
        act_slope = 0.2
        self.inference_padding = 2
        # initial layer
        self.initial_layer = [
            ReflectionPad1d(base_padding),
            tf.keras.layers.Conv2D(filters=base_channels,
                                   kernel_size=(proj_kernel, 1),
                                   strides=1,
                                   padding='valid',
                                   use_bias=True,
                                   name="1")
        ]
        # NOTE(review): layer `name`s appear to mirror the Torch layer indices
        # so Torch weights can be mapped onto this model — do not change them.
        num_layers = 3  # count number of layers for layer naming
        # upsampling layers and residual stacks
        self.upsample_layers = []
        for idx, upsample_factor in enumerate(upsample_factors):
            # channels halve at every upsampling stage
            layer_out_channels = base_channels // (2**(idx + 1))
            layer_filter_size = upsample_factor * 2
            layer_stride = upsample_factor
            # layer_output_padding = upsample_factor % 2
            self.upsample_layers += [
                tf.keras.layers.LeakyReLU(act_slope),
                tf.keras.layers.Conv2DTranspose(
                    filters=layer_out_channels,
                    kernel_size=(layer_filter_size, 1),
                    strides=(layer_stride, 1),
                    padding='same',
                    # output_padding=layer_output_padding,
                    use_bias=True,
                    name=f'{num_layers}'),
                ResidualStack(channels=layer_out_channels,
                              num_res_blocks=num_res_blocks,
                              kernel_size=res_kernel,
                              name=f'layers.{num_layers + 1}')
            ]
            num_layers += num_res_blocks - 1
        self.upsample_layers += [tf.keras.layers.LeakyReLU(act_slope)]
        # final layer
        self.final_layers = [
            ReflectionPad1d(base_padding),
            tf.keras.layers.Conv2D(filters=out_channels,
                                   kernel_size=(proj_kernel, 1),
                                   use_bias=True,
                                   name=f'layers.{num_layers + 1}'),
            tf.keras.layers.Activation("tanh")
        ]
        # self.model_layers = tf.keras.models.Sequential(self.initial_layer + self.upsample_layers + self.final_layers, name="layers")
        self.model_layers = self.initial_layer + self.upsample_layers + self.final_layers
    @tf.function(experimental_relax_shapes=True)
    def call(self, c, training=False):
        """
        c : B x C x T
        """
        if training:
            raise NotImplementedError()
        return self.inference(c)
    def inference(self, c):
        """Generate a waveform from features ``c`` (B, C, T)."""
        c = tf.transpose(c, perm=[0, 2, 1])
        c = tf.expand_dims(c, 2)
        # FIXME: TF had no replicate padding as in Torch
        # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT")
        o = c
        for layer in self.model_layers:
            o = layer(o)
        # o = self.model_layers(c)
        o = tf.transpose(o, perm=[0, 3, 2, 1])
        return o[:, :, 0, :]
    def build_inference(self):
        """Run one dummy forward pass so layer weights are created."""
        x = tf.random.uniform((1, self.in_channels, 4), dtype=tf.float32)
        self(x, training=False)
    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([1, None, None], dtype=tf.float32),
        ],)
    def inference_tflite(self, c):
        """Fixed-signature variant of :meth:`inference` for TFLite export."""
        c = tf.transpose(c, perm=[0, 2, 1])
        c = tf.expand_dims(c, 2)
        # FIXME: TF had no replicate padding as in Torch
        # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT")
        o = c
        for layer in self.model_layers:
            o = layer(o)
        # o = self.model_layers(c)
        o = tf.transpose(o, perm=[0, 3, 2, 1])
        return o[:, :, 0, :]
| 4,858
| 36.666667
| 134
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/layers/pqmf.py
|
import numpy as np
import tensorflow as tf
from scipy import signal as sig
class PQMF(tf.keras.layers.Layer):
    """Pseudo-QMF analysis/synthesis filter bank (TF port of the Torch PQMF)."""
    def __init__(self, N=4, taps=62, cutoff=0.15, beta=9.0):
        super(PQMF, self).__init__()
        # define filter coefficient
        self.N = N
        self.taps = taps
        self.cutoff = cutoff
        self.beta = beta
        # prototype lowpass filter (Kaiser window design)
        QMF = sig.firwin(taps + 1, cutoff, window=('kaiser', beta))
        H = np.zeros((N, len(QMF)))
        G = np.zeros((N, len(QMF)))
        for k in range(N):
            constant_factor = (2 * k + 1) * (np.pi /
                                             (2 * N)) * (np.arange(taps + 1) -
                                                         ((taps - 1) / 2))
            phase = (-1)**k * np.pi / 4
            H[k] = 2 * QMF * np.cos(constant_factor + phase)
            G[k] = 2 * QMF * np.cos(constant_factor - phase)
        # [N, 1, taps + 1] == [filter_width, in_channels, out_channels]
        self.H = np.transpose(H[:, None, :], (2, 1, 0)).astype('float32')
        self.G = np.transpose(G[None, :, :], (2, 1, 0)).astype('float32')
        # filter for downsampling & upsampling
        updown_filter = np.zeros((N, N, N), dtype=np.float32)
        for k in range(N):
            updown_filter[0, k, k] = 1.0
        self.updown_filter = updown_filter.astype(np.float32)
    def analysis(self, x):
        """
        x : B x 1 x T
        """
        # TF convs operate channels-last, so transpose in and out
        x = tf.transpose(x, perm=[0, 2, 1])
        x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]], constant_values=0.0)
        x = tf.nn.conv1d(x, self.H, stride=1, padding='VALID')
        x = tf.nn.conv1d(x,
                         self.updown_filter,
                         stride=self.N,
                         padding='VALID')
        x = tf.transpose(x, perm=[0, 2, 1])
        return x
    def synthesis(self, x):
        """
        x : B x D x T
        """
        x = tf.transpose(x, perm=[0, 2, 1])
        # upsample by N with the identity filter, then apply synthesis filters
        x = tf.nn.conv1d_transpose(
            x,
            self.updown_filter * self.N,
            strides=self.N,
            output_shape=(tf.shape(x)[0], tf.shape(x)[1] * self.N,
                          self.N))
        x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]], constant_values=0.0)
        x = tf.nn.conv1d(x, self.G, stride=1, padding="VALID")
        x = tf.transpose(x, perm=[0, 2, 1])
        return x
| 2,396
| 34.776119
| 94
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/layers/melgan.py
|
import tensorflow as tf
class ReflectionPad1d(tf.keras.layers.Layer):
    """Reflection-pad the time axis of a (B, T, 1, C)-style 4D tensor.

    TF counterpart of Torch's ReflectionPad1d for the Conv2D-as-Conv1D
    layout used by this MelGAN port.
    """
    def __init__(self, padding):
        super(ReflectionPad1d, self).__init__()
        # number of frames mirrored on each side of axis 1
        self.padding = padding
    def call(self, x):
        return tf.pad(x, [[0, 0], [self.padding, self.padding], [0, 0], [0, 0]], "REFLECT")
class ResidualStack(tf.keras.layers.Layer):
    """Stack of dilated residual convolution blocks (MelGAN, TF port)."""
    def __init__(self, channels, num_res_blocks, kernel_size, name):
        super(ResidualStack, self).__init__(name=name)
        assert (kernel_size - 1) % 2 == 0, " [!] kernel_size has to be odd."
        base_padding = (kernel_size - 1) // 2
        self.blocks = []
        # layer names like 'blocks.{idx}.{num_layers}' presumably mirror the
        # Torch module paths for name-based weight transfer — verify against
        # convert_torch_to_tf_utils before renaming anything.
        num_layers = 2
        for idx in range(num_res_blocks):
            layer_kernel_size = kernel_size
            # dilation grows as kernel_size ** block_index
            layer_dilation = layer_kernel_size**idx
            layer_padding = base_padding * layer_dilation
            block = [
                tf.keras.layers.LeakyReLU(0.2),
                ReflectionPad1d(layer_padding),
                tf.keras.layers.Conv2D(filters=channels,
                                       kernel_size=(kernel_size, 1),
                                       dilation_rate=(layer_dilation, 1),
                                       use_bias=True,
                                       padding='valid',
                                       name=f'blocks.{idx}.{num_layers}'),
                tf.keras.layers.LeakyReLU(0.2),
                tf.keras.layers.Conv2D(filters=channels,
                                       kernel_size=(1, 1),
                                       use_bias=True,
                                       name=f'blocks.{idx}.{num_layers + 2}')
            ]
            self.blocks.append(block)
        # 1x1 conv shortcut per residual block
        self.shortcuts = [
            tf.keras.layers.Conv2D(channels,
                                   kernel_size=1,
                                   use_bias=True,
                                   name=f'shortcuts.{i}')
            for i in range(num_res_blocks)
        ]
    def call(self, x):
        # each block's output is summed with a 1x1-conv shortcut of its input
        for block, shortcut in zip(self.blocks, self.shortcuts):
            res = shortcut(x)
            for layer in block:
                x = layer(x)
            x += res
        return x
| 2,191
| 37.45614
| 91
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/utils/generic_utils.py
|
import re
import importlib
def to_camel(text):
    """Convert a snake_case name to CamelCase, e.g. 'melgan_generator' -> 'MelganGenerator'."""
    capitalized = text.capitalize()

    def _upper_group(match):
        # replace '_x' with 'X' everywhere except a leading underscore
        return match.group(1).upper()

    return re.sub(r'(?!^)_([a-zA-Z])', _upper_group, capitalized)
def setup_generator(c):
    """Instantiate the TF vocoder generator described by config ``c``.

    Args:
        c: config object with ``generator_model``, ``generator_model_params``
            and ``audio`` entries.

    Returns:
        The instantiated generator model.

    Raises:
        ValueError: if ``c.generator_model`` does not name a supported generator.
    """
    print(" > Generator Model: {}".format(c.generator_model))
    MyModel = importlib.import_module('TTS.vocoder.tf.models.' +
                                      c.generator_model.lower())
    MyModel = getattr(MyModel, to_camel(c.generator_model))
    model = None
    # BUGFIX: the old substring checks (`c.generator_model in '...'`) matched
    # 'melgan_generator' inside 'multiband_melgan_generator', so the multiband
    # branch silently overwrote the melgan model. Compare names exactly.
    if c.generator_model == 'melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=1,
            proj_kernel=7,
            base_channels=512,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    elif c.generator_model == 'melgan_fb_generator':
        # not implemented for the TF backend
        pass
    elif c.generator_model == 'multiband_melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=4,
            proj_kernel=7,
            base_channels=384,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if model is None:
        # fail loudly instead of the old NameError on an unset `model`
        raise ValueError(
            f" [!] Unsupported generator model: {c.generator_model}")
    return model
| 1,274
| 34.416667
| 74
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/utils/convert_torch_to_tf_utils.py
|
import numpy as np
import tensorflow as tf
def compare_torch_tf(torch_tensor, tf_tensor):
    """Compute the mean absolute difference between a torch and a tf tensor."""
    difference = torch_tensor.detach().numpy() - tf_tensor.numpy()
    return abs(difference).mean()
def convert_tf_name(tf_name):
    """Map TF variable-name patterns to their Torch state_dict equivalents."""
    # order matters: the specific LSTM paths must be rewritten before the
    # generic '/recurrent_kernel' and '/kernel' rules.
    replacements = (
        (':0', ''),
        ('/forward_lstm/lstm_cell_1/recurrent_kernel', '/weight_hh_l0'),
        ('/forward_lstm/lstm_cell_2/kernel', '/weight_ih_l1'),
        ('/recurrent_kernel', '/weight_hh'),
        ('/kernel', '/weight'),
        ('/gamma', '/weight'),
        ('/beta', '/bias'),
        ('/', '.'),
    )
    converted = tf_name
    for old, new in replacements:
        converted = converted.replace(old, new)
    return converted
def transfer_weights_torch_to_tf(tf_vars, var_map_dict, state_dict):
    """ Transfer weights from torch state_dict to TF variables """
    print(" > Passing weights from Torch to TF ...")
    for tf_var in tf_vars:
        torch_var_name = var_map_dict[tf_var.name]
        print(f' | > {tf_var.name} <-- {torch_var_name}')
        # if tuple, it is a bias variable
        if 'kernel' in tf_var.name:
            torch_weight = state_dict[torch_var_name]
            # permute + new axis: assumes Torch conv layout (out, in, kernel)
            # -> TF (kernel, 1, in, out) — TODO confirm against the layer defs
            numpy_weight = torch_weight.permute([2, 1, 0]).numpy()[:, None, :, :]
        if 'bias' in tf_var.name:
            torch_weight = state_dict[torch_var_name]
            numpy_weight = torch_weight
        # NOTE(review): a variable whose name contains neither 'kernel' nor
        # 'bias' would leave `numpy_weight` unbound (NameError) — confirm all
        # mapped variables fall into one of the two cases.
        assert np.all(tf_var.shape == numpy_weight.shape), f" [!] weight shapes does not match: {tf_var.name} vs {torch_var_name} --> {tf_var.shape} vs {numpy_weight.shape}"
        tf.keras.backend.set_value(tf_var, numpy_weight)
    return tf_vars
def load_tf_vars(model_tf, tf_vars):
    """Load a list of TF variables into ``model_tf`` by name."""
    for tf_var in tf_vars:
        # NOTE(review): passes a single variable where Keras `set_weights`
        # expects a list, and uses a *variable* name as a *layer* name —
        # verify this code path is actually exercised.
        model_tf.get_layer(tf_var.name).set_weights(tf_var)
    return model_tf
| 1,997
| 42.434783
| 173
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/utils/tflite.py
|
import tensorflow as tf
def convert_melgan_to_tflite(model,
                             output_path=None,
                             experimental_converter=True):
    """Convert Tensorflow MelGAN model to TFLite. Save a binary file if output_path is
    provided, else return TFLite model."""
    concrete_function = model.inference_tflite.get_concrete_function()
    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [concrete_function])
    converter.experimental_new_converter = experimental_converter
    # no quantization/optimization: keep full float weights
    converter.optimizations = []
    # SELECT_TF_OPS allows fallback to full TF kernels for unsupported ops
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    print(f'Tflite Model size is {len(tflite_model) / (1024.0 * 1024.0)} MBs.')
    if output_path is not None:
        # save model binary if output_path is provided
        with open(output_path, 'wb') as f:
            f.write(tflite_model)
        return None
    return tflite_model
def load_tflite_model(tflite_path):
    """Load a TFLite model file and pre-allocate its tensors for inference."""
    tflite_model = tf.lite.Interpreter(model_path=tflite_path)
    tflite_model.allocate_tensors()
    return tflite_model
| 1,169
| 35.5625
| 86
|
py
|
TTS
|
TTS-master/TTS/vocoder/tf/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/vocoder/tf/utils/io.py
|
import datetime
import pickle
import tensorflow as tf
def save_checkpoint(model, current_step, epoch, output_path, **kwargs):
    """Save a TF vocoder checkpoint as a pickle file.

    Args:
        model: model exposing a ``weights`` attribute (list of variables).
        current_step: global training step stored under ``'step'``.
        epoch: training epoch stored under ``'epoch'``.
        output_path: destination file path.
        **kwargs: extra entries merged into the checkpoint dict.
    """
    state = {
        'model': model.weights,
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    state.update(kwargs)
    # BUGFIX: the old `pickle.dump(state, open(path, 'wb'))` never closed the
    # file handle; use a context manager so the file is flushed and closed.
    with open(output_path, 'wb') as f:
        pickle.dump(state, f)
def load_checkpoint(model, checkpoint_path):
    """Load pickled checkpoint weights into a TF vocoder model.

    Args:
        model: model exposing a ``weights`` attribute (list of TF variables).
        checkpoint_path: path to a checkpoint written by ``save_checkpoint``.

    Returns:
        The model with checkpoint values assigned to its variables.
    """
    # BUGFIX: the old `pickle.load(open(path, 'rb'))` leaked the file handle.
    with open(checkpoint_path, 'rb') as f:
        checkpoint = pickle.load(f)
    # map variable names to their saved numpy values
    chkp_var_dict = {var.name: var.numpy() for var in checkpoint['model']}
    for tf_var in model.weights:
        # a KeyError here means checkpoint and model architectures differ
        chkp_var_value = chkp_var_dict[tf_var.name]
        tf.keras.backend.set_value(tf_var, chkp_var_value)
    return model
| 831
| 28.714286
| 74
|
py
|
TTS
|
TTS-master/TTS/server/server.py
|
#!flask/bin/python
import argparse
import os
import sys
import io
from pathlib import Path
from flask import Flask, render_template, request, send_file
from TTS.utils.synthesizer import Synthesizer
from TTS.utils.manage import ModelManager
from TTS.utils.io import load_config
def create_argparser():
    """Build the CLI argument parser for the TTS demo server."""
    def convert_boolean(x):
        # accept 'true'/'1'/'yes' (any case) as True, everything else as False
        return x.lower() in ['true', '1', 'yes']

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--list_models', type=convert_boolean, nargs='?', const=True,
        default=False,
        help='list available pre-trained tts and vocoder models.')
    parser.add_argument(
        '--model_name', type=str,
        help='name of one of the released tts models.')
    parser.add_argument(
        '--vocoder_name', type=str,
        help='name of one of the released vocoder models.')
    parser.add_argument(
        '--tts_checkpoint', type=str,
        help='path to custom tts checkpoint file')
    parser.add_argument(
        '--tts_config', type=str,
        help='path to custom tts config.json file')
    parser.add_argument(
        '--tts_speakers', type=str,
        help='path to JSON file containing speaker ids, if speaker ids are used in the model')
    parser.add_argument(
        '--vocoder_config', type=str, default=None,
        help='path to vocoder config file.')
    parser.add_argument(
        '--vocoder_checkpoint', type=str, default=None,
        help='path to vocoder checkpoint file.')
    parser.add_argument(
        '--port', type=int, default=5002,
        help='port to listen on.')
    parser.add_argument(
        '--use_cuda', type=convert_boolean, default=False,
        help='true to use CUDA.')
    parser.add_argument(
        '--debug', type=convert_boolean, default=False,
        help='true to enable Flask debug mode.')
    parser.add_argument(
        '--show_details', type=convert_boolean, default=False,
        help='Generate model detail page.')
    return parser
# --- module-level server setup: runs at import time ---
# parses CLI args, may download released models, and builds the synthesizer.
synthesizer = None
# default locations for models embedded next to this file
embedded_models_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model')
embedded_tts_folder = os.path.join(embedded_models_folder, 'tts')
tts_checkpoint_file = os.path.join(embedded_tts_folder, 'checkpoint.pth.tar')
tts_config_file = os.path.join(embedded_tts_folder, 'config.json')
embedded_vocoder_folder = os.path.join(embedded_models_folder, 'vocoder')
vocoder_checkpoint_file = os.path.join(embedded_vocoder_folder, 'checkpoint.pth.tar')
vocoder_config_file = os.path.join(embedded_vocoder_folder, 'config.json')
# These models are soon to be deprecated
embedded_wavernn_folder = os.path.join(embedded_models_folder, 'wavernn')
wavernn_checkpoint_file = os.path.join(embedded_wavernn_folder, 'checkpoint.pth.tar')
wavernn_config_file = os.path.join(embedded_wavernn_folder, 'config.json')
args = create_argparser().parse_args()
path = Path(__file__).parent / "../.models.json"
manager = ModelManager(path)
# --list_models only prints the catalog and exits
if args.list_models:
    manager.list_models()
    sys.exit()
# set models by the released models
if args.model_name is not None:
    tts_checkpoint_file, tts_config_file = manager.download_model(args.model_name)
if args.vocoder_name is not None:
    vocoder_checkpoint_file, vocoder_config_file = manager.download_model(args.vocoder_name)
# If these were not specified in the CLI args, use default values with embedded model files
if not args.tts_checkpoint and os.path.isfile(tts_checkpoint_file):
    args.tts_checkpoint = tts_checkpoint_file
if not args.tts_config and os.path.isfile(tts_config_file):
    args.tts_config = tts_config_file
if not args.vocoder_checkpoint and os.path.isfile(vocoder_checkpoint_file):
    args.vocoder_checkpoint = vocoder_checkpoint_file
if not args.vocoder_config and os.path.isfile(vocoder_config_file):
    args.vocoder_config = vocoder_config_file
synthesizer = Synthesizer(args.tts_checkpoint, args.tts_config, args.vocoder_checkpoint, args.vocoder_config, args.use_cuda)
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the demo landing page."""
    return render_template('index.html', show_details=args.show_details)
@app.route('/details')
def details():
    """Render the model-details page with the loaded TTS/vocoder configs."""
    model_config = load_config(args.tts_config)
    vocoder_config = None
    if args.vocoder_config is not None and os.path.isfile(args.vocoder_config):
        vocoder_config = load_config(args.vocoder_config)
    return render_template('details.html',
                           show_details=args.show_details,
                           model_config=model_config,
                           vocoder_config=vocoder_config,
                           args=args.__dict__)
@app.route('/api/tts', methods=['GET'])
def tts():
    """Synthesize the `text` query parameter and return it as a WAV response."""
    text = request.args.get('text')
    print(" > Model input: {}".format(text))
    wavs = synthesizer.tts(text)
    out = io.BytesIO()
    synthesizer.save_wav(wavs, out)
    # NOTE(review): `out` is not rewound (seek(0)) after writing — if
    # save_wav leaves the cursor at EOF, send_file streams nothing; confirm
    # save_wav's behavior.
    return send_file(out, mimetype='audio/wav')
def main():
    """Run the Flask dev server, bound to all interfaces on the configured port."""
    app.run(debug=args.debug, host='0.0.0.0', port=args.port)
if __name__ == '__main__':
    main()
| 4,812
| 40.136752
| 159
|
py
|
TTS
|
TTS-master/TTS/server/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/tests/test_layers.py
|
import unittest
import torch as T
from TTS.tts.layers.tacotron import Prenet, CBHG, Decoder, Encoder
from TTS.tts.layers.losses import L1LossMasked, SSIMLoss
from TTS.tts.utils.generic_utils import sequence_mask
# pylint: disable=unused-variable
class PrenetTests(unittest.TestCase):
    """Prenet maps (B, in) -> (B, out_features[-1]), keeping the batch dim."""
    def test_in_out(self): #pylint: disable=no-self-use
        layer = Prenet(128, out_features=[256, 128])
        dummy_input = T.rand(4, 128)
        print(layer)
        output = layer(dummy_input)
        assert output.shape[0] == 4
        assert output.shape[1] == 128
class CBHGTests(unittest.TestCase):
    """CBHG shape check: (B, D, T) input -> (B, T, features) output."""
    def test_in_out(self):
        #pylint: disable=attribute-defined-outside-init
        layer = self.cbhg = CBHG(
            128,
            K=8,
            conv_bank_features=80,
            conv_projections=[160, 128],
            highway_features=80,
            gru_features=80,
            num_highways=4)
        # B x D x T
        dummy_input = T.rand(4, 128, 8)
        print(layer)
        output = layer(dummy_input)
        assert output.shape[0] == 4
        assert output.shape[1] == 8
        # 160 presumably == 2 * gru_features from a bidirectional GRU — confirm
        assert output.shape[2] == 160
class DecoderTests(unittest.TestCase):
    """Tacotron decoder shape check with r=2 output frames per step."""
    @staticmethod
    def test_in_out():
        layer = Decoder(
            in_channels=256,
            frame_channels=80,
            r=2,
            memory_size=4,
            attn_windowing=False,
            attn_norm="sigmoid",
            attn_K=5,
            attn_type="original",
            prenet_type='original',
            prenet_dropout=True,
            forward_attn=True,
            trans_agent=True,
            forward_attn_mask=True,
            location_attn=True,
            separate_stopnet=True)
        # encoder outputs (B, T_in, 256) and r ground-truth frames (B, r, 80)
        dummy_input = T.rand(4, 8, 256)
        dummy_memory = T.rand(4, 2, 80)
        output, alignment, stop_tokens = layer(
            dummy_input, dummy_memory, mask=None)
        assert output.shape[0] == 4
        assert output.shape[1] == 80, "size not {}".format(output.shape[1])
        assert output.shape[2] == 2, "size not {}".format(output.shape[2])
        assert stop_tokens.shape[0] == 4
class EncoderTests(unittest.TestCase):
    """Tacotron encoder shape check: (B, T, 128) -> (B, T, 256)."""
    def test_in_out(self): #pylint: disable=no-self-use
        layer = Encoder(128)
        dummy_input = T.rand(4, 8, 128)
        print(layer)
        output = layer(dummy_input)
        print(output.shape)
        assert output.shape[0] == 4
        assert output.shape[1] == 8
        assert output.shape[2] == 256 # 128 * 2 BiRNN
class L1LossMaskedTests(unittest.TestCase):
    """L1LossMasked: padded positions (beyond each length) must not affect the loss."""
    def test_in_out(self): #pylint: disable=no-self-use
        # test input == target
        layer = L1LossMasked(seq_len_norm=False)
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.ones(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 0.0
        # test input != target
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 1.0, "1.0 vs {}".format(output.item())
        # test if padded values of input makes any difference
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.arange(5, 9)).long()
        # mask is -100 at padded positions, 0 elsewhere; adding it perturbs
        # only the padded region, which the loss must ignore
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert output.item() == 1.0, "1.0 vs {}".format(output.item())
        dummy_input = T.rand(4, 8, 128).float()
        dummy_target = dummy_input.detach()
        dummy_length = (T.arange(5, 9)).long()
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert output.item() == 0, "0 vs {}".format(output.item())
        # seq_len_norm = True
        # test input == target
        layer = L1LossMasked(seq_len_norm=True)
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.ones(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 0.0
        # test input != target
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 1.0, "1.0 vs {}".format(output.item())
        # test if padded values of input makes any difference
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.arange(5, 9)).long()
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item())
        dummy_input = T.rand(4, 8, 128).float()
        dummy_target = dummy_input.detach()
        dummy_length = (T.arange(5, 9)).long()
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert output.item() == 0, "0 vs {}".format(output.item())
class SSIMLossTests(unittest.TestCase):
    """SSIMLoss: padded positions (beyond each length) must not affect the loss."""
    def test_in_out(self): #pylint: disable=no-self-use
        # test input == target
        layer = SSIMLoss()
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.ones(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 0.0
        # test input != target
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert abs(output.item() - 1.0) < 1e-4 , "1.0 vs {}".format(output.item())
        # test if padded values of input makes any difference
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.arange(5, 9)).long()
        # mask is -100 at padded positions only; the loss must ignore it
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert abs(output.item() - 1.0) < 1e-4, "1.0 vs {}".format(output.item())
        dummy_input = T.rand(4, 8, 128).float()
        dummy_target = dummy_input.detach()
        dummy_length = (T.arange(5, 9)).long()
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert output.item() == 0, "0 vs {}".format(output.item())
        # seq_len_norm = True
        # test input == target
        # NOTE(review): this second half instantiates L1LossMasked inside the
        # SSIM test — looks like a copy-paste from L1LossMaskedTests; confirm
        # whether SSIMLoss (with a seq_len_norm variant) was intended here.
        layer = L1LossMasked(seq_len_norm=True)
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.ones(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 0.0
        # test input != target
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.ones(4) * 8).long()
        output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() == 1.0, "1.0 vs {}".format(output.item())
        # test if padded values of input makes any difference
        dummy_input = T.ones(4, 8, 128).float()
        dummy_target = T.zeros(4, 8, 128).float()
        dummy_length = (T.arange(5, 9)).long()
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item())
        dummy_input = T.rand(4, 8, 128).float()
        dummy_target = dummy_input.detach()
        dummy_length = (T.arange(5, 9)).long()
        mask = (
            (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
        output = layer(dummy_input + mask, dummy_target, dummy_length)
        assert output.item() == 0, "0 vs {}".format(output.item())
| 8,598
| 37.909502
| 82
|
py
|
TTS
|
TTS-master/tests/test_vocoder_melgan_generator.py
|
import numpy as np
import torch
from TTS.vocoder.models.melgan_generator import MelganGenerator
def test_melgan_generator():
    """MelganGenerator maps (B, 80 mels, T) -> (B, 1, T * 256) waveforms."""
    model = MelganGenerator()
    print(model)
    dummy_input = torch.rand((4, 80, 64))
    output = model(dummy_input)
    assert np.all(output.shape == (4, 1, 64 * 256))
    # inference() yields 4 extra frames of output — presumably from its
    # inference padding; confirm against MelganGenerator.inference
    output = model.inference(dummy_input)
    assert np.all(output.shape == (4, 1, (64 + 4) * 256))
| 400
| 27.642857
| 63
|
py
|
TTS
|
TTS-master/tests/test_text_processing.py
|
import os
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
# pylint: disable=unused-import
import unittest
from tests import get_tests_input_path
from TTS.tts.utils.text import *
from tests import get_tests_path
from TTS.utils.io import load_config
# shared test config; supplies the custom `characters` set used by the tests below
conf = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
def test_phoneme_to_sequence():
    """Round-trip en-us text through phoneme_to_sequence / sequence_to_phoneme.

    NOTE(review): `text_hat_with_params` decodes the sequence that was encoded
    WITHOUT `tp=conf.characters`, and each `_` encode result is unused —
    presumably intentional (both charsets decode the same ids) but worth
    confirming.
    """
    text = "Recent research at Harvard has shown meditating for as little as 8 weeks can actually increase, the grey matter in the parts of the brain responsible for emotional regulation and learning!"
    text_cleaner = ["phoneme_cleaners"]
    lang = "en-us"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "ɹiːsənt ɹɪsɜːtʃ æt hɑːɹvɚd hɐz ʃoʊn mɛdᵻteɪɾɪŋ fɔːɹ æz lɪɾəl æz eɪt wiːks kæn æktʃuːəli ɪnkɹiːs, ðə ɡɹeɪ mæɾɚɹ ɪnðə pɑːɹts ʌvðə bɹeɪn ɹɪspɑːnsəbəl fɔːɹ ɪmoʊʃənəl ɹɛɡjuːleɪʃən ænd lɜːnɪŋ!"
    assert text_hat == text_hat_with_params == gt
    # multiple punctuations
    text = "Be a voice, not an! echo?"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ?"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # not ending with punctuation
    text = "Be a voice, not an! echo"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # original
    text = "Be a voice, not an echo!"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "biː ɐ vɔɪs, nɑːt ɐn ɛkoʊ!"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # extra space after the sentence
    text = "Be a voice, not an! echo. "
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ."
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # extra space after the sentence; positional True presumably enables the
    # eos/bos markers (^ / ~ in gt) — confirm the parameter name
    text = "Be a voice, not an! echo. "
    sequence = phoneme_to_sequence(text, text_cleaner, lang, True)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "^biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ.~"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # padding char
    text = "_Be a _voice, not an! echo_"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
def test_phoneme_to_sequence_with_blank_token():
    """Same round-trips as test_phoneme_to_sequence, with `add_blank=True` decoding.

    NOTE(review): the first `phoneme_to_sequence` in each section is called
    WITHOUT `add_blank=True`, so the decoded `sequence` never contains blank
    tokens — the blank-token path may not actually be exercised; confirm.
    """
    text = "Recent research at Harvard has shown meditating for as little as 8 weeks can actually increase, the grey matter in the parts of the brain responsible for emotional regulation and learning!"
    text_cleaner = ["phoneme_cleaners"]
    lang = "en-us"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "ɹiːsənt ɹɪsɜːtʃ æt hɑːɹvɚd hɐz ʃoʊn mɛdᵻteɪɾɪŋ fɔːɹ æz lɪɾəl æz eɪt wiːks kæn æktʃuːəli ɪnkɹiːs, ðə ɡɹeɪ mæɾɚɹ ɪnðə pɑːɹts ʌvðə bɹeɪn ɹɪspɑːnsəbəl fɔːɹ ɪmoʊʃənəl ɹɛɡjuːleɪʃən ænd lɜːnɪŋ!"
    assert text_hat == text_hat_with_params == gt
    # multiple punctuations
    text = "Be a voice, not an! echo?"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ?"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # not ending with punctuation
    text = "Be a voice, not an! echo"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # original
    text = "Be a voice, not an echo!"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "biː ɐ vɔɪs, nɑːt ɐn ɛkoʊ!"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # extra space after the sentence
    text = "Be a voice, not an! echo. "
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ."
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # extra space after the sentence; positional True presumably enables the
    # eos/bos markers (^ / ~ in gt) — confirm the parameter name
    text = "Be a voice, not an! echo. "
    sequence = phoneme_to_sequence(text, text_cleaner, lang, True)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "^biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ.~"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
    # padding char
    text = "_Be a _voice, not an! echo_"
    sequence = phoneme_to_sequence(text, text_cleaner, lang)
    text_hat = sequence_to_phoneme(sequence)
    _ = phoneme_to_sequence(text, text_cleaner, lang, tp=conf.characters, add_blank=True)
    text_hat_with_params = sequence_to_phoneme(sequence, tp=conf.characters, add_blank=True)
    gt = "biː ɐ vɔɪs, nɑːt ɐn! ɛkoʊ"
    print(text_hat)
    print(len(sequence))
    assert text_hat == text_hat_with_params == gt
def test_text2phone():
    """text2phone returns '|'-delimited phonemes; word separators are kept as ' ' tokens."""
    text = "Recent research at Harvard has shown meditating for as little as 8 weeks can actually increase, the grey matter in the parts of the brain responsible for emotional regulation and learning!"
    gt = "ɹ|iː|s|ə|n|t| |ɹ|ɪ|s|ɜː|tʃ| |æ|t| |h|ɑːɹ|v|ɚ|d| |h|ɐ|z| |ʃ|oʊ|n| |m|ɛ|d|ᵻ|t|eɪ|ɾ|ɪ|ŋ| |f|ɔː|ɹ| |æ|z| |l|ɪ|ɾ|əl| |æ|z| |eɪ|t| |w|iː|k|s| |k|æ|n| |æ|k|tʃ|uː|əl|i| |ɪ|n|k|ɹ|iː|s|,| |ð|ə| |ɡ|ɹ|eɪ| |m|æ|ɾ|ɚ|ɹ| |ɪ|n|ð|ə| |p|ɑːɹ|t|s| |ʌ|v|ð|ə| |b|ɹ|eɪ|n| |ɹ|ɪ|s|p|ɑː|n|s|ə|b|əl| |f|ɔː|ɹ| |ɪ|m|oʊ|ʃ|ə|n|əl| |ɹ|ɛ|ɡ|j|uː|l|eɪ|ʃ|ə|n| |æ|n|d| |l|ɜː|n|ɪ|ŋ|!"
    lang = "en-us"
    ph = text2phone(text, lang)
    assert gt == ph
| 8,369
| 46.828571
| 355
|
py
|
TTS
|
TTS-master/tests/test_preprocessors.py
|
import unittest
import os
from tests import get_tests_input_path
from TTS.tts.datasets.preprocess import common_voice
class TestPreprocessors(unittest.TestCase):
    """common_voice preprocessor yields (transcript, wav_path) pairs from a TSV file."""
    def test_common_voice_preprocessor(self): #pylint: disable=no-self-use
        root_path = get_tests_input_path()
        meta_file = "common_voice.tsv"
        items = common_voice(root_path, meta_file)
        # check first and last items to cover the whole file parse
        assert items[0][0] == 'The applicants are invited for coffee and visa is given immediately.'
        assert items[0][1] == os.path.join(get_tests_input_path(), "clips", "common_voice_en_20005954.wav")
        assert items[-1][0] == "Competition for limited resources has also resulted in some local conflicts."
        assert items[-1][1] == os.path.join(get_tests_input_path(), "clips", "common_voice_en_19737074.wav")
| 804
| 41.368421
| 109
|
py
|
TTS
|
TTS-master/tests/test_glow_tts.py
|
import copy
import os
import unittest
import torch
from tests import get_tests_input_path
from torch import optim
from TTS.tts.layers.losses import GlowTTSLoss
from TTS.tts.models.glow_tts import GlowTts
from TTS.utils.io import load_config
from TTS.utils.audio import AudioProcessor
#pylint: disable=unused-variable
# deterministic weights/inputs across test runs
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# shared fixtures: test config, audio processor and a sample wav path
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
ap = AudioProcessor(**c.audio)
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
def count_parameters(model):
    r"""Count number of trainable parameters in a network."""
    trainable_sizes = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable_sizes)
class GlowTTSTrainTest(unittest.TestCase):
    """Smoke test: a few GlowTts optimization steps must update every parameter."""

    @staticmethod
    def _build_model():
        """Build a GlowTts instance with the shared test configuration.

        Extracted so the trained model and its frozen reference are guaranteed
        to use identical hyper-parameters (previously duplicated inline).
        """
        return GlowTts(
            num_chars=32,
            hidden_channels_enc=128,
            hidden_channels_dec=128,
            hidden_channels_dp=32,
            out_channels=80,
            encoder_type='rel_pos_transformer',
            encoder_params={
                'kernel_size': 3,
                'dropout_p': 0.1,
                'num_layers': 6,
                'num_heads': 2,
                'hidden_channels_ffn': 768,  # 4 times the hidden_channels
                'input_length': None
            },
            use_encoder_prenet=True,
            num_flow_blocks_dec=12,
            kernel_size_dec=5,
            dilation_rate=5,
            num_block_layers=4,
            dropout_p_dec=0.,
            num_speakers=0,
            c_in_channels=0,
            num_splits=4,
            num_squeeze=1,
            sigmoid_scale=False,
            mean_only=False).to(device)

    @staticmethod
    def test_train_step():
        """Run 5 training steps and assert all parameters changed."""
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128
        mel_spec = torch.rand(8, c.audio['num_mels'], 30).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        # FIX: was `criterion = criterion = GlowTTSLoss()` (duplicated assignment);
        # also dropped unused `linear_spec` / `speaker_ids` tensors.
        criterion = GlowTTSLoss()
        # model to train and a reference copy to compare weights against
        model = GlowTTSTrainTest._build_model()
        model_ref = GlowTTSTrainTest._build_model()
        model.train()
        print(" > Num parameters for GlowTTS model:%s" %
              (count_parameters(model)))
        # pass the state to ref model
        model_ref.load_state_dict(copy.deepcopy(model.state_dict()))
        # sanity check: both models start from identical weights
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for _ in range(5):
            z, logdet, y_mean, y_log_scale, alignments, o_dur_log, o_total_dur = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, None)
            optimizer.zero_grad()
            loss_dict = criterion(z, y_mean, y_log_scale, logdet, mel_lengths,
                                  o_dur_log, o_total_dur, input_lengths)
            loss = loss_dict['loss']
            loss.backward()
            optimizer.step()
        # every parameter must differ from the reference after training
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
| 4,612
| 33.684211
| 95
|
py
|
TTS
|
TTS-master/tests/test_train_tts.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/tests/test_vocoder_rwd.py
|
import torch
import numpy as np
from TTS.vocoder.models.random_window_discriminator import RandomWindowDiscriminator
def test_rwd():
    """RandomWindowDiscriminator returns one score tensor per random window."""
    hop_length = 256
    disc = RandomWindowDiscriminator(cond_channels=80,
                                     window_sizes=(512, 1024, 2048, 4096,
                                                   8192),
                                     cond_disc_downsample_factors=[
                                         (8, 4, 2, 2, 2), (8, 4, 2, 2),
                                         (8, 4, 2), (8, 4), (4, 2, 2)
                                     ],
                                     hop_length=hop_length)
    wav = torch.rand([4, 1, 22050])
    cond = torch.rand([4, 80, 22050 // hop_length])
    scores, _ = disc(wav, cond)
    assert len(scores) == 10
    assert np.all(scores[0].shape == (4, 1, 1))
| 816
| 36.136364
| 84
|
py
|
TTS
|
TTS-master/tests/test_text_cleaners.py
|
#!/usr/bin/env python3
from TTS.tts.utils.text.cleaners import english_cleaners, phoneme_cleaners
def test_time() -> None:
    """english_cleaners expands clock times into their spoken form."""
    cases = (
        ("It's 11:00", "it's eleven a m"),
        ("It's 9:01", "it's nine oh one a m"),
        ("It's 16:00", "it's four p m"),
        ("It's 00:00 am", "it's twelve a m"),
    )
    for raw, spoken in cases:
        assert english_cleaners(raw) == spoken
def test_currency() -> None:
    """phoneme_cleaners verbalizes currency amounts in several currencies."""
    cases = (
        ("It's $10.50", "It's ten dollars fifty cents"),
        ("£1.1", "one pound sterling one penny"),
        ("¥1", "one yen"),
    )
    for raw, spoken in cases:
        assert phoneme_cleaners(raw) == spoken
def test_expand_numbers() -> None:
    """phoneme_cleaners spells out bare integers, including negatives."""
    for raw, spoken in (("-1", "minus one"), ("1", "one")):
        assert phoneme_cleaners(raw) == spoken
| 736
| 32.5
| 76
|
py
|
TTS
|
TTS-master/tests/test_demo_server.py
|
import os
import unittest
from tests import get_tests_input_path, get_tests_output_path
from TTS.utils.synthesizer import Synthesizer
from TTS.tts.utils.generic_utils import setup_model
from TTS.tts.utils.io import save_checkpoint
from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols
from TTS.utils.io import load_config
class DemoServerTest(unittest.TestCase):
    """End-to-end checks for the Synthesizer used by the demo server."""
    # pylint: disable=R0201
    def _create_random_model(self):
        # Build an untrained model from the dummy config and write a
        # checkpoint so test_in_out has something to load.
        # pylint: disable=global-statement
        global symbols, phonemes
        config = load_config(os.path.join(get_tests_output_path(), 'dummy_model_config.json'))
        if 'characters' in config.keys():
            # a custom character set replaces the module-level symbol tables
            symbols, phonemes = make_symbols(**config.characters)
        num_chars = len(phonemes) if config.use_phonemes else len(symbols)
        model = setup_model(num_chars, 0, config)
        output_path = os.path.join(get_tests_output_path())
        save_checkpoint(model, None, 10, 10, 1, output_path)

    def test_in_out(self):
        """Load the freshly-saved random checkpoint and synthesize a sentence."""
        self._create_random_model()
        config = load_config(os.path.join(get_tests_input_path(), 'server_config.json'))
        tts_root_path = get_tests_output_path()
        # rewrite relative checkpoint/config paths against the test output dir
        config['tts_checkpoint'] = os.path.join(tts_root_path, config['tts_checkpoint'])
        config['tts_config'] = os.path.join(tts_root_path, config['tts_config'])
        synthesizer = Synthesizer(config['tts_checkpoint'], config['tts_config'], None, None)
        synthesizer.tts("Better this test works!!")

    def test_split_into_sentences(self):
        """Check demo server sentences split as expected"""
        print("\n > Testing demo server sentence splitting")
        # pylint: disable=attribute-defined-outside-init
        # split_into_sentences is called unbound with `self` standing in for
        # the Synthesizer instance; it only needs the `seg` attribute set here.
        self.seg = Synthesizer.get_segmenter("en")
        sis = Synthesizer.split_into_sentences
        assert sis(self, 'Hello. Two sentences') == ['Hello.', 'Two sentences']
        assert sis(self, 'He went to meet the adviser from Scott, Waltman & Co. next morning.') == ['He went to meet the adviser from Scott, Waltman & Co. next morning.']
        assert sis(self, 'Let\'s run it past Sarah and co. They\'ll want to see this.') == ['Let\'s run it past Sarah and co.', 'They\'ll want to see this.']
        assert sis(self, 'Where is Bobby Jr.\'s rabbit?') == ['Where is Bobby Jr.\'s rabbit?']
        assert sis(self, 'Please inform the U.K. authorities right away.') == ['Please inform the U.K. authorities right away.']
        assert sis(self, 'Were David and co. at the event?') == ['Were David and co. at the event?']
        assert sis(self, 'paging dr. green, please come to theatre four immediately.') == ['paging dr. green, please come to theatre four immediately.']
        assert sis(self, 'The email format is Firstname.Lastname@example.com. I think you reversed them.') == ['The email format is Firstname.Lastname@example.com.', 'I think you reversed them.']
        assert sis(self, 'The demo site is: https://top100.example.com/subsection/latestnews.html. Please send us your feedback.') == ['The demo site is: https://top100.example.com/subsection/latestnews.html.', 'Please send us your feedback.']
        assert sis(self, 'Scowling at him, \'You are not done yet!\' she yelled.') == ['Scowling at him, \'You are not done yet!\' she yelled.'] # with the final lowercase "she" we see it's all one sentence
        assert sis(self, 'Hey!! So good to see you.') == ['Hey!!', 'So good to see you.']
        assert sis(self, 'He went to Yahoo! but I don\'t know the division.') == ['He went to Yahoo! but I don\'t know the division.']
        assert sis(self, 'If you can\'t remember a quote, “at least make up a memorable one that\'s plausible..."') == ['If you can\'t remember a quote, “at least make up a memorable one that\'s plausible..."']
        assert sis(self, 'The address is not google.com.') == ['The address is not google.com.']
        assert sis(self, '1.) The first item 2.) The second item') == ['1.) The first item', '2.) The second item']
        assert sis(self, '1) The first item 2) The second item') == ['1) The first item', '2) The second item']
        assert sis(self, 'a. The first item b. The second item c. The third list item') == ['a. The first item', 'b. The second item', 'c. The third list item']
| 4,265
| 72.551724
| 243
|
py
|
TTS
|
TTS-master/tests/test_vocoder_tf_melgan_generator.py
|
import numpy as np
import tensorflow as tf
from TTS.vocoder.tf.models.melgan_generator import MelganGenerator
def test_melgan_generator():
    """MelganGenerator upsamples a (B, C, T) spec to (B, 1, T * hop_length)."""
    hop_length = 256
    generator = MelganGenerator()
    # pylint: disable=no-value-for-parameter
    spec = tf.random.uniform((4, 80, 64))
    wav = generator(spec, training=False)
    assert np.all(wav.shape == (4, 1, 64 * hop_length)), wav.shape
| 408
| 28.214286
| 72
|
py
|
TTS
|
TTS-master/tests/test_tacotron_model.py
|
import copy
import os
import unittest
import torch
from tests import get_tests_input_path
from torch import nn, optim
from TTS.tts.layers.losses import L1LossMasked
from TTS.tts.models.tacotron import Tacotron
from TTS.utils.io import load_config
from TTS.utils.audio import AudioProcessor
#pylint: disable=unused-variable
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
ap = AudioProcessor(**c.audio)
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
def count_parameters(model):
    r"""Count number of trainable parameters in a network."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
class TacotronTrainTest(unittest.TestCase):
    """Smoke-test a few optimization steps of a multi-speaker Tacotron."""
    @staticmethod
    def test_train_step():
        # random batch: 8 utterances, 128 chars, 30 decoder frames
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128  # at least one sequence spans the full pad length
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        # mark every frame at/after each utterance end as a stop frame
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # collapse stop targets to one flag per reduction (r) step
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            postnet_output_dim=c.audio['fft_size'],
            decoder_output_dim=c.audio['num_mels'],
            r=c.r,
            memory_size=c.memory_size
        ).to(device)  #FIXME: missing num_speakers parameter to Tacotron ctor
        model.train()
        print(" > Num parameters for Tacotron model:%s" %
              (count_parameters(model)))
        # reference copy used to verify that training updates every parameter
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for _ in range(5):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
class MultiSpeakeTacotronTrainTest(unittest.TestCase):
    """Same training smoke test, driven by external speaker embeddings
    (dim 55) instead of speaker ids."""
    @staticmethod
    def test_train_step():
        # random batch: 8 utterances, 128 chars, 30 decoder frames
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128  # at least one sequence spans the full pad length
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_embeddings = torch.rand(8, 55).to(device)
        # mark every frame at/after each utterance end as a stop frame
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # collapse stop targets to one flag per reduction (r) step
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            postnet_output_dim=c.audio['fft_size'],
            decoder_output_dim=c.audio['num_mels'],
            r=c.r,
            memory_size=c.memory_size,
            speaker_embedding_dim=55,
        ).to(device)  #FIXME: missing num_speakers parameter to Tacotron ctor
        model.train()
        print(" > Num parameters for Tacotron model:%s" %
              (count_parameters(model)))
        # reference copy used to verify that training updates every parameter
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for _ in range(5):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths,
                speaker_embeddings=speaker_embeddings)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
class TacotronGSTTrainTest(unittest.TestCase):
    """Training smoke test for Tacotron with Global Style Tokens, using both
    a random mel style reference and one computed from a real wav file."""
    @staticmethod
    def test_train_step():
        # with random gst mel style
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128  # at least one sequence spans the full pad length
        mel_spec = torch.rand(8, 120, c.audio['num_mels']).to(device)
        linear_spec = torch.rand(8, 120, c.audio['fft_size']).to(device)
        mel_lengths = torch.randint(20, 120, (8, )).long().to(device)
        mel_lengths[-1] = 120
        stop_targets = torch.zeros(8, 120, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        # mark every frame at/after each utterance end as a stop frame
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # collapse stop targets to one flag per reduction (r) step
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            gst=True,
            gst_embedding_dim=c.gst['gst_embedding_dim'],
            gst_num_heads=c.gst['gst_num_heads'],
            gst_style_tokens=c.gst['gst_style_tokens'],
            postnet_output_dim=c.audio['fft_size'],
            decoder_output_dim=c.audio['num_mels'],
            r=c.r,
            memory_size=c.memory_size
        ).to(device)  #FIXME: missing num_speakers parameter to Tacotron ctor
        model.train()
        # print(model)
        print(" > Num parameters for Tacotron GST model:%s" %
              (count_parameters(model)))
        # reference copy used to verify that training updates every parameter
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for _ in range(10):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1

        # with file gst style: mel style reference computed from a real wav
        mel_spec = torch.FloatTensor(ap.melspectrogram(ap.load_wav(WAV_FILE)))[:, :120].unsqueeze(0).transpose(1, 2).to(device)
        mel_spec = mel_spec.repeat(8, 1, 1)
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128
        linear_spec = torch.rand(8, mel_spec.size(1), c.audio['fft_size']).to(device)
        mel_lengths = torch.randint(20, mel_spec.size(1), (8, )).long().to(device)
        mel_lengths[-1] = mel_spec.size(1)
        stop_targets = torch.zeros(8, mel_spec.size(1), 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            gst=True,
            gst_embedding_dim=c.gst['gst_embedding_dim'],
            gst_num_heads=c.gst['gst_num_heads'],
            gst_style_tokens=c.gst['gst_style_tokens'],
            postnet_output_dim=c.audio['fft_size'],
            decoder_output_dim=c.audio['num_mels'],
            r=c.r,
            memory_size=c.memory_size
        ).to(device)  #FIXME: missing num_speakers parameter to Tacotron ctor
        model.train()
        # print(model)
        print(" > Num parameters for Tacotron GST model:%s" %
              (count_parameters(model)))
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for _ in range(10):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
class SCGSTMultiSpeakeTacotronTrainTest(unittest.TestCase):
    """Training smoke test for GST Tacotron conditioned on external speaker
    embeddings (gst_use_speaker_embedding enabled)."""
    @staticmethod
    def test_train_step():
        # random batch: 8 utterances, 128 chars, 30 decoder frames
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128  # at least one sequence spans the full pad length
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        mel_lengths[-1] = mel_spec.size(1)
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_embeddings = torch.rand(8, 55).to(device)
        # mark every frame at/after each utterance end as a stop frame
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # collapse stop targets to one flag per reduction (r) step
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            postnet_output_dim=c.audio['fft_size'],
            decoder_output_dim=c.audio['num_mels'],
            gst=True,
            gst_embedding_dim=c.gst['gst_embedding_dim'],
            gst_num_heads=c.gst['gst_num_heads'],
            gst_style_tokens=c.gst['gst_style_tokens'],
            gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'],
            r=c.r,
            memory_size=c.memory_size,
            speaker_embedding_dim=55,
        ).to(device)  #FIXME: missing num_speakers parameter to Tacotron ctor
        model.train()
        print(" > Num parameters for Tacotron model:%s" %
              (count_parameters(model)))
        # reference copy used to verify that training updates every parameter
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for _ in range(5):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths,
                speaker_embeddings=speaker_embeddings)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for name_param, param_ref in zip(model.named_parameters(),
                                         model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            name, param = name_param
            # this GST recurrence weight is skipped; it may not receive
            # gradients in this setup — TODO confirm why it is excluded
            if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
                continue
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
| 15,960
| 43.336111
| 127
|
py
|
TTS
|
TTS-master/tests/test_vocoder_losses.py
|
import os
import torch
from tests import get_tests_input_path, get_tests_output_path, get_tests_path
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
from TTS.vocoder.layers.losses import MultiScaleSTFTLoss, STFTLoss, TorchSTFT
TESTS_PATH = get_tests_path()
OUT_PATH = os.path.join(get_tests_output_path(), "audio_tests")
os.makedirs(OUT_PATH, exist_ok=True)
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
C = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
ap = AudioProcessor(**C.audio)
def test_torch_stft():
    """TorchSTFT magnitudes should match the librosa-based reference."""
    torch_stft = TorchSTFT(ap.fft_size, ap.hop_length, ap.win_length)
    signal = ap.load_wav(WAV_FILE)
    # reference magnitudes from the AudioProcessor's stft
    M_librosa = abs(ap._stft(signal))  # pylint: disable=protected-access
    # torch stft on the same signal, batched as (1, T)
    M_torch = torch_stft(torch.from_numpy(signal[None, :]).float())
    # check the difference b/w librosa and torch outputs
    assert (M_librosa - M_torch[0].data.numpy()).max() < 1e-5
def test_stft_loss():
    """STFTLoss is zero for identical signals and positive otherwise."""
    stft_loss = STFTLoss(ap.fft_size, ap.hop_length, ap.win_length)
    wav = torch.from_numpy(ap.load_wav(WAV_FILE)[None, :]).float()
    # identical inputs -> both loss terms vanish
    loss_m, loss_sc = stft_loss(wav, wav)
    assert loss_m + loss_sc == 0
    # mismatched inputs -> bounded spectral-convergence loss, positive total
    loss_m, loss_sc = stft_loss(wav, torch.rand_like(wav))
    assert loss_sc < 1.0
    assert loss_m + loss_sc > 0
def test_multiscale_stft_loss():
    """MultiScaleSTFTLoss behaves like STFTLoss across three resolutions."""
    ffts = [ap.fft_size // 2, ap.fft_size, ap.fft_size * 2]
    hops = [ap.hop_length // 2, ap.hop_length, ap.hop_length * 2]
    wins = [ap.win_length // 2, ap.win_length, ap.win_length * 2]
    stft_loss = MultiScaleSTFTLoss(ffts, hops, wins)
    wav = torch.from_numpy(ap.load_wav(WAV_FILE)[None, :]).float()
    # identical inputs -> both loss terms vanish
    loss_m, loss_sc = stft_loss(wav, wav)
    assert loss_m + loss_sc == 0
    # mismatched inputs -> bounded spectral-convergence loss, positive total
    loss_m, loss_sc = stft_loss(wav, torch.rand_like(wav))
    assert loss_sc < 1.0
    assert loss_m + loss_sc > 0
| 1,931
| 34.127273
| 90
|
py
|
TTS
|
TTS-master/tests/test_loader.py
|
import os
import shutil
import unittest
import numpy as np
import torch
from tests import get_tests_input_path, get_tests_output_path
from torch.utils.data import DataLoader
from TTS.tts.datasets import TTSDataset
from TTS.tts.datasets.preprocess import ljspeech
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
#pylint: disable=unused-variable
OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/")
os.makedirs(OUTPATH, exist_ok=True)
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
ok_ljspeech = os.path.exists(c.data_path)
DATA_EXIST = True
if not os.path.exists(c.data_path):
DATA_EXIST = False
print(" > Dynamic data loader test: {}".format(DATA_EXIST))
class TestTTSDataset(unittest.TestCase):
    """Checks for the TTS data loader: batch shapes, normalization ranges,
    batch-group shuffling, padding, and spectrogram consistency.

    All tests are skipped silently when the LJSpeech data path from the test
    config does not exist (ok_ljspeech is False).
    """
    def __init__(self, *args, **kwargs):
        super(TestTTSDataset, self).__init__(*args, **kwargs)
        # number of batches to inspect per test
        self.max_loader_iter = 4
        self.ap = AudioProcessor(**c.audio)

    def _create_dataloader(self, batch_size, r, bgs):
        # Build a DataLoader over LJSpeech with reduction factor `r` and
        # batch group size `bgs` (used for length-bucketed shuffling).
        items = ljspeech(c.data_path, 'metadata.csv')
        dataset = TTSDataset.MyDataset(
            r,
            c.text_cleaner,
            compute_linear_spec=True,
            ap=self.ap,
            meta_data=items,
            tp=c.characters if 'characters' in c.keys() else None,
            batch_group_size=bgs,
            min_seq_len=c.min_seq_len,
            max_seq_len=float("inf"),
            use_phonemes=False)
        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            collate_fn=dataset.collate_fn,
            drop_last=True,
            num_workers=c.num_loader_workers)
        return dataloader, dataset

    def test_loader(self):
        """Basic batch sanity: tensor shapes, dtypes, normalization range."""
        if ok_ljspeech:
            dataloader, dataset = self._create_dataloader(2, c.r, 0)
            for i, data in enumerate(dataloader):
                if i == self.max_loader_iter:
                    break
                # collate_fn returns a positional tuple; unpack by index
                text_input = data[0]
                text_lengths = data[1]
                speaker_name = data[2]
                linear_input = data[3]
                mel_input = data[4]
                mel_lengths = data[5]
                stop_target = data[6]
                item_idx = data[7]
                neg_values = text_input[text_input < 0]
                check_count = len(neg_values)
                assert check_count == 0, \
                    " !! Negative values in text_input: {}".format(check_count)
                # TODO: more assertion here
                assert isinstance(speaker_name[0], str)
                assert linear_input.shape[0] == c.batch_size
                assert linear_input.shape[2] == self.ap.fft_size // 2 + 1
                assert mel_input.shape[0] == c.batch_size
                assert mel_input.shape[2] == c.audio['num_mels']
                # check normalization ranges
                if self.ap.symmetric_norm:
                    assert mel_input.max() <= self.ap.max_norm
                    assert mel_input.min() >= -self.ap.max_norm  #pylint: disable=invalid-unary-operand-type
                    assert mel_input.min() < 0
                else:
                    assert mel_input.max() <= self.ap.max_norm
                    assert mel_input.min() >= 0

    def test_batch_group_shuffle(self):
        """Batch-group shuffling must keep batches roughly length-sorted and
        must actually reorder the dataset items."""
        if ok_ljspeech:
            dataloader, dataset = self._create_dataloader(2, c.r, 16)
            last_length = 0
            frames = dataset.items
            for i, data in enumerate(dataloader):
                if i == self.max_loader_iter:
                    break
                text_input = data[0]
                text_lengths = data[1]
                speaker_name = data[2]
                linear_input = data[3]
                mel_input = data[4]
                mel_lengths = data[5]
                stop_target = data[6]
                item_idx = data[7]
                avg_length = mel_lengths.numpy().mean()
                assert avg_length >= last_length
            dataloader.dataset.sort_items()
            # sorting must change the item order relative to the snapshot
            is_items_reordered = False
            for idx, item in enumerate(dataloader.dataset.items):
                if item != frames[idx]:
                    is_items_reordered = True
                    break
            assert is_items_reordered

    def test_padding_and_spec(self):
        """Verify zero padding, stop-target placement, and that the loader's
        mel matches one computed directly from the wav."""
        if ok_ljspeech:
            dataloader, dataset = self._create_dataloader(1, 1, 0)
            for i, data in enumerate(dataloader):
                if i == self.max_loader_iter:
                    break
                text_input = data[0]
                text_lengths = data[1]
                speaker_name = data[2]
                linear_input = data[3]
                mel_input = data[4]
                mel_lengths = data[5]
                stop_target = data[6]
                item_idx = data[7]
                # check mel_spec consistency
                wav = np.asarray(self.ap.load_wav(item_idx[0]), dtype=np.float32)
                mel = self.ap.melspectrogram(wav).astype('float32')
                mel = torch.FloatTensor(mel).contiguous()
                mel_dl = mel_input[0]
                # NOTE: Below needs to check == 0 but due to an unknown reason
                # there is a slight difference between two matrices.
                # TODO: Check this assert cond more in detail.
                assert abs(mel.T - mel_dl).max() < 1e-5, abs(mel.T - mel_dl).max()
                # check mel-spec correctness
                mel_spec = mel_input[0].cpu().numpy()
                wav = self.ap.inv_melspectrogram(mel_spec.T)
                self.ap.save_wav(wav, OUTPATH + '/mel_inv_dataloader.wav')
                shutil.copy(item_idx[0], OUTPATH + '/mel_target_dataloader.wav')
                # check linear-spec
                linear_spec = linear_input[0].cpu().numpy()
                wav = self.ap.inv_spectrogram(linear_spec.T)
                self.ap.save_wav(wav, OUTPATH + '/linear_inv_dataloader.wav')
                shutil.copy(item_idx[0],
                            OUTPATH + '/linear_target_dataloader.wav')
                # check the last time step to be zero padded
                assert linear_input[0, -1].sum() != 0
                assert linear_input[0, -2].sum() != 0
                assert mel_input[0, -1].sum() != 0
                assert mel_input[0, -2].sum() != 0
                assert stop_target[0, -1] == 1
                assert stop_target[0, -2] == 0
                assert stop_target.sum() == 1
                assert len(mel_lengths.shape) == 1
                assert mel_lengths[0] == linear_input[0].shape[0]
                assert mel_lengths[0] == mel_input[0].shape[0]

            # Test for batch size 2
            dataloader, dataset = self._create_dataloader(2, 1, 0)
            for i, data in enumerate(dataloader):
                if i == self.max_loader_iter:
                    break
                text_input = data[0]
                text_lengths = data[1]
                speaker_name = data[2]
                linear_input = data[3]
                mel_input = data[4]
                mel_lengths = data[5]
                stop_target = data[6]
                item_idx = data[7]
                # idx selects the longer item; the shorter one must be padded
                if mel_lengths[0] > mel_lengths[1]:
                    idx = 0
                else:
                    idx = 1
                # check the first item in the batch
                assert linear_input[idx, -1].sum() != 0
                assert linear_input[idx, -2].sum() != 0, linear_input
                assert mel_input[idx, -1].sum() != 0
                assert mel_input[idx, -2].sum() != 0, mel_input
                assert stop_target[idx, -1] == 1
                assert stop_target[idx, -2] == 0
                assert stop_target[idx].sum() == 1
                assert len(mel_lengths.shape) == 1
                assert mel_lengths[idx] == mel_input[idx].shape[0]
                assert mel_lengths[idx] == linear_input[idx].shape[0]
                # check the second itme in the batch
                assert linear_input[1 - idx, -1].sum() == 0
                assert mel_input[1 - idx, -1].sum() == 0
                assert stop_target[1, mel_lengths[1]-1] == 1
                assert stop_target[1, mel_lengths[1]:].sum() == 0
                assert len(mel_lengths.shape) == 1
                # check batch zero-frame conditions (zero-frame disabled)
                # assert (linear_input * stop_target.unsqueeze(2)).sum() == 0
                # assert (mel_input * stop_target.unsqueeze(2)).sum() == 0
| 8,580
| 39.476415
| 108
|
py
|
TTS
|
TTS-master/tests/test_encoder.py
|
import os
import unittest
import torch as T
from tests import get_tests_input_path
from TTS.speaker_encoder.losses import GE2ELoss, AngleProtoLoss
from TTS.speaker_encoder.model import SpeakerEncoder
from TTS.utils.io import load_config
file_path = get_tests_input_path()
c = load_config(os.path.join(file_path, "test_config.json"))
class SpeakerEncoderTests(unittest.TestCase):
    """Shape and normalization checks for the SpeakerEncoder d-vector model."""
    # pylint: disable=R0201
    def test_in_out(self):
        dummy_input = T.rand(4, 20, 80)  # B x T x D
        dummy_hidden = [T.rand(2, 4, 128), T.rand(2, 4, 128)]
        model = SpeakerEncoder(
            input_dim=80, proj_dim=256, lstm_dim=768, num_lstm_layers=3
        )
        # computing d vectors
        output = model.forward(dummy_input)
        assert output.shape[0] == 4
        assert output.shape[1] == 256
        output = model.inference(dummy_input)
        assert output.shape[0] == 4
        assert output.shape[1] == 256
        # compute d vectors by passing LSTM hidden
        # output = model.forward(dummy_input, dummy_hidden)
        # assert output.shape[0] == 4
        # assert output.shape[1] == 20
        # assert output.shape[2] == 256
        # check normalization: outputs should already be L2-normalized, so
        # re-normalizing must be (near) a no-op
        output_norm = T.nn.functional.normalize(output, dim=1, p=2)
        assert_diff = (output_norm - output).sum().item()
        assert output.type() == "torch.FloatTensor"
        assert (
            abs(assert_diff) < 1e-4
        ), f" [!] output_norm has wrong values - {assert_diff}"
        # compute d for a given batch via sliding windows over frames
        dummy_input = T.rand(1, 240, 80)  # B x T x D
        output = model.compute_embedding(dummy_input, num_frames=160, overlap=0.5)
        assert output.shape[0] == 1
        assert output.shape[1] == 256
        assert len(output.shape) == 2
class GE2ELossTests(unittest.TestCase):
    """Sanity checks for the GE2E speaker-verification loss."""
    # pylint: disable=R0201
    def test_in_out(self):
        # check random input
        dummy_input = T.rand(4, 5, 64)  # num_speaker x num_utterance x dim
        loss = GE2ELoss(loss_method="softmax")
        output = loss.forward(dummy_input)
        assert output.item() >= 0.0
        # check all zeros
        dummy_input = T.ones(4, 5, 64)  # num_speaker x num_utterance x dim
        loss = GE2ELoss(loss_method="softmax")
        output = loss.forward(dummy_input)
        assert output.item() >= 0.0
        # check speaker loss with orthogonal d-vectors: perfectly separated
        # speakers should produce a near-zero loss
        dummy_input = T.empty(3, 64)
        dummy_input = T.nn.init.orthogonal_(dummy_input)
        dummy_input = T.cat(
            [
                dummy_input[0].repeat(5, 1, 1).transpose(0, 1),
                dummy_input[1].repeat(5, 1, 1).transpose(0, 1),
                dummy_input[2].repeat(5, 1, 1).transpose(0, 1),
            ]
        )  # num_speaker x num_utterance x dim
        loss = GE2ELoss(loss_method="softmax")
        output = loss.forward(dummy_input)
        assert output.item() < 0.005
class AngleProtoLossTests(unittest.TestCase):
    """Sanity checks for the angular prototypical loss (mirrors GE2ELossTests)."""
    # pylint: disable=R0201
    def test_in_out(self):
        # check random input
        dummy_input = T.rand(4, 5, 64)  # num_speaker x num_utterance x dim
        loss = AngleProtoLoss()
        output = loss.forward(dummy_input)
        assert output.item() >= 0.0
        # check all zeros
        dummy_input = T.ones(4, 5, 64)  # num_speaker x num_utterance x dim
        loss = AngleProtoLoss()
        output = loss.forward(dummy_input)
        assert output.item() >= 0.0
        # check speaker loss with orthogonal d-vectors: perfectly separated
        # speakers should produce a near-zero loss
        dummy_input = T.empty(3, 64)
        dummy_input = T.nn.init.orthogonal_(dummy_input)
        dummy_input = T.cat(
            [
                dummy_input[0].repeat(5, 1, 1).transpose(0, 1),
                dummy_input[1].repeat(5, 1, 1).transpose(0, 1),
                dummy_input[2].repeat(5, 1, 1).transpose(0, 1),
            ]
        )  # num_speaker x num_utterance x dim
        loss = AngleProtoLoss()
        output = loss.forward(dummy_input)
        assert output.item() < 0.005
# class LoaderTest(unittest.TestCase):
# def test_output(self):
# items = libri_tts("/home/erogol/Data/Libri-TTS/train-clean-360/")
# ap = AudioProcessor(**c['audio'])
# dataset = MyDataset(ap, items, 1.6, 64, 10)
# loader = DataLoader(dataset, batch_size=32, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn)
# count = 0
# for mel, spk in loader:
# print(mel.shape)
# if count == 4:
# break
# count += 1
| 4,515
| 37.271186
| 114
|
py
|
TTS
|
TTS-master/tests/test_wavegrad_layers.py
|
import torch
from TTS.vocoder.layers.wavegrad import PositionalEncoding, FiLM, UBlock, DBlock
from TTS.vocoder.models.wavegrad import Wavegrad
def test_positional_encoding():
    """PositionalEncoding preserves the input shape and float dtype."""
    layer = PositionalEncoding(50)
    features = torch.rand(32, 50, 100)
    noise_level = torch.rand(32)
    encoded = layer(features, noise_level)
    assert tuple(encoded.shape) == (32, 50, 100)
    assert isinstance(encoded, torch.FloatTensor)
def test_film():
    """FiLM maps (B, 50, T) features to shift/scale tensors of 76 channels."""
    layer = FiLM(50, 76)
    features = torch.rand(32, 50, 100)
    noise_level = torch.rand(32)
    shift, scale = layer(features, noise_level)
    for modulation in (shift, scale):
        assert tuple(modulation.shape) == (32, 76, 100)
        assert isinstance(modulation, torch.FloatTensor)
    # weight norm can be toggled without breaking the layer
    layer.apply_weight_norm()
    layer.remove_weight_norm()
def test_ublock():
    """UBlock upsamples (B, 50, T) by factor 2 while mapping 50 -> 100 channels,
    conditioned on a FiLM shift/scale pair."""
    inp1 = torch.rand(32, 50, 100)
    inp2 = torch.rand(32, 50, 50)
    nl = torch.rand(32)
    layer_film = FiLM(50, 100)
    layer = UBlock(50, 100, 2, [1, 2, 4, 8])
    # FiLM returns (shift, scale) — see test_film. The original test bound
    # them as `scale, shift` and then swapped them again when calling the
    # UBlock, passing the tensors in reversed order; unpack consistently.
    shift, scale = layer_film(inp1, nl)
    o = layer(inp2, shift, scale)
    assert o.shape[0] == 32
    assert o.shape[1] == 100
    assert o.shape[2] == 100
    assert isinstance(o, torch.FloatTensor)
    # weight norm can be toggled without breaking the layer
    layer.apply_weight_norm()
    layer.remove_weight_norm()
def test_dblock():
    """DBlock halves the time axis (factor 2) and maps 50 -> 100 channels."""
    signal = torch.rand(32, 50, 130)
    layer = DBlock(50, 100, 2)
    downsampled = layer(signal)
    assert tuple(downsampled.shape) == (32, 100, 65)
    assert isinstance(downsampled, torch.FloatTensor)
    # weight norm can be toggled without breaking the layer
    layer.apply_weight_norm()
    layer.remove_weight_norm()
def test_wavegrad_forward():
    """Wavegrad maps (noisy audio, mel, noise scale) to audio of equal length."""
    n_frames = 20
    hop = 300  # product of the upsample factors: 5 * 5 * 3 * 2 * 2
    x = torch.rand(32, 1, n_frames * hop)
    c = torch.rand(32, 80, n_frames)
    noise_scale = torch.rand(32)
    model = Wavegrad(in_channels=80,
                     out_channels=1,
                     upsample_factors=[5, 5, 3, 2, 2],
                     upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2],
                                         [1, 2, 4, 8], [1, 2, 4, 8],
                                         [1, 2, 4, 8]])
    audio = model.forward(x, c, noise_scale)
    assert tuple(audio.shape) == (32, 1, n_frames * hop)
    assert isinstance(audio, torch.FloatTensor)
    # weight norm can be toggled without breaking the model
    model.apply_weight_norm()
    model.remove_weight_norm()
| 2,362
| 24.408602
| 80
|
py
|
TTS
|
TTS-master/tests/test_vocoder_parallel_wavegan_discriminator.py
|
import numpy as np
import torch
from TTS.vocoder.models.parallel_wavegan_discriminator import ParallelWaveganDiscriminator, ResidualParallelWaveganDiscriminator
def test_pwgan_disciminator():
    """ParallelWaveganDiscriminator maps (B, 1, T) audio to (B, 1, T) scores."""
    disc = ParallelWaveganDiscriminator(
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        num_layers=10,
        conv_channels=64,
        dilation_factor=1,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2},
        bias=True)
    audio = torch.rand((4, 1, 64 * 256))
    scores = disc(audio)
    # output is one score per input sample (same temporal resolution)
    assert np.all(scores.shape == (4, 1, 64 * 256))
    disc.remove_weight_norm()
def test_redisual_pwgan_disciminator():
    """Residual PWGAN discriminator preserves the (B, 1, T) input shape."""
    disc = ResidualParallelWaveganDiscriminator(
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        num_layers=30,
        stacks=3,
        res_channels=64,
        gate_channels=128,
        skip_channels=64,
        dropout=0.0,
        bias=True,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"negative_slope": 0.2})
    audio = torch.rand((4, 1, 64 * 256))
    scores = disc(audio)
    assert np.all(scores.shape == (4, 1, 64 * 256))
    disc.remove_weight_norm()
| 1,237
| 28.47619
| 128
|
py
|
TTS
|
TTS-master/tests/test_tacotron2_tf_model.py
|
import os
import unittest
import numpy as np
import tensorflow as tf
import torch
from tests import get_tests_input_path
from TTS.tts.tf.models.tacotron2 import Tacotron2
from TTS.tts.tf.utils.tflite import (convert_tacotron2_to_tflite,
load_tflite_model)
from TTS.utils.io import load_config
tf.get_logger().setLevel('INFO')
#pylint: disable=unused-variable
# Fixed torch seed so the dummy batches below are reproducible across runs.
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Shared test configuration (audio params, reduction factor r, ...).
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
class TacotronTFTrainTest(unittest.TestCase):
    """Forward-pass and TFLite-conversion smoke tests for the TF Tacotron2 port."""
    @staticmethod
    def generate_dummy_inputs():
        """Build a random batch; torch tensors are created first and the ones
        consumed by the TF model are converted to tf tensors."""
        chars_seq = torch.randint(0, 24, (8, 128)).long().to(device)
        chars_seq_lengths = torch.randint(100, 128, (8, )).long().to(device)
        # lengths sorted descending (RNN-style batch convention)
        chars_seq_lengths = torch.sort(chars_seq_lengths, descending=True)[0]
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        chars_seq = tf.convert_to_tensor(chars_seq.cpu().numpy())
        chars_seq_lengths = tf.convert_to_tensor(chars_seq_lengths.cpu().numpy())
        mel_spec = tf.convert_to_tensor(mel_spec.cpu().numpy())
        return chars_seq, chars_seq_lengths, mel_spec, mel_postnet_spec, mel_lengths,\
            stop_targets, speaker_ids
    def test_train_step(self):
        ''' test forward pass '''
        chars_seq, chars_seq_lengths, mel_spec, mel_postnet_spec, mel_lengths,\
            stop_targets, speaker_ids = self.generate_dummy_inputs()
        # stop targets become 1 after each sample's real mel length
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # group stop targets by the reduction factor r
        stop_targets = stop_targets.view(chars_seq.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5)
        # training pass
        output = model(chars_seq, chars_seq_lengths, mel_spec, training=True)
        # check model output shapes
        assert np.all(output[0].shape == mel_spec.shape)
        assert np.all(output[1].shape == mel_spec.shape)
        # output[2] is the alignment, output[3] the stop tokens
        assert output[2].shape[2] == chars_seq.shape[1]
        assert output[2].shape[1] == (mel_spec.shape[1] // model.decoder.r)
        assert output[3].shape[1] == (mel_spec.shape[1] // model.decoder.r)
        # inference pass
        output = model(chars_seq, training=False)
    def test_forward_attention(self,):
        """Same forward-pass checks with forward attention enabled."""
        chars_seq, chars_seq_lengths, mel_spec, mel_postnet_spec, mel_lengths,\
            stop_targets, speaker_ids = self.generate_dummy_inputs()
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        stop_targets = stop_targets.view(chars_seq.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, forward_attn=True)
        # training pass
        output = model(chars_seq, chars_seq_lengths, mel_spec, training=True)
        # check model output shapes
        assert np.all(output[0].shape == mel_spec.shape)
        assert np.all(output[1].shape == mel_spec.shape)
        assert output[2].shape[2] == chars_seq.shape[1]
        assert output[2].shape[1] == (mel_spec.shape[1] // model.decoder.r)
        assert output[3].shape[1] == (mel_spec.shape[1] // model.decoder.r)
        # inference pass
        output = model(chars_seq, training=False)
    def test_tflite_conversion(self, ): #pylint:disable=no-self-use
        """Convert the TF model to TFLite, run it once, then delete the file."""
        model = Tacotron2(num_chars=24,
                          num_speakers=0,
                          r=3,
                          postnet_output_dim=80,
                          decoder_output_dim=80,
                          attn_type='original',
                          attn_win=False,
                          attn_norm='sigmoid',
                          prenet_type='original',
                          prenet_dropout=True,
                          forward_attn=False,
                          trans_agent=False,
                          forward_attn_mask=False,
                          location_attn=True,
                          attn_K=0,
                          separate_stopnet=True,
                          bidirectional_decoder=False,
                          enable_tflite=True)
        model.build_inference()
        convert_tacotron2_to_tflite(model, output_path='test_tacotron2.tflite', experimental_converter=True)
        # init tflite model
        tflite_model = load_tflite_model('test_tacotron2.tflite')
        # fake input
        inputs = tf.random.uniform([1, 4], maxval=10, dtype=tf.int32) #pylint:disable=unexpected-keyword-arg
        # run inference
        # get input and output details
        input_details = tflite_model.get_input_details()
        output_details = tflite_model.get_output_details()
        # reshape input tensor for the new input shape
        tflite_model.resize_tensor_input(input_details[0]['index'], inputs.shape) #pylint:disable=unexpected-keyword-arg
        tflite_model.allocate_tensors()
        detail = input_details[0]
        input_shape = detail['shape']
        tflite_model.set_tensor(detail['index'], inputs)
        # run the tflite_model
        tflite_model.invoke()
        # collect outputs
        decoder_output = tflite_model.get_tensor(output_details[0]['index'])
        postnet_output = tflite_model.get_tensor(output_details[1]['index'])
        # remove tflite binary
        os.remove('test_tacotron2.tflite')
| 5,947
| 42.101449
| 121
|
py
|
TTS
|
TTS-master/tests/test_stft_torch.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/tests/test_vocoder_pqmf.py
|
import os
import torch
import soundfile as sf
from librosa.core import load
from tests import get_tests_path, get_tests_input_path
from TTS.vocoder.layers.pqmf import PQMF
TESTS_PATH = get_tests_path()
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
def test_pqmf():
    """Round-trip a wav through PQMF analysis/synthesis and save the result.

    No numeric assertions: the stats are printed and the synthesized file
    ('pqmf_output.wav') can be inspected by ear.
    """
    # BUG FIX: the wav was loaded twice (two identical `load(WAV_FILE)` calls);
    # load it once.
    w, sr = load(WAV_FILE)
    layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0)
    # add batch and channel dims: (T,) -> (1, 1, T)
    w2 = torch.from_numpy(w[None, None, :])
    b2 = layer.analysis(w2)
    w2_ = layer.synthesis(b2)
    print(w2_.max())
    print(w2_.min())
    print(w2_.mean())
    sf.write('pqmf_output.wav', w2_.flatten().detach(), sr)
| 626
| 21.392857
| 64
|
py
|
TTS
|
TTS-master/tests/test_vocoder_gan_datasets.py
|
import os
import numpy as np
from tests import get_tests_path, get_tests_input_path, get_tests_output_path
from torch.utils.data import DataLoader
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
from TTS.vocoder.datasets.gan_dataset import GANDataset
from TTS.vocoder.datasets.preprocess import load_wav_data
file_path = os.path.dirname(os.path.realpath(__file__))
OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/")
os.makedirs(OUTPATH, exist_ok=True)
C = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
test_data_path = os.path.join(get_tests_path(), "data/ljspeech/")
ok_ljspeech = os.path.exists(test_data_path)
def gan_dataset_case(batch_size, seq_len, hop_len, conv_pad, return_segments, use_noise_augment, use_cache, num_workers):
    ''' Run the GAN dataloader with the given parameters and check that
    feature/waveform shapes are consistent and (when no noise augmentation
    is applied) that the returned features match the audio. '''
    ap = AudioProcessor(**C.audio)
    _, train_items = load_wav_data(test_data_path, 10)
    dataset = GANDataset(ap,
                         train_items,
                         seq_len=seq_len,
                         hop_len=hop_len,
                         pad_short=2000,
                         conv_pad=conv_pad,
                         return_segments=return_segments,
                         use_noise_augment=use_noise_augment,
                         use_cache=use_cache)
    loader = DataLoader(dataset=dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_workers,
                        pin_memory=True,
                        drop_last=True)
    max_iter = 10
    count_iter = 0
    # return random segments or return the whole audio
    if return_segments:
        for item1, _ in loader:
            feat1, wav1 = item1
            # feat2, wav2 = item2
            # conv_pad frames are added on each side of the feature
            expected_feat_shape = (batch_size, ap.num_mels, seq_len // hop_len + conv_pad * 2)
            # check shapes
            assert np.all(feat1.shape == expected_feat_shape), f" [!] {feat1.shape} vs {expected_feat_shape}"
            assert (feat1.shape[2] - conv_pad * 2) * hop_len == wav1.shape[2]
            # check feature vs audio match
            if not use_noise_augment:
                for idx in range(batch_size):
                    audio = wav1[idx].squeeze()
                    feat = feat1[idx]
                    mel = ap.melspectrogram(audio)
                    # the first 2 and the last 2 frames are skipped due to the padding
                    # differences in stft
                    assert (feat - mel[:, :feat1.shape[-1]])[:, 2:-2].sum() <= 0, f' [!] {(feat - mel[:, :feat1.shape[-1]])[:, 2:-2].sum()}'
            count_iter += 1
            # if count_iter == max_iter:
            #     break
    else:
        for item in loader:
            feat, wav = item
            expected_feat_shape = (batch_size, ap.num_mels, (wav.shape[-1] // hop_len) + (conv_pad * 2))
            assert np.all(feat.shape == expected_feat_shape), f" [!] {feat.shape} vs {expected_feat_shape}"
            assert (feat.shape[2] - conv_pad * 2) * hop_len == wav.shape[2]
            count_iter += 1
            if count_iter == max_iter:
                break
def test_parametrized_gan_dataset():
    """Run gan_dataset_case over a grid of loader configurations.

    Each row is [batch_size, seq_len, hop_len, conv_pad, return_segments,
    use_noise_augment, use_cache, num_workers].
    """
    hop = C.audio['hop_length']
    cases = [
        [32, hop * 10, hop, 0, True, False, True, 0],
        [32, hop * 10, hop, 0, True, False, True, 4],
        [1, hop * 10, hop, 0, True, True, True, 0],
        [1, hop, hop, 0, True, True, True, 0],
        [1, hop * 10, hop, 2, True, True, True, 0],
        [1, hop * 10, hop, 0, False, True, True, 0],
        [1, hop * 10, hop, 0, True, False, True, 0],
        [1, hop * 10, hop, 0, True, True, False, 0],
        [1, hop * 10, hop, 0, False, False, False, 0],
    ]
    for case in cases:
        print(case)
        gan_dataset_case(*case)
| 4,221
| 42.979167
| 140
|
py
|
TTS
|
TTS-master/tests/test_tacotron2_model.py
|
import copy
import os
import unittest
import torch
from tests import get_tests_input_path
from torch import nn, optim
from TTS.tts.layers.losses import MSELossMasked
from TTS.tts.models.tacotron2 import Tacotron2
from TTS.utils.io import load_config
from TTS.utils.audio import AudioProcessor
#pylint: disable=unused-variable
# Deterministic seed so the random dummy batches are reproducible.
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Shared test config, audio processor and reference wav for all test cases.
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
ap = AudioProcessor(**c.audio)
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
class TacotronTrainTest(unittest.TestCase):
    """Short Tacotron2 training loop with speaker ids: every parameter must change."""
    def test_train_step(self): # pylint: disable=no-self-use
        """Run 5 optimizer steps and verify all parameters were updated."""
        # dummy batch: 8 samples, char ids < 24, up to 128 chars, 30 mel frames
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 128, (8, )).long().to(device)
        input_lengths = torch.sort(input_lengths, descending=True)[0]
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        mel_lengths[0] = 30
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        # stop targets are 1 past each sample's real mel length
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # group stop targets by the reduction factor r
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        criterion = MSELossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5).to(device)
        model.train()
        # untouched copy to compare parameters against after training
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for i in range(5):
            mel_out, mel_postnet_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
            assert torch.sigmoid(stop_tokens).data.max() <= 1.0
            assert torch.sigmoid(stop_tokens).data.min() >= 0.0
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
class MultiSpeakeTacotronTrainTest(unittest.TestCase):
    """Same training-loop check but conditioning on external speaker embeddings."""
    @staticmethod
    def test_train_step():
        """Run 5 optimizer steps and verify all parameters were updated."""
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 128, (8, )).long().to(device)
        input_lengths = torch.sort(input_lengths, descending=True)[0]
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        mel_lengths[0] = 30
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        # 55-dim external speaker embedding (matches speaker_embedding_dim below)
        speaker_embeddings = torch.rand(8, 55).to(device)
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        criterion = MSELossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, speaker_embedding_dim=55).to(device)
        model.train()
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for i in range(5):
            mel_out, mel_postnet_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings)
            assert torch.sigmoid(stop_tokens).data.max() <= 1.0
            assert torch.sigmoid(stop_tokens).data.min() >= 0.0
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
class TacotronGSTTrainTest(unittest.TestCase):
    """GST-enabled Tacotron2 training check, with a random mel as style input
    first and a real wav-derived mel second."""
    #pylint: disable=no-self-use
    def test_train_step(self):
        """10 optimizer steps per style source; all params (except the GST
        encoder recurrence, which is skipped) must be updated."""
        # with random gst mel style
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 128, (8, )).long().to(device)
        input_lengths = torch.sort(input_lengths, descending=True)[0]
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        mel_lengths[0] = 30
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        criterion = MSELossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens']).to(device)
        model.train()
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for i in range(10):
            mel_out, mel_postnet_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
            assert torch.sigmoid(stop_tokens).data.max() <= 1.0
            assert torch.sigmoid(stop_tokens).data.min() >= 0.0
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for name_param, param_ref in zip(model.named_parameters(), model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            name, param = name_param
            if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
                #print(param.grad)
                continue
            assert (param != param_ref).any(
            ), "param {} {} with shape {} not updated!! \n{}\n{}".format(
                name, count, param.shape, param, param_ref)
            count += 1
        # with file gst style
        # 30-frame mel computed from the reference wav, repeated across the batch
        mel_spec = torch.FloatTensor(ap.melspectrogram(ap.load_wav(WAV_FILE)))[:, :30].unsqueeze(0).transpose(1, 2).to(device)
        mel_spec = mel_spec.repeat(8, 1, 1)
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 128, (8, )).long().to(device)
        input_lengths = torch.sort(input_lengths, descending=True)[0]
        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        mel_lengths[0] = 30
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        criterion = MSELossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens']).to(device)
        model.train()
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for i in range(10):
            mel_out, mel_postnet_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
            assert torch.sigmoid(stop_tokens).data.max() <= 1.0
            assert torch.sigmoid(stop_tokens).data.min() >= 0.0
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for name_param, param_ref in zip(model.named_parameters(), model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            name, param = name_param
            if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
                #print(param.grad)
                continue
            assert (param != param_ref).any(
            ), "param {} {} with shape {} not updated!! \n{}\n{}".format(
                name, count, param.shape, param, param_ref)
            count += 1
class SCGSTMultiSpeakeTacotronTrainTest(unittest.TestCase):
    """GST Tacotron2 conditioned on external speaker embeddings
    (gst_use_speaker_embedding from the test config)."""
    @staticmethod
    def test_train_step():
        """Run 5 optimizer steps; all params except the skipped GST encoder
        recurrence must be updated."""
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 128, (8, )).long().to(device)
        input_lengths = torch.sort(input_lengths, descending=True)[0]
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        mel_lengths[0] = 30
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_embeddings = torch.rand(8, 55).to(device)
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
        criterion = MSELossMasked(seq_len_norm=False).to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, speaker_embedding_dim=55, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens'], gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding']).to(device)
        model.train()
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        for i in range(5):
            mel_out, mel_postnet_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings)
            assert torch.sigmoid(stop_tokens).data.max() <= 1.0
            assert torch.sigmoid(stop_tokens).data.min() >= 0.0
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for name_param, param_ref in zip(model.named_parameters(),
                                         model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            name, param = name_param
            if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
                continue
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
| 14,763
| 49.047458
| 299
|
py
|
TTS
|
TTS-master/tests/test_vocoder_melgan_discriminator.py
|
import numpy as np
import torch
from TTS.vocoder.models.melgan_discriminator import MelganDiscriminator
from TTS.vocoder.models.melgan_multiscale_discriminator import MelganMultiscaleDiscriminator
def test_melgan_discriminator():
    """Single-scale MelGAN discriminator: (4, 1, 2560) in -> (4, 1, 10) scores."""
    disc = MelganDiscriminator()
    print(disc)
    fake_audio = torch.rand((4, 1, 256 * 10))
    scores, _ = disc(fake_audio)
    assert np.all(scores.shape == (4, 1, 10))
def test_melgan_multi_scale_discriminator():
    """Multi-scale discriminator returns 3 score maps with matching feature lists."""
    disc = MelganMultiscaleDiscriminator()
    print(disc)
    fake_audio = torch.rand((4, 1, 256 * 16))
    scores, feats = disc(fake_audio)
    assert len(scores) == 3
    assert len(scores) == len(feats)
    # first-scale score map and its intermediate feature maps
    assert np.all(scores[0].shape == (4, 1, 64))
    assert np.all(feats[0][0].shape == (4, 16, 4096))
    assert np.all(feats[0][1].shape == (4, 64, 1024))
    assert np.all(feats[0][2].shape == (4, 256, 256))
| 882
| 31.703704
| 92
|
py
|
TTS
|
TTS-master/tests/__init__.py
|
import os
def get_tests_path():
    """Returns the absolute path of the directory containing this file."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
def get_tests_input_path():
    """Returns the test data directory (<tests>/inputs)."""
    root = get_tests_path()
    return os.path.join(root, "inputs")
def get_tests_output_path():
    """Returns the directory for test outputs (<tests>/outputs)."""
    root = get_tests_path()
    return os.path.join(root, "outputs")
| 422
| 23.882353
| 61
|
py
|
TTS
|
TTS-master/tests/test_speedy_speech_layers.py
|
import torch
from TTS.tts.layers.speedy_speech.encoder import Encoder
from TTS.tts.layers.speedy_speech.decoder import Decoder
from TTS.tts.layers.speedy_speech.duration_predictor import DurationPredictor
from TTS.tts.utils.generic_utils import sequence_mask
from TTS.tts.models.speedy_speech import SpeedySpeech
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def test_encoder():
    """Both SpeedySpeech encoder variants must map (8, 14, 37) -> (8, 11, 37)."""
    input_dummy = torch.rand(8, 14, 37).to(device)
    input_lengths = torch.randint(31, 37, (8, )).long().to(device)
    # last sample uses the full length so masking is exercised
    input_lengths[-1] = 37
    input_mask = torch.unsqueeze(
        sequence_mask(input_lengths, input_dummy.size(2)), 1).to(device)
    # residual bn conv encoder
    layer = Encoder(out_channels=11,
                    in_hidden_channels=14,
                    encoder_type='residual_conv_bn').to(device)
    output = layer(input_dummy, input_mask)
    assert list(output.shape) == [8, 11, 37]
    # transformer encoder
    layer = Encoder(out_channels=11,
                    in_hidden_channels=14,
                    encoder_type='transformer',
                    encoder_params={
                        'hidden_channels_ffn': 768,
                        'num_heads': 2,
                        "kernel_size": 3,
                        "dropout_p": 0.1,
                        "num_layers": 6,
                        "rel_attn_window_size": 4,
                        "input_length": None
                    }).to(device)
    output = layer(input_dummy, input_mask)
    assert list(output.shape) == [8, 11, 37]
def test_decoder():
    """All three SpeedySpeech decoder variants must map (8, 128, 37) -> (8, 11, 37)."""
    input_dummy = torch.rand(8, 128, 37).to(device)
    input_lengths = torch.randint(31, 37, (8, )).long().to(device)
    # last sample uses the full length so masking is exercised
    input_lengths[-1] = 37
    input_mask = torch.unsqueeze(
        sequence_mask(input_lengths, input_dummy.size(2)), 1).to(device)
    # residual bn conv decoder
    layer = Decoder(out_channels=11, in_hidden_channels=128).to(device)
    output = layer(input_dummy, input_mask)
    assert list(output.shape) == [8, 11, 37]
    # transformer decoder
    layer = Decoder(out_channels=11,
                    in_hidden_channels=128,
                    decoder_type='transformer',
                    decoder_params={
                        'hidden_channels_ffn': 128,
                        'num_heads': 2,
                        "kernel_size": 3,
                        "dropout_p": 0.1,
                        "num_layers": 8,
                        "rel_attn_window_size": 4,
                        "input_length": None
                    }).to(device)
    output = layer(input_dummy, input_mask)
    assert list(output.shape) == [8, 11, 37]
    # wavenet decoder
    layer = Decoder(out_channels=11,
                    in_hidden_channels=128,
                    decoder_type='wavenet',
                    decoder_params={
                        "num_blocks": 12,
                        "hidden_channels": 192,
                        "kernel_size": 5,
                        "dilation_rate": 1,
                        "num_layers": 4,
                        "dropout_p": 0.05
                    }).to(device)
    output = layer(input_dummy, input_mask)
    assert list(output.shape) == [8, 11, 37]
def test_duration_predictor():
    """DurationPredictor maps (B, C, T) features to one duration per timestep."""
    batch, channels, time_steps = 8, 128, 27
    features = torch.rand(batch, channels, time_steps).to(device)
    lengths = torch.randint(20, time_steps, (batch, )).long().to(device)
    lengths[-1] = time_steps  # at least one sample uses the full length
    mask = sequence_mask(lengths, features.size(2)).unsqueeze(1).to(device)
    predictor = DurationPredictor(hidden_channels=channels).to(device)
    durations = predictor(features, mask)
    assert list(durations.shape) == [batch, 1, time_steps]
def test_speedy_speech():
    """SpeedySpeech forward pass in three configurations (plain, speaker-id
    conditioned, external speaker embedding). Each must produce a
    (B, 80, T_de) decoder output, a (B, T_de, T_en) alignment and
    (B, T_en) durations."""
    num_chars = 7
    B = 8
    T_en = 37
    T_de = 74
    x_dummy = torch.randint(0, 7, (B, T_en)).long().to(device)
    x_lengths = torch.randint(31, T_en, (B, )).long().to(device)
    x_lengths[-1] = T_en
    # set durations. max total duration should be equal to T_de
    durations = torch.randint(1, 4, (B, T_en))
    durations = durations * (T_de / durations.sum(1)).unsqueeze(1)
    durations = durations.to(torch.long).to(device)
    max_dur = durations.sum(1).max()
    # pad the first step so every sample sums exactly to T_de
    durations[:, 0] += T_de - max_dur if T_de > max_dur else 0
    y_lengths = durations.sum(1)
    model = SpeedySpeech(num_chars, out_channels=80, hidden_channels=128)
    if use_cuda:
        model.cuda()
    # forward pass
    o_de, o_dr, attn = model(x_dummy, x_lengths, y_lengths, durations)
    assert list(o_de.shape) == [B, 80, T_de], f"{list(o_de.shape)}"
    assert list(attn.shape) == [B, T_de, T_en]
    assert list(o_dr.shape) == [B, T_en]
    # with speaker embedding
    model = SpeedySpeech(num_chars,
                         out_channels=80,
                         hidden_channels=128,
                         num_speakers=10,
                         c_in_channels=256).to(device)
    # BUG FIX: the outputs of this and the next forward pass were previously
    # discarded, so the assertions re-checked the first model's stale tensors.
    # Capture them so each variant is actually verified.
    o_de, o_dr, attn = model.forward(x_dummy,
                                     x_lengths,
                                     y_lengths,
                                     durations,
                                     g=torch.randint(0, 10, (B,)).to(device))
    assert list(o_de.shape) == [B, 80, T_de], f"{list(o_de.shape)}"
    assert list(attn.shape) == [B, T_de, T_en]
    assert list(o_dr.shape) == [B, T_en]
    # with speaker external embedding
    model = SpeedySpeech(num_chars,
                         out_channels=80,
                         hidden_channels=128,
                         num_speakers=10,
                         external_c=True,
                         c_in_channels=256).to(device)
    o_de, o_dr, attn = model.forward(x_dummy,
                                     x_lengths,
                                     y_lengths,
                                     durations,
                                     g=torch.rand((B, 256)).to(device))
    assert list(o_de.shape) == [B, 80, T_de], f"{list(o_de.shape)}"
    assert list(attn.shape) == [B, T_de, T_en]
    assert list(o_dr.shape) == [B, T_en]
| 5,880
| 34.005952
| 79
|
py
|
TTS
|
TTS-master/tests/test_vocoder_parallel_wavegan_generator.py
|
import numpy as np
import torch
from TTS.vocoder.models.parallel_wavegan_generator import ParallelWaveganGenerator
def test_pwgan_generator():
    """PWGAN generator upsamples 80-band mels by prod(upsample_factors) = 256."""
    generator = ParallelWaveganGenerator(
        in_channels=1,
        out_channels=1,
        kernel_size=3,
        num_res_blocks=30,
        stacks=3,
        res_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        dropout=0.0,
        bias=True,
        use_weight_norm=True,
        upsample_factors=[4, 4, 4, 4])
    mel = torch.rand((2, 80, 5))
    waveform = generator(mel)
    assert np.all(waveform.shape == (2, 1, 5 * 256)), waveform.shape
    generator.remove_weight_norm()
    # inference output is longer than the training forward output
    # (4 extra conditioning frames, per the expected shape below)
    waveform = generator.inference(mel)
    assert np.all(waveform.shape == (2, 1, (5 + 4) * 256))
| 767
| 26.428571
| 82
|
py
|
TTS
|
TTS-master/tests/test_wavegrad_train.py
|
import unittest
import numpy as np
import torch
from torch import optim
from TTS.vocoder.models.wavegrad import Wavegrad
#pylint: disable=unused-variable
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class WavegradTrainTest(unittest.TestCase):
    """Short Wavegrad training loop: every parameter must receive an update."""
    def test_train_step(self): # pylint: disable=no-self-use
        """Test if all layers are updated in a basic training cycle"""
        # 8 samples of 20 mel frames * hop 300 audio samples
        input_dummy = torch.rand(8, 1, 20 * 300).to(device)
        mel_spec = torch.rand(8, 80, 20).to(device)
        criterion = torch.nn.L1Loss().to(device)
        model = Wavegrad(in_channels=80,
                         out_channels=1,
                         upsample_factors=[5, 5, 3, 2, 2],
                         upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2],
                                             [1, 2, 4, 8], [1, 2, 4, 8],
                                             [1, 2, 4, 8]])
        # reference copy to detect parameter updates after training
        model_ref = Wavegrad(in_channels=80,
                             out_channels=1,
                             upsample_factors=[5, 5, 3, 2, 2],
                             upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2],
                                                 [1, 2, 4, 8], [1, 2, 4, 8],
                                                 [1, 2, 4, 8]])
        model.train()
        model.to(device)
        # set the diffusion noise schedule before running forward passes
        betas = np.linspace(1e-6, 1e-2, 1000)
        model.compute_noise_level(betas)
        model_ref.load_state_dict(model.state_dict())
        model_ref.to(device)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        for i in range(5):
            y_hat = model.forward(input_dummy, mel_spec, torch.rand(8).to(device))
            optimizer.zero_grad()
            loss = criterion(y_hat, input_dummy)
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-higway layer since it works conditional
            # if count not in [145, 59]:
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
| 2,498
| 38.666667
| 82
|
py
|
TTS
|
TTS-master/tests/test_audio.py
|
import os
import unittest
from tests import get_tests_input_path, get_tests_output_path, get_tests_path
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
TESTS_PATH = get_tests_path()
OUT_PATH = os.path.join(get_tests_output_path(), "audio_tests")
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
os.makedirs(OUT_PATH, exist_ok=True)
conf = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
# pylint: disable=protected-access
class TestAudio(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        """Create the shared AudioProcessor from the test config's audio section."""
        super(TestAudio, self).__init__(*args, **kwargs)
        # reused by every test method in this case
        self.ap = AudioProcessor(**conf.audio)
    def test_audio_synthesis(self):
        """ 1. load wav
            2. set normalization parameters
            3. extract mel-spec
            4. invert to wav and save the output
        """
        print(" > Sanity check for the process wav -> mel -> wav")
        def _test(max_norm, signal_norm, symmetric_norm, clip_norm):
            # Configure the processor's normalization flags, then round-trip
            # the reference wav and write the result for manual inspection.
            self.ap.max_norm = max_norm
            self.ap.signal_norm = signal_norm
            self.ap.symmetric_norm = symmetric_norm
            self.ap.clip_norm = clip_norm
            wav = self.ap.load_wav(WAV_FILE)
            mel = self.ap.melspectrogram(wav)
            wav_ = self.ap.inv_melspectrogram(mel)
            # output filename encodes the flag combination used
            file_name = "/audio_test-melspec_max_norm_{}-signal_norm_{}-symmetric_{}-clip_norm_{}.wav"\
                .format(max_norm, signal_norm, symmetric_norm, clip_norm)
            print(" | > Creating wav file at : ", file_name)
            self.ap.save_wav(wav_, OUT_PATH + file_name)
        # maxnorm = 1.0
        _test(1., False, False, False)
        _test(1., True, False, False)
        _test(1., True, True, False)
        _test(1., True, False, True)
        _test(1., True, True, True)
        # maxnorm = 4.0
        _test(4., False, False, False)
        _test(4., True, False, False)
        _test(4., True, True, False)
        _test(4., True, False, True)
        _test(4., True, True, True)
def test_normalize(self):
"""Check normalization and denormalization for range values and consistency """
print(" > Testing normalization and denormalization.")
wav = self.ap.load_wav(WAV_FILE)
wav = self.ap.sound_norm(wav) # normalize audio to get abetter normalization range below.
self.ap.signal_norm = False
x = self.ap.melspectrogram(wav)
x_old = x
self.ap.signal_norm = True
self.ap.symmetric_norm = False
self.ap.clip_norm = False
self.ap.max_norm = 4.0
x_norm = self.ap.normalize(x)
print(f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}")
assert (x_old - x).sum() == 0
# check value range
assert x_norm.max() <= self.ap.max_norm + 1, x_norm.max()
assert x_norm.min() >= 0 - 1, x_norm.min()
# check denorm.
x_ = self.ap.denormalize(x_norm)
assert (x - x_).sum() < 1e-3, (x - x_).mean()
self.ap.signal_norm = True
self.ap.symmetric_norm = False
self.ap.clip_norm = True
self.ap.max_norm = 4.0
x_norm = self.ap.normalize(x)
print(f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}")
assert (x_old - x).sum() == 0
# check value range
assert x_norm.max() <= self.ap.max_norm, x_norm.max()
assert x_norm.min() >= 0, x_norm.min()
# check denorm.
x_ = self.ap.denormalize(x_norm)
assert (x - x_).sum() < 1e-3, (x - x_).mean()
self.ap.signal_norm = True
self.ap.symmetric_norm = True
self.ap.clip_norm = False
self.ap.max_norm = 4.0
x_norm = self.ap.normalize(x)
print(f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}")
assert (x_old - x).sum() == 0
# check value range
assert x_norm.max() <= self.ap.max_norm + 1, x_norm.max()
assert x_norm.min() >= -self.ap.max_norm - 2, x_norm.min() #pylint: disable=invalid-unary-operand-type
assert x_norm.min() <= 0, x_norm.min()
# check denorm.
x_ = self.ap.denormalize(x_norm)
assert (x - x_).sum() < 1e-3, (x - x_).mean()
self.ap.signal_norm = True
self.ap.symmetric_norm = True
self.ap.clip_norm = True
self.ap.max_norm = 4.0
x_norm = self.ap.normalize(x)
print(f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}")
assert (x_old - x).sum() == 0
# check value range
assert x_norm.max() <= self.ap.max_norm, x_norm.max()
assert x_norm.min() >= -self.ap.max_norm, x_norm.min() #pylint: disable=invalid-unary-operand-type
assert x_norm.min() <= 0, x_norm.min()
# check denorm.
x_ = self.ap.denormalize(x_norm)
assert (x - x_).sum() < 1e-3, (x - x_).mean()
self.ap.signal_norm = True
self.ap.symmetric_norm = False
self.ap.max_norm = 1.0
x_norm = self.ap.normalize(x)
print(f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}")
assert (x_old - x).sum() == 0
assert x_norm.max() <= self.ap.max_norm, x_norm.max()
assert x_norm.min() >= 0, x_norm.min()
x_ = self.ap.denormalize(x_norm)
assert (x - x_).sum() < 1e-3
self.ap.signal_norm = True
self.ap.symmetric_norm = True
self.ap.max_norm = 1.0
x_norm = self.ap.normalize(x)
print(f" > MaxNorm: {self.ap.max_norm}, ClipNorm:{self.ap.clip_norm}, SymmetricNorm:{self.ap.symmetric_norm}, SignalNorm:{self.ap.signal_norm} Range-> {x_norm.max()} -- {x_norm.min()}")
assert (x_old - x).sum() == 0
assert x_norm.max() <= self.ap.max_norm, x_norm.max()
assert x_norm.min() >= -self.ap.max_norm, x_norm.min() #pylint: disable=invalid-unary-operand-type
assert x_norm.min() < 0, x_norm.min()
x_ = self.ap.denormalize(x_norm)
assert (x - x_).sum() < 1e-3
def test_scaler(self):
scaler_stats_path = os.path.join(get_tests_input_path(), 'scale_stats.npy')
conf.audio['stats_path'] = scaler_stats_path
conf.audio['preemphasis'] = 0.0
conf.audio['do_trim_silence'] = True
conf.audio['signal_norm'] = True
ap = AudioProcessor(**conf.audio)
mel_mean, mel_std, linear_mean, linear_std, _ = ap.load_stats(scaler_stats_path)
ap.setup_scaler(mel_mean, mel_std, linear_mean, linear_std)
self.ap.signal_norm = False
self.ap.preemphasis = 0.0
# test scaler forward and backward transforms
wav = self.ap.load_wav(WAV_FILE)
mel_reference = self.ap.melspectrogram(wav)
mel_norm = ap.melspectrogram(wav)
mel_denorm = ap.denormalize(mel_norm)
assert abs(mel_reference - mel_denorm).max() < 1e-4
| 7,469
| 41.20339
| 194
|
py
|
TTS
|
TTS-master/tests/test_vocoder_wavernn.py
|
import numpy as np
import torch
import random
from TTS.vocoder.models.wavernn import WaveRNN
def test_wavernn():
    """Smoke-test WaveRNN: forward output shape and inference output length."""
    net = WaveRNN(
        rnn_dims=512,
        fc_dims=512,
        mode=10,
        mulaw=False,
        pad=2,
        use_aux_net=True,
        use_upsample_net=True,
        upsample_factors=[4, 8, 8],
        feat_dims=80,
        compute_dims=128,
        res_out_dims=128,
        num_res_blocks=10,
        hop_length=256,
        sample_rate=22050,
    )
    # Batched forward pass.
    wav_in = torch.rand((2, 1280))
    mel_in = torch.rand((2, 80, 9))
    out = net(wav_in, mel_in)
    assert np.all(out.shape == (2, 1280, 4 * 256)), out.shape
    # Single-spectrogram inference with a random mel length.
    mel_len = random.randrange(20, 60)
    mel_single = torch.rand((80, mel_len))
    out = net.inference(mel_single, True, 5500, 550)
    assert np.all(out.shape == (256 * (mel_len - 1),))
| 850
| 25.59375
| 67
|
py
|
TTS
|
TTS-master/tests/test_vocoder_wavernn_datasets.py
|
import os
import shutil
import numpy as np
from tests import get_tests_path, get_tests_input_path, get_tests_output_path
from torch.utils.data import DataLoader
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset
from TTS.vocoder.datasets.preprocess import load_wav_feat_data, preprocess_wav_files
# Shared fixtures: output dir, vocoder config, and LJSpeech feature paths.
file_path = os.path.dirname(os.path.realpath(__file__))
OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/")
os.makedirs(OUTPATH, exist_ok=True)
C = load_config(os.path.join(get_tests_input_path(),
                             "test_vocoder_wavernn_config.json"))
test_data_path = os.path.join(get_tests_path(), "data/ljspeech/")
test_mel_feat_path = os.path.join(test_data_path, "mel")
test_quant_feat_path = os.path.join(test_data_path, "quant")
# True when the LJSpeech sample data is available locally.
ok_ljspeech = os.path.exists(test_data_path)
def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, mulaw, num_workers):
    """Run the WaveRNN dataloader with the given parameters and check that
    mel feature shapes are consistent with the input sample counts.

    Preprocesses wav files into mel/quant features, iterates up to 10
    batches, and always removes the generated feature dirs afterwards.
    """
    ap = AudioProcessor(**C.audio)
    # Mutates the shared config C for this case.
    C.batch_size = batch_size
    C.mode = mode
    C.seq_len = seq_len
    C.data_path = test_data_path
    preprocess_wav_files(test_data_path, C, ap)
    _, train_items = load_wav_feat_data(
        test_data_path, test_mel_feat_path, 5)
    dataset = WaveRNNDataset(ap=ap,
                             items=train_items,
                             seq_len=seq_len,
                             hop_len=hop_len,
                             pad=pad,
                             mode=mode,
                             mulaw=mulaw
                             )
    # sampler = DistributedSampler(dataset) if num_gpus > 1 else None
    loader = DataLoader(dataset,
                        shuffle=True,
                        collate_fn=dataset.collate,
                        batch_size=batch_size,
                        num_workers=num_workers,
                        pin_memory=True,
                        )
    max_iter = 10
    count_iter = 0
    try:
        for data in loader:
            x_input, mels, _ = data
            # Each hop of input audio must map to one mel frame, plus padding.
            expected_feat_shape = (ap.num_mels,
                                   (x_input.shape[-1] // hop_len) + (pad * 2))
            assert np.all(
                mels.shape[1:] == expected_feat_shape), f" [!] {mels.shape} vs {expected_feat_shape}"
            assert (mels.shape[2] - pad * 2) * hop_len == x_input.shape[1]
            count_iter += 1
            if count_iter == max_iter:
                break
    # except AssertionError:
    #     shutil.rmtree(test_mel_feat_path)
    #     shutil.rmtree(test_quant_feat_path)
    finally:
        # Always clean up generated features so cases don't contaminate each other.
        shutil.rmtree(test_mel_feat_path)
        shutil.rmtree(test_quant_feat_path)
def test_parametrized_wavernn_dataset():
    '''Run the dataloader case over a grid of parameter combinations.'''
    # (batch_size, seq_len, hop_len, pad, mode, mulaw, num_workers)
    cases = [
        [16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 10, True, 0],
        [16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, "mold", False, 4],
        [1, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 9, False, 0],
        [1, C.audio['hop_length'], C.audio['hop_length'], 2, 10, True, 0],
        [1, C.audio['hop_length'], C.audio['hop_length'], 2, "mold", False, 0],
        [1, C.audio['hop_length'] * 5, C.audio['hop_length'], 4, 10, False, 2],
        [1, C.audio['hop_length'] * 5, C.audio['hop_length'], 2, "mold", False, 0],
    ]
    for case in cases:
        print(case)
        wavernn_dataset_case(*case)
| 3,538
| 37.053763
| 101
|
py
|
TTS
|
TTS-master/tests/symbols_tests.py
|
import unittest
from TTS.tts.utils.text import phonemes
class SymbolsTest(unittest.TestCase):
    """Sanity checks for the phoneme symbol table."""
    def test_uniqueness(self):  #pylint: disable=no-self-use
        # Deduplicating must not change the sorted symbol sequence.
        deduped = sorted(list(set(phonemes)))
        assert sorted(phonemes) == deduped, " {} vs {} ".format(len(phonemes), len(set(phonemes)))
| 276
| 33.625
| 118
|
py
|
TTS
|
TTS-master/tests/test_vocoder_tf_pqmf.py
|
import os
import tensorflow as tf
import soundfile as sf
from librosa.core import load
from tests import get_tests_path, get_tests_input_path
from TTS.vocoder.tf.layers.pqmf import PQMF
# Test fixtures: repo test path and the sample wav used for the PQMF round trip.
TESTS_PATH = get_tests_path()
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
def test_pqmf():
    """Round-trip a wav through TF PQMF analysis/synthesis and save the result.

    Loads the sample wav, splits it into 4 sub-bands with `analysis`,
    reconstructs it with `synthesis`, and writes the reconstruction to
    'tf_pqmf_output.wav' for listening checks.
    """
    w, sr = load(WAV_FILE)  # was loaded twice before; once is enough
    layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0)
    w2 = tf.convert_to_tensor(w[None, None, :])
    b2 = layer.analysis(w2)
    w2_ = layer.synthesis(b2)
    # BUG FIX: previously `w2_ = w2.numpy()` clobbered the synthesized signal
    # with the *input* tensor, so the saved file never reflected synthesis.
    w2_ = w2_.numpy()
    print(w2_.max())
    print(w2_.min())
    print(w2_.mean())
    sf.write('tf_pqmf_output.wav', w2_.flatten(), sr)
| 659
| 21.758621
| 64
|
py
|
TTS
|
TTS-master/notebooks/dataset_analysis/analyze.py
|
# visualisation tools for mimic2
import matplotlib.pyplot as plt
from statistics import stdev, mode, mean, median
from statistics import StatisticsError
import argparse
import os
import csv
import seaborn as sns
import random
from text.cmudict import CMUDict
def get_audio_seconds(frames):
    """Convert a frame count to seconds (each frame spans 12.5 ms)."""
    ms_per_frame = 12.5
    return frames * ms_per_frame / 1000
def append_data_statistics(meta_data):
    """Attach mean/median/mode/std of audio lengths to every char-count bucket.

    Mutates `meta_data` in place and returns it. Mode is computed on values
    rounded to 2 decimals; std falls back to 0 for single-sample buckets.
    """
    for bucket in meta_data.values():
        entries = bucket["data"]
        lengths = [entry["audio_len"] for entry in entries]
        try:
            mode_len = mode([round(entry["audio_len"], 2) for entry in entries])
        except StatisticsError:
            # Mode can be ambiguous; fall back to the first sample length.
            mode_len = lengths[0]
        try:
            std_len = stdev(entry["audio_len"] for entry in entries)
        except StatisticsError:
            # stdev needs at least two points.
            std_len = 0
        bucket["mean"] = mean(lengths)
        bucket["median"] = median(lengths)
        bucket["mode"] = mode_len
        bucket["std"] = std_len
    return meta_data
def process_meta_data(path):
    """Parse a pipe-delimited metadata file into buckets keyed by utterance
    character count, then attach length statistics to each bucket."""
    meta_data = {}
    with open(path, 'r') as f:
        for row in csv.reader(f, delimiter='|'):
            frames = int(row[2])
            utt = row[3]
            entry = {
                "utt": utt,
                "frames": frames,
                "audio_len": get_audio_seconds(frames),
                "row": "{}|{}|{}|{}".format(row[0], row[1], row[2], row[3])
            }
            # Create the bucket on first sight of this character count.
            bucket = meta_data.setdefault(len(utt), {"data": []})
            bucket["data"].append(entry)
    return append_data_statistics(meta_data)
def get_data_points(meta_data):
    """Collect the per-char-count statistic series needed for plotting."""
    keys = list(meta_data)
    return {
        "x": meta_data,
        "y_avg": [meta_data[k]['mean'] for k in keys],
        "y_mode": [meta_data[k]['mode'] for k in keys],
        "y_median": [meta_data[k]['median'] for k in keys],
        "y_std": [meta_data[k]['std'] for k in keys],
        "y_num_samples": [len(meta_data[k]['data']) for k in keys],
    }
def save_training(file_path, meta_data):
    """Flatten all metadata rows, shuffle them, and write one row per line."""
    rows = [
        entry['row'] + "\n"
        for bucket in meta_data.values()
        for entry in bucket['data']
    ]
    random.shuffle(rows)
    with open(file_path, 'w+') as f:
        f.writelines(rows)
def plot(meta_data, save_path=None):
    """Plot avg/mode/median/std/sample-count of audio length against utterance
    character length. If `save_path` is given, each figure is also saved there
    as a PNG; figures are shown later by the caller via plt.show()."""
    save = False
    if save_path:
        save = True
    graph_data = get_data_points(meta_data)
    x = graph_data['x']
    y_avg = graph_data['y_avg']
    y_std = graph_data['y_std']
    y_mode = graph_data['y_mode']
    y_median = graph_data['y_median']
    y_num_samples = graph_data['y_num_samples']
    # One scatter figure per statistic, same x axis throughout.
    plt.figure()
    plt.plot(x, y_avg, 'ro')
    plt.xlabel("character lengths", fontsize=30)
    plt.ylabel("avg seconds", fontsize=30)
    if save:
        name = "char_len_vs_avg_secs"
        plt.savefig(os.path.join(save_path, name))
    plt.figure()
    plt.plot(x, y_mode, 'ro')
    plt.xlabel("character lengths", fontsize=30)
    plt.ylabel("mode seconds", fontsize=30)
    if save:
        name = "char_len_vs_mode_secs"
        plt.savefig(os.path.join(save_path, name))
    plt.figure()
    plt.plot(x, y_median, 'ro')
    plt.xlabel("character lengths", fontsize=30)
    plt.ylabel("median seconds", fontsize=30)
    if save:
        name = "char_len_vs_med_secs"
        plt.savefig(os.path.join(save_path, name))
    plt.figure()
    plt.plot(x, y_std, 'ro')
    plt.xlabel("character lengths", fontsize=30)
    plt.ylabel("standard deviation", fontsize=30)
    if save:
        name = "char_len_vs_std"
        plt.savefig(os.path.join(save_path, name))
    plt.figure()
    plt.plot(x, y_num_samples, 'ro')
    plt.xlabel("character lengths", fontsize=30)
    plt.ylabel("number of samples", fontsize=30)
    if save:
        name = "char_len_vs_num_samples"
        plt.savefig(os.path.join(save_path, name))
def plot_phonemes(train_path, cmu_dict_path, save_path):
    """Plot the phoneme frequency distribution of the training transcript.

    Words missing from the CMU dictionary are counted under the "None" key.
    Only the first pronunciation returned by cmudict.lookup is used.
    """
    cmudict = CMUDict(cmu_dict_path)
    phonemes = {}
    with open(train_path, 'r') as f:
        data = csv.reader(f, delimiter='|')
        phonemes["None"] = 0
        for row in data:
            words = row[3].split()
            for word in words:
                pho = cmudict.lookup(word)
                if pho:
                    # First pronunciation only; split it into individual phonemes.
                    indie = pho[0].split()
                    for nemes in indie:
                        if phonemes.get(nemes):
                            phonemes[nemes] += 1
                        else:
                            phonemes[nemes] = 1
                else:
                    phonemes["None"] += 1
    x, y = [], []
    for key in phonemes:
        x.append(key)
        y.append(phonemes[key])
    plt.figure()
    plt.rcParams["figure.figsize"] = (50, 20)
    # NOTE(review): positional x/y args to seaborn.barplot are deprecated in
    # newer seaborn versions — confirm the pinned seaborn still accepts them.
    barplot = sns.barplot(x, y)
    if save_path:
        fig = barplot.get_figure()
        fig.savefig(os.path.join(save_path, "phoneme_dist"))
def main():
    """CLI entry point: parse args, plot dataset statistics, and optionally
    the phoneme distribution, then show all figures."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train_file_path', required=True,
        help='this is the path to the train.txt file that the preprocess.py script creates'
    )
    parser.add_argument(
        '--save_to', help='path to save charts of data to'
    )
    parser.add_argument(
        '--cmu_dict_path', help='give cmudict-0.7b to see phoneme distribution'
    )
    args = parser.parse_args()
    meta_data = process_meta_data(args.train_file_path)
    plt.rcParams["figure.figsize"] = (10, 5)
    plot(meta_data, save_path=args.save_to)
    if args.cmu_dict_path:
        plt.rcParams["figure.figsize"] = (30, 10)
        plot_phonemes(args.train_file_path, args.cmu_dict_path, args.save_to)
    plt.show()
if __name__ == '__main__':
    main()
| 6,250
| 27.939815
| 91
|
py
|
layer-norm
|
layer-norm-master/layers.py
|
"""
Layer functions
"""
import theano
import theano.tensor as tensor
import numpy
# layer normalization
def ln(x, b, s):
    """Layer normalization across axis 1 with learned shift `b` and scale `s`."""
    _eps = 1e-5
    centered = x - x.mean(1)[:, None]
    normed = centered / tensor.sqrt(x.var(1)[:, None] + _eps)
    return s[None, :] * normed + b[None, :]
# layers: 'name': ('parameter initializer', 'feedforward')
# Registry resolved by get_layer(): layer name -> (param-init fn name, apply fn name).
layers = {'ff': ('param_init_fflayer', 'fflayer'),
          'gru': ('param_init_gru', 'gru_layer'),
          'lngru': ('param_init_lngru', 'lngru_layer'),
          'lstm': ('param_init_lstm', 'lstm_layer'),
          'lnlstm': ('param_init_lnlstm', 'lnlstm_layer'),
          }
def get_layer(name):
    """
    Return (param init fn, feedforward fn) for the given layer name.

    Raises KeyError for unknown names. Note: resolves the function names
    from the `layers` registry via eval — safe only because the registry
    is a module-level constant, never user input.
    """
    fns = layers[name]
    return (eval(fns[0]), eval(fns[1]))
# Feedforward layer
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
    """
    Initialize parameters of a feedforward (affine) layer: weight `W` and
    bias `b`, stored in `params` under `prefix`-qualified keys.
    Dimensions default to options['dim_proj'].
    """
    if nin == None:
        nin = options['dim_proj']
    if nout == None:
        nout = options['dim_proj']
    params[_p(prefix,'W')] = norm_weight(nin, nout, ortho=ortho)
    params[_p(prefix,'b')] = numpy.zeros((nout,)).astype('float32')
    return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
    """
    Feedforward pass: activ(state_below . W + b).
    `activ` is a Python expression string evaluated with eval — callers must
    only pass trusted literals (tanh by default).
    """
    return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
    """
    Initialize GRU parameters: W/b for the concatenated reset+update gates,
    U for their recurrent weights, and Wx/Ux/bx for the candidate state.
    Dimensions default to options['dim_proj'].
    """
    if nin == None:
        nin = options['dim_proj']
    if dim == None:
        dim = options['dim_proj']
    # Reset and update gate input weights, concatenated along axis 1.
    W = numpy.concatenate([norm_weight(nin,dim),
                           norm_weight(nin,dim)], axis=1)
    params[_p(prefix,'W')] = W
    params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix,'U')] = U
    # Candidate hidden-state weights.
    Wx = norm_weight(nin, dim)
    params[_p(prefix,'Wx')] = Wx
    Ux = ortho_weight(dim)
    params[_p(prefix,'Ux')] = Ux
    params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
    return params
def gru_layer(tparams, state_below, init_state, options, prefix='gru', mask=None, **kwargs):
    """
    Feedforward pass through a GRU over `state_below` (steps x samples x nin,
    or steps x nin for a single sample). Returns [hidden_states] from
    theano.scan. `mask` zeros-out padded steps; missing mask/init_state
    default to ones/zeros.
    """
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    dim = tparams[_p(prefix,'Ux')].shape[1]
    if init_state == None:
        init_state = tensor.alloc(0., n_samples, dim)
    if mask == None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    def _slice(_x, n, dim):
        # Pick the n-th dim-wide chunk of a concatenated gate tensor.
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # Precompute input projections for all steps at once.
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
    U = tparams[_p(prefix, 'U')]
    Ux = tparams[_p(prefix, 'Ux')]
    def _step_slice(m_, x_, xx_, h_, U, Ux):
        # Standard GRU step: r/u gates, candidate h, mask-gated update.
        preact = tensor.dot(h_, U)
        preact += x_
        r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        preactx = tensor.dot(h_, Ux)
        preactx = preactx * r
        preactx = preactx + xx_
        h = tensor.tanh(preactx)
        h = u * h_ + (1. - u) * h
        h = m_[:,None] * h + (1. - m_)[:,None] * h_
        return h
    seqs = [mask, state_below_, state_belowx]
    _step = _step_slice
    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info = [init_state],
                                non_sequences = [tparams[_p(prefix, 'U')],
                                                 tparams[_p(prefix, 'Ux')]],
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps,
                                profile=False,
                                strict=True)
    rval = [rval]
    return rval
# LN-GRU layer
def param_init_lngru(options, params, prefix='lngru', nin=None, dim=None):
    """
    Initialize GRU parameters plus layer-norm shift (b1..b4) and scale
    (s1..s4) vectors for the four normalized pre-activations used by
    lngru_layer. Shifts start at 0, scales at 1.
    """
    if nin == None:
        nin = options['dim_proj']
    if dim == None:
        dim = options['dim_proj']
    W = numpy.concatenate([norm_weight(nin,dim),
                           norm_weight(nin,dim)], axis=1)
    params[_p(prefix,'W')] = W
    params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix,'U')] = U
    Wx = norm_weight(nin, dim)
    params[_p(prefix,'Wx')] = Wx
    Ux = ortho_weight(dim)
    params[_p(prefix,'Ux')] = Ux
    params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
    # LN parameters: b* are per-unit shifts, s* are per-unit gains.
    scale_add = 0.0
    scale_mul = 1.0
    params[_p(prefix,'b1')] = scale_add * numpy.ones((2*dim)).astype('float32')
    params[_p(prefix,'b2')] = scale_add * numpy.ones((1*dim)).astype('float32')
    params[_p(prefix,'b3')] = scale_add * numpy.ones((2*dim)).astype('float32')
    params[_p(prefix,'b4')] = scale_add * numpy.ones((1*dim)).astype('float32')
    params[_p(prefix,'s1')] = scale_mul * numpy.ones((2*dim)).astype('float32')
    params[_p(prefix,'s2')] = scale_mul * numpy.ones((1*dim)).astype('float32')
    params[_p(prefix,'s3')] = scale_mul * numpy.ones((2*dim)).astype('float32')
    params[_p(prefix,'s4')] = scale_mul * numpy.ones((1*dim)).astype('float32')
    return params
def lngru_layer(tparams, state_below, init_state, options, prefix='lngru', mask=None, one_step=False, **kwargs):
    """
    Feedforward pass through a layer-normalized GRU. Same contract as
    gru_layer, with LN applied to the gate and candidate pre-activations
    (b1..b4 shifts, s1..s4 gains).
    """
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    dim = tparams[_p(prefix,'Ux')].shape[1]
    if init_state == None:
        init_state = tensor.alloc(0., n_samples, dim)
    if mask == None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
    U = tparams[_p(prefix, 'U')]
    Ux = tparams[_p(prefix, 'Ux')]
    def _step_slice(m_, x_, xx_, h_, U, Ux, b1, b2, b3, b4, s1, s2, s3, s4):
        # LN on input projections and recurrent pre-activations, then GRU step.
        x_ = ln(x_, b1, s1)
        xx_ = ln(xx_, b2, s2)
        preact = tensor.dot(h_, U)
        preact = ln(preact, b3, s3)
        preact += x_
        r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        preactx = tensor.dot(h_, Ux)
        preactx = ln(preactx, b4, s4)
        preactx = preactx * r
        preactx = preactx + xx_
        h = tensor.tanh(preactx)
        h = u * h_ + (1. - u) * h
        h = m_[:,None] * h + (1. - m_)[:,None] * h_
        return h
    seqs = [mask, state_below_, state_belowx]
    _step = _step_slice
    non_seqs = [tparams[_p(prefix, 'U')], tparams[_p(prefix, 'Ux')]]
    non_seqs += [tparams[_p(prefix, 'b1')], tparams[_p(prefix, 'b2')], tparams[_p(prefix, 'b3')], tparams[_p(prefix, 'b4')]]
    non_seqs += [tparams[_p(prefix, 's1')], tparams[_p(prefix, 's2')], tparams[_p(prefix, 's3')], tparams[_p(prefix, 's4')]]
    if one_step:
        # NOTE(review): this call passes only 6 of _step's 14 positional args
        # (the LN shift/gain params are missing) — it looks like it would
        # raise a TypeError if one_step=True is ever used; confirm callers.
        rval = _step(*(seqs+[init_state, tparams[_p(prefix, 'U')], tparams[_p(prefix, 'Ux')]]))
    else:
        rval, updates = theano.scan(_step,
                                    sequences=seqs,
                                    outputs_info = [init_state],
                                    non_sequences = non_seqs,
                                    name=_p(prefix, '_layers'),
                                    n_steps=nsteps,
                                    profile=False,
                                    strict=True)
    rval = [rval]
    return rval
# LSTM layer init
def param_init_lstm(options,
                    params,
                    prefix='lstm',
                    nin=None,
                    dim=None):
    """
    Initialize LSTM parameters: input weights W, recurrent weights U, and
    bias b, each covering the four gates (i, f, o, candidate) concatenated
    along axis 1. Dimensions default to options['dim_proj'].
    """
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    W = numpy.concatenate([norm_weight(nin,dim),
                           norm_weight(nin,dim),
                           norm_weight(nin,dim),
                           norm_weight(nin,dim)],
                          axis=1)
    params[prfx(prefix,'W')] = W
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim),
                           ortho_weight(dim),
                           ortho_weight(dim)],
                          axis=1)
    params[prfx(prefix,'U')] = U
    params[prfx(prefix,'b')] = numpy.zeros((4 * dim,)).astype('float32')
    return params
# LSTM layer
def lstm_layer(tparams, state_below,
               options,
               prefix='lstm',
               mask=None, one_step=False,
               init_state=None,
               init_memory=None,
               nsteps=None,
               **kwargs):
    """
    Feedforward pass through an LSTM. With one_step=True performs a single
    step and returns [h, c]; otherwise scans over all steps and returns the
    scan outputs [h_seq, c_seq]. When options['learn_h0'] is set, a learnable
    initial hidden state is created and registered in tparams.
    """
    if nsteps is None:
        nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    param = lambda name: tparams[prfx(prefix, name)]
    dim = param('U').shape[0]
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # initial/previous state
    if init_state is None:
        if not options['learn_h0']:
            init_state = tensor.alloc(0., n_samples, dim)
        else:
            # Learnable h0, broadcast over the batch and added to tparams.
            init_state0 = sharedX(numpy.zeros((options['dim'])),
                                  name=prfx(prefix, "h0"))
            init_state = tensor.alloc(init_state0, n_samples, dim)
            tparams[prfx(prefix, 'h0')] = init_state0
    U = param('U')
    b = param('b')
    W = param('W')
    non_seqs = [U, b, W]
    # initial/previous memory
    if init_memory is None:
        init_memory = tensor.alloc(0., n_samples, dim)
    def _slice(_x, n, dim):
        # Pick the n-th gate chunk from the concatenated pre-activation.
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    def _step(mask, sbelow, sbefore, cell_before, *args):
        # Standard LSTM step with mask-gated state/memory carry-over.
        preact = dot(sbefore, param('U'))
        preact += sbelow
        preact += param('b')
        i = Sigmoid(_slice(preact, 0, dim))
        f = Sigmoid(_slice(preact, 1, dim))
        o = Sigmoid(_slice(preact, 2, dim))
        c = Tanh(_slice(preact, 3, dim))
        c = f * cell_before + i * c
        c = mask * c + (1. - mask) * cell_before
        h = o * tensor.tanh(c)
        h = mask * h + (1. - mask) * sbefore
        return h, c
    # Project the inputs for all steps at once.
    lstm_state_below = dot(state_below, param('W')) + param('b')
    if state_below.ndim == 3:
        lstm_state_below = lstm_state_below.reshape((state_below.shape[0],
                                                     state_below.shape[1],
                                                     -1))
    if one_step:
        mask = mask.dimshuffle(0, 'x')
        h, c = _step(mask, lstm_state_below, init_state, init_memory)
        rval = [h, c]
    else:
        if mask.ndim == 3 and mask.ndim == state_below.ndim:
            mask = mask.reshape((mask.shape[0], \
                mask.shape[1]*mask.shape[2])).dimshuffle(0, 1, 'x')
        elif mask.ndim == 2:
            mask = mask.dimshuffle(0, 1, 'x')
        rval, updates = theano.scan(_step,
                                    sequences=[mask, lstm_state_below],
                                    outputs_info = [init_state,
                                                    init_memory],
                                    name=prfx(prefix, '_layers'),
                                    non_sequences=non_seqs,
                                    strict=True,
                                    n_steps=nsteps)
    return rval
# LN-LSTM init
def param_init_lnlstm(options,
                      params,
                      prefix='lnlstm',
                      nin=None,
                      dim=None):
    """
    Initialize LN-LSTM parameters: W/U/b as in param_init_lstm plus
    layer-norm shift (b1..b3) and gain (s1..s3) vectors for the input
    projection, recurrent projection, and cell state. Shifts start at 0,
    gains at 1.
    """
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    W = numpy.concatenate([norm_weight(nin,dim),
                           norm_weight(nin,dim),
                           norm_weight(nin,dim),
                           norm_weight(nin,dim)],
                          axis=1)
    params[prfx(prefix,'W')] = W
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim),
                           ortho_weight(dim),
                           ortho_weight(dim)],
                          axis=1)
    params[prfx(prefix,'U')] = U
    params[prfx(prefix,'b')] = numpy.zeros((4 * dim,)).astype('float32')
    # lateral parameters
    scale_add = 0.0
    scale_mul = 1.0
    params[prfx(prefix,'b1')] = scale_add * numpy.ones((4*dim)).astype('float32')
    params[prfx(prefix,'b2')] = scale_add * numpy.ones((4*dim)).astype('float32')
    params[prfx(prefix,'b3')] = scale_add * numpy.ones((1*dim)).astype('float32')
    params[prfx(prefix,'s1')] = scale_mul * numpy.ones((4*dim)).astype('float32')
    params[prfx(prefix,'s2')] = scale_mul * numpy.ones((4*dim)).astype('float32')
    params[prfx(prefix,'s3')] = scale_mul * numpy.ones((1*dim)).astype('float32')
    return params
# LN-LSTM layer
def lnlstm_layer(tparams, state_below,
                 options,
                 prefix='lnlstm',
                 mask=None, one_step=False,
                 init_state=None,
                 init_memory=None,
                 nsteps=None,
                 **kwargs):
    """
    Feedforward pass through a layer-normalized LSTM. Same contract as
    lstm_layer, with LN applied to the input projection (b1/s1), the
    recurrent projection (b2/s2), and the cell state before the output
    tanh (b3/s3).
    """
    if nsteps is None:
        nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    param = lambda name: tparams[prfx(prefix, name)]
    dim = param('U').shape[0]
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # initial/previous state
    if init_state is None:
        if not options['learn_h0']:
            init_state = tensor.alloc(0., n_samples, dim)
        else:
            init_state0 = sharedX(numpy.zeros((options['dim'])),
                                  name=prfx(prefix, "h0"))
            init_state = tensor.alloc(init_state0, n_samples, dim)
            tparams[prfx(prefix, 'h0')] = init_state0
    U = param('U')
    b = param('b')
    W = param('W')
    non_seqs = [U, b, W]
    non_seqs.extend(list(map(param, "b1 b2 b3 s1 s2 s3".split())))
    # initial/previous memory
    if init_memory is None:
        init_memory = tensor.alloc(0., n_samples, dim)
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    def _step(mask, sbelow, sbefore, cell_before, *args):
        sbelow_ = ln(sbelow, param('b1'), param('s1'))
        sbefore_ = ln(dot(sbefore, param('U')), param('b2'), param('s2'))
        # NOTE(review): `sbelow` already includes param('b') (added below when
        # building lstm_state_below), so the bias appears to be added twice
        # here — confirm whether this is intentional.
        preact = sbefore_ + sbelow_ + param('b')
        i = Sigmoid(_slice(preact, 0, dim))
        f = Sigmoid(_slice(preact, 1, dim))
        o = Sigmoid(_slice(preact, 2, dim))
        c = Tanh(_slice(preact, 3, dim))
        c = f * cell_before + i * c
        c = mask * c + (1. - mask) * cell_before
        # LN on the cell state before the output nonlinearity.
        c_ = ln(c, param('b3'), param('s3'))
        h = o * tensor.tanh(c_)
        h = mask * h + (1. - mask) * sbefore
        return h, c
    lstm_state_below = dot(state_below, param('W')) + param('b')
    if state_below.ndim == 3:
        lstm_state_below = lstm_state_below.reshape((state_below.shape[0],
                                                     state_below.shape[1],
                                                     -1))
    if one_step:
        mask = mask.dimshuffle(0, 'x')
        h, c = _step(mask, lstm_state_below, init_state, init_memory)
        rval = [h, c]
    else:
        if mask.ndim == 3 and mask.ndim == state_below.ndim:
            mask = mask.reshape((mask.shape[0], \
                mask.shape[1]*mask.shape[2])).dimshuffle(0, 1, 'x')
        elif mask.ndim == 2:
            mask = mask.dimshuffle(0, 1, 'x')
        rval, updates = theano.scan(_step,
                                    sequences=[mask, lstm_state_below],
                                    outputs_info = [init_state,
                                                    init_memory],
                                    name=prfx(prefix, '_layers'),
                                    non_sequences=non_seqs,
                                    strict=True,
                                    n_steps=nsteps)
    return rval
| 16,949
| 32.697813
| 124
|
py
|
Python-Annotator-for-VideoS
|
Python-Annotator-for-VideoS-master/pavs.py
|
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QLineEdit, QComboBox, QFileDialog, QStyleFactory, QHBoxLayout, QLabel, QSizePolicy, QSlider, QStyle, QVBoxLayout, QWidget, QStatusBar, QTableWidget, QVBoxLayout, QTableWidgetItem, QHBoxLayout, QSplitter, QGroupBox, QFormLayout, QAction, QGridLayout, QShortcut
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5 import QtCore, Qt, QtGui
from PyQt5.QtCore import QRect, QSize, Qt, QUrl, QDir, QTime, pyqtSlot
from PyQt5.QtGui import QFont, QPixmap, QImage, QColor, QPainter, QPen, QKeySequence, QStandardItemModel
import os
import csv
import sys
import numpy as np
audio_extensions = [".wav", ".mp3"]
video_extensions = [".avi", ".mp4", ".mkv"]
class Window(QMainWindow):
def __init__(self):
super().__init__()
self.title = "Python Annotator for VideoS"
# self.top = 100
# self.left = 100
# self.width = 300
# self.height = 400
# self.setWindowState = "Qt.WindowMaximized"
iconName = "home.png"
self.InitWindow()
def InitWindow(self):
self.setWindowTitle(self.title)
# self.setWindowIcon(QtGui.QIcon(iconName))
self.setWindowState(QtCore.Qt.WindowMaximized)
self.UiComponents()
self.show()
def UiComponents(self):
self.rowNo = 1
self.colNo = 0
self.fName = ""
self.fName2 = ""
self.fileNameExist = ""
self.dropDownName = ""
self.model = QStandardItemModel()
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.tableWidget = QTableWidget()
self.tableWidget.cellClicked.connect(self.checkTableFrame)
self.videoWidget = QVideoWidget()
self.frameID=0
self.insertBaseRow()
openButton = QPushButton("Open...")
openButton.clicked.connect(self.openFile)
self.playButton = QPushButton()
self.playButton.setEnabled(False)
self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
self.playButton.clicked.connect(self.play)
self.lbl = QLabel('00:00:00')
self.lbl.setFixedWidth(60)
self.lbl.setUpdatesEnabled(True)
# self.lbl.setStyleSheet(stylesheet(self))
self.elbl = QLabel('00:00:00')
self.elbl.setFixedWidth(60)
self.elbl.setUpdatesEnabled(True)
# self.elbl.setStyleSheet(stylesheet(self))
self.nextButton = QPushButton("-->")
self.nextButton.clicked.connect(self.next)
self.delButton = QPushButton("Delete")
self.delButton.clicked.connect(self.delete)
self.exportButton = QPushButton("Export")
self.exportButton.clicked.connect(self.export)
self.importButton = QPushButton("Import")
self.importButton.clicked.connect(self.importCSV)
# self.ctr = QLineEdit()
# self.ctr.setPlaceholderText("Extra")
self.startTime = QLineEdit()
self.startTime.setPlaceholderText("Select Start Time")
self.endTime = QLineEdit()
self.endTime.setPlaceholderText("Select End Time")
self.iLabel = QComboBox(self)
self.iLabel.addItem("1. Eye Contact")
self.iLabel.addItem("2. Pointing")
self.iLabel.addItem("3. Response to Names")
self.iLabel.addItem("4. Following Pointing")
self.iLabel.addItem("5. Babbling")
self.iLabel.addItem("6. Question-Answering")
self.iLabel.addItem("7. Showing")
self.iLabel.addItem("8. Following Instructions")
self.iLabel.activated[str].connect(self.style_choice)
# self.iLabel = QLineEdit()
# self.iLabel.setPlaceholderText("Label")
self.positionSlider = QSlider(Qt.Horizontal)
self.positionSlider.setRange(0, 100)
self.positionSlider.sliderMoved.connect(self.setPosition)
self.positionSlider.sliderMoved.connect(self.handleLabel)
self.positionSlider.setSingleStep(2)
self.positionSlider.setPageStep(20)
self.positionSlider.setAttribute(Qt.WA_TranslucentBackground, True)
self.errorLabel = QLabel()
self.errorLabel.setSizePolicy(QSizePolicy.Preferred,
QSizePolicy.Maximum)
# Main plotBox
plotBox = QHBoxLayout()
controlLayout = QHBoxLayout()
# controlLayout.setContentsMargins(0, 0, 0, 0)
controlLayout.addWidget(openButton)
controlLayout.addWidget(self.playButton)
controlLayout.addWidget(self.lbl)
controlLayout.addWidget(self.positionSlider)
controlLayout.addWidget(self.elbl)
wid = QWidget(self)
self.setCentralWidget(wid)
# Left Layout{
# layout.addWidget(self.videoWidget)
layout = QVBoxLayout()
layout.addWidget(self.videoWidget, 3)
# layout.addLayout(self.grid_root)
layout.addLayout(controlLayout)
layout.addWidget(self.errorLabel)
plotBox.addLayout(layout, 5)
# }
# Right Layout {
inputFields = QHBoxLayout()
inputFields.addWidget(self.startTime)
inputFields.addWidget(self.endTime)
inputFields.addWidget(self.iLabel)
# inputFields.addWidget(self.ctr)
feats = QHBoxLayout()
feats.addWidget(self.nextButton)
feats.addWidget(self.delButton)
feats.addWidget(self.exportButton)
feats.addWidget(self.importButton)
layout2 = QVBoxLayout()
layout2.addWidget(self.tableWidget)
layout2.addLayout(inputFields, 1)
layout2.addLayout(feats, 2)
# layout2.addWidget(self.nextButton)
# }
plotBox.addLayout(layout2, 2)
# self.setLayout(layout)
wid.setLayout(plotBox)
self.shortcut = QShortcut(QKeySequence("["), self)
self.shortcut.activated.connect(self.addStartTime)
self.shortcut = QShortcut(QKeySequence("]"), self)
self.shortcut.activated.connect(self.addEndTime)
self.shortcut = QShortcut(QKeySequence("L"), self)
self.shortcut.activated.connect(self.openFile)
self.shortcut = QShortcut(QKeySequence("C"), self)
self.shortcut.activated.connect(self.clearTable)
self.shortcut = QShortcut(QKeySequence(Qt.Key_Right), self)
self.shortcut.activated.connect(self.forwardSlider)
self.shortcut = QShortcut(QKeySequence(Qt.Key_Left), self)
self.shortcut.activated.connect(self.backSlider)
self.shortcut = QShortcut(QKeySequence(Qt.Key_Up), self)
self.shortcut.activated.connect(self.volumeUp)
self.shortcut = QShortcut(QKeySequence(Qt.Key_Down), self)
self.shortcut.activated.connect(self.volumeDown)
self.shortcut = QShortcut(QKeySequence(Qt.ShiftModifier + Qt.Key_Right) , self)
self.shortcut.activated.connect(self.forwardSlider10)
self.shortcut = QShortcut(QKeySequence(Qt.ShiftModifier + Qt.Key_Left) , self)
self.shortcut.activated.connect(self.backSlider10)
self.mediaPlayer.setVideoOutput(self.videoWidget)
self.mediaPlayer.stateChanged.connect(self.mediaStateChanged)
self.mediaPlayer.positionChanged.connect(self.positionChanged)
self.mediaPlayer.positionChanged.connect(self.handleLabel)
self.mediaPlayer.durationChanged.connect(self.durationChanged)
self.mediaPlayer.error.connect(self.handleError)
def openFile(self):
    """Prompt for a movie file and load it into the media player."""
    fileName, _ = QFileDialog.getOpenFileName(self, "Open Movie",
                                              QDir.homePath())
    if fileName == '':
        return  # dialog cancelled
    self.fileNameExist = fileName
    url = QUrl.fromLocalFile(fileName)
    self.mediaPlayer.setMedia(QMediaContent(url))
    self.playButton.setEnabled(True)
    self.videopath = url
    # show the loaded path in the status label
    self.errorLabel.setText(fileName)
    self.errorLabel.setStyleSheet('color: black')
def play(self):
    """Toggle between playing and pausing the loaded media."""
    player = self.mediaPlayer
    if player.state() == QMediaPlayer.PlayingState:
        player.pause()
    else:
        player.play()
def _play_video(self):
    """Advance one frame while frame-based playback is active."""
    if not (self.is_playing_video and self.video_fps):
        return
    next_frame = min(self.render_frame_idx + 1, self.frame_count)
    print(next_frame)
    if next_frame == self.frame_count:
        # reached the last frame: toggle playback off
        self.on_play_video_clicked()
    else:
        self.target_frame_idx = next_frame
def style_choice(self, text):
    """Remember and apply the Qt widget style chosen in the drop-down."""
    self.dropDownName = text
    new_style = QStyleFactory.create(text)
    QApplication.setStyle(new_style)
def addStartTime(self):
    """Copy the current playback time ('[' shortcut) into the start field."""
    current = self.lbl.text()
    self.startTime.setText(current)
def addEndTime(self):
    """Copy the current playback time (']' shortcut) into the end field."""
    current = self.lbl.text()
    self.endTime.setText(current)
def next(self):
    """Commit the start/end/label fields as the next annotation row."""
    values = (
        self.startTime.text(),
        self.endTime.text(),
        str(self.iLabel.currentIndex() + 1),
        # drop the leading index token from e.g. "3 Walking"
        self.iLabel.currentText().split(' ', 1)[1],
    )
    for col, value in enumerate(values):
        self.tableWidget.setItem(self.rowNo, col, QTableWidgetItem(value))
    # rewind the column cursor and move to the next row
    self.colNo = 0
    self.rowNo += 1
def delete(self):
    """Remove every currently selected row from the table."""
    # persistent indices stay valid while earlier rows are removed
    selected = [QtCore.QPersistentModelIndex(mi)
                for mi in self.tableWidget.selectionModel().selectedRows()]
    # keep the logical row counter in sync with the shrinking table
    self.rowNo = self.rowNo - len(selected)
    for index in selected:
        self.tableWidget.removeRow(index.row())
def clearTable(self):
    """Delete every row, then re-create the header row."""
    table = self.tableWidget
    while table.rowCount() > 0:
        table.removeRow(0)
    self.insertBaseRow()
    print("Clearing")
def export(self):
    """Save the annotation table as CSV, defaulting to the video's base name.

    Does nothing unless a video has been loaded (``self.fileNameExist``).
    Each row is written up to (not including) its first unset cell.
    """
    if not self.fileNameExist:
        return
    # default file name: video file base name with a .csv extension
    self.fName = ((self.fileNameExist.rsplit('/', 1)[1]).rsplit('.', 1))[0]
    path, _ = QFileDialog.getSaveFileName(
        self, 'Save File', QDir.homePath() + "/" + self.fName + ".csv",
        "CSV Files(*.csv *.txt)")
    if not path:
        return
    # newline='' is required by the csv module; without it Windows gets
    # a blank line after every record
    with open(path, 'w', newline='') as stream:
        print("saving", path)
        writer = csv.writer(stream)
        for row in range(self.tableWidget.rowCount()):
            rowdata = []
            for column in range(self.tableWidget.columnCount()):
                item = self.tableWidget.item(row, column)
                # original compared `item != ""` too, which is always true
                # for a QTableWidgetItem — only the None check is meaningful
                if item is None:
                    break  # stop at the first unset cell in this row
                rowdata.append(item.text())
            writer.writerow(rowdata)
def importCSV(self):
    """Load annotations from a CSV file, replacing the table contents."""
    self.clearTable()
    path, _ = QFileDialog.getOpenFileName(self, 'Save File', QDir.homePath(),
                                          "CSV Files(*.csv *.txt)")
    print(path)
    if not path:
        return
    with open(path, 'r') as stream:
        print("loading", path)
        reader = csv.reader(stream)
        for i, row in enumerate(reader):
            # row 0 is the header written by export(); rows must have
            # exactly 4 fields to be imported
            if i == 0 or len(row) != 4:
                continue
            start, end, label_idx, label_name = row
            cells = (start, end, str(label_idx), label_name)
            for col, text in enumerate(cells):
                self.tableWidget.setItem(self.rowNo, col,
                                         QTableWidgetItem(text))
            self.rowNo += 1
            self.colNo = 0
def insertBaseRow(self):
    """Initialize table geometry and write the header row (row 0)."""
    self.tableWidget.setColumnCount(4)
    self.tableWidget.setRowCount(50)
    # data rows start below the header
    self.rowNo = 1
    self.colNo = 0
    headers = ("Start Time", "End Time", "Label Index", "Label Name")
    for col, text in enumerate(headers):
        self.tableWidget.setItem(0, col, QTableWidgetItem(text))
def checkTableFrame(self, row, column):
    """Seek the video to the time stored in a clicked Start/End-time cell.

    Only data rows (row > 0) and the two time columns (0 and 1) react.
    Cell text is parsed as "HH:MM:SS"; on any parse/seek failure an
    error message is shown instead of raising.
    """
    if row <= 0 or column >= 2:
        return
    item = self.tableWidget.item(row, column)
    # BUG FIX: original tested `item != (None and "")`, which reduces to
    # `item != None` — also require non-empty text before parsing
    if item is None or item.text() == "":
        return
    try:
        parts = item.text().split(":")
        frameTime = int(parts[2]) + int(parts[1]) * 60 + int(parts[0]) * 3600
        # (removed: an unused parse of self.elbl's duration text)
        # seek slightly past the exact second so the frame is rendered
        self.mediaPlayer.setPosition(frameTime * 1000 + 1 * 60)
    except Exception:
        # narrow from bare `except:` so KeyboardInterrupt etc. propagate
        self.errorLabel.setText("Some Video Error - Please Recheck Video Imported!")
        self.errorLabel.setStyleSheet('color: red')
def mediaStateChanged(self, state):
    """Swap the play-button icon to match the player's current state."""
    playing = self.mediaPlayer.state() == QMediaPlayer.PlayingState
    icon = QStyle.SP_MediaPause if playing else QStyle.SP_MediaPlay
    self.playButton.setIcon(self.style().standardIcon(icon))
def positionChanged(self, position):
    """Mirror the playback position (ms) onto the seek slider."""
    self.positionSlider.setValue(position)
def durationChanged(self, duration):
    """Resize the seek slider and refresh the total-duration label."""
    self.positionSlider.setRange(0, duration)
    total = QTime(0, 0, 0, 0).addMSecs(self.mediaPlayer.duration())
    self.elbl.setText(total.toString())
def setPosition(self, position):
    """Slider callback: seek the media player to `position` (ms)."""
    self.mediaPlayer.setPosition(position)
def handleError(self):
    """Disable playback and surface the player's error message in red."""
    self.playButton.setEnabled(False)
    message = "Error: " + self.mediaPlayer.errorString()
    self.errorLabel.setText(message)
    self.errorLabel.setStyleSheet('color: red')
def forwardSlider(self):
    """Nudge playback forward by 60 ms (Right arrow)."""
    player = self.mediaPlayer
    player.setPosition(player.position() + 1 * 60)
def forwardSlider10(self):
    """Jump playback forward by 60 s (Shift+Right)."""
    player = self.mediaPlayer
    player.setPosition(player.position() + 1000 * 60)
def backSlider(self):
    """Nudge playback backward by 60 ms (Left arrow)."""
    player = self.mediaPlayer
    player.setPosition(player.position() - 1 * 60)
def backSlider10(self):
    """Jump playback backward by 60 s (Shift+Left)."""
    player = self.mediaPlayer
    player.setPosition(player.position() - 1000 * 60)
def volumeUp(self):
    """Raise the volume by 10 and log the resulting level."""
    louder = self.mediaPlayer.volume() + 10
    self.mediaPlayer.setVolume(louder)
    print("Volume: " + str(self.mediaPlayer.volume()))
def volumeDown(self):
    """Lower the volume by 10 and log the resulting level."""
    quieter = self.mediaPlayer.volume() - 10
    self.mediaPlayer.setVolume(quieter)
    print("Volume: " + str(self.mediaPlayer.volume()))
# def mouseMoveEvent(self, event):
# if event.buttons() == Qt.LeftButton:
# self.move(event.globalPos() \- QPoint(self.frameGeometry().width() / 2, \
# self.frameGeometry().height() / 2))
# event.accept()
def dragEnterEvent(self, event):
    """Accept drags that carry URLs (files); ignore everything else."""
    if not event.mimeData().hasUrls():
        event.ignore()
        return
    event.accept()
##################### update Label ##################################
def handleLabel(self):
    """Refresh the current-time label from the player position."""
    self.lbl.clear()
    base = QTime(0, 0, 0, 0)
    self.time = base.addMSecs(self.mediaPlayer.position())
    self.lbl.setText(self.time.toString())
def dropEvent(self, event):
    """Load the first dropped file as the current film."""
    dropped_path = str(event.mimeData().urls()[0].toLocalFile())
    self.loadFilm(dropped_path)
def clickFile(self):
    """Menu handler: log that the File entry was clicked."""
    print("File Clicked")
def clickExit(self):
    """Menu handler: terminate the application immediately."""
    sys.exit()
# Application entry point: build the Qt application and main window, then
# hand control to the Qt event loop; exit status is forwarded to the OS.
App = QApplication(sys.argv)
window = Window()
sys.exit(App.exec())
| 16,987
| 37.874142
| 327
|
py
|
ssqueezepy
|
ssqueezepy-master/setup.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2020 John Muradeli
# Licensed under the terms of the MIT License
# (see ssqueezepy/__init__.py for details)
"""
ssqueezepy
==========
Synchrosqueezing, wavelet transforms, and time-frequency analysis in Python
ssqueezepy features time-frequency analysis written for performance, flexibility,
and clarity. Included are Continuous Wavelet Transform (CWT), Short-Time Fourier
Transform (STFT), CWT & STFT synchrosqueezing, Generalized Morse Wavelets,
visualizations, a signal testing suite, and automatic ridge extraction.
"""
import os
import re
from setuptools import setup, find_packages
current_path = os.path.abspath(os.path.dirname(__file__))
def read_file(*parts):
    """Return the UTF-8 text of the file at ``current_path``/``*parts``."""
    full_path = os.path.join(current_path, *parts)
    with open(full_path, encoding='utf-8') as reader:
        return reader.read()
def get_requirements(*parts):
    """Read a requirements file under ``current_path``; return stripped lines.

    Each entry is stripped of surrounding whitespace (including the
    trailing newline); empty lines are preserved as empty strings, matching
    the original behavior.
    """
    full_path = os.path.join(current_path, *parts)
    with open(full_path, encoding='utf-8') as reader:
        # list comprehension replaces list(map(lambda ...)) — same result,
        # idiomatic, and avoids materializing readlines() first
        return [line.strip() for line in reader]
def find_version(*file_paths):
    """Extract the ``__version__`` string from a source file, or raise."""
    contents = read_file(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError('Unable to find version')
    return match.group(1)
# Package metadata / build configuration for ssqueezepy; the version is
# single-sourced from the package __init__ and requirements come from
# requirements.txt.
setup(
    name="ssqueezepy",
    # parsed from ssqueezepy/__init__.py's __version__ string
    version=find_version('ssqueezepy', '__init__.py'),
    packages=find_packages(exclude=['tests', 'examples']),
    url="https://github.com/OverLordGoldDragon/ssqueezepy",
    license="MIT",
    author="John Muradeli",
    author_email="john.muradeli@gmail.com",
    description=("Synchrosqueezing, wavelet transforms, and "
                 "time-frequency analysis in Python"),
    # PyPI long description comes straight from the README
    long_description=read_file('README.md'),
    long_description_content_type="text/markdown",
    keywords=(
        "signal-processing python synchrosqueezing wavelet-transform cwt stft "
        "morse-wavelet ridge-extraction time-frequency time-frequency-analysis "
        "visualization"
    ),
    install_requires=get_requirements('requirements.txt'),
    tests_require=["pytest>=4.0", "pytest-cov"],
    include_package_data=True,
    zip_safe=True,
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "Topic :: Utilities",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
        "Topic :: Scientific/Engineering :: Visualization",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 3,036
| 34.729412
| 81
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/scales_selection.py
|
# -*- coding: utf-8 -*-
"""Shows methods to use for CWT scales selection; also see their docstrings."""
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
from ssqueezepy import ssq_cwt, Wavelet
from ssqueezepy.visuals import imshow, plot
from ssqueezepy.utils import cwt_scalebounds, make_scales, p2up
from ssqueezepy.utils import logscale_transition_idx
#%%# Helper visual method ####################################################
def viz(wavelet, scales, scaletype, show_last, nv):
    """Plot the scales vector and the `show_last` largest-scale wavelets."""
    plot(scales, show=1, title="scales | scaletype=%s, nv=%s" % (scaletype, nv))
    extra = (", logscale_transition_idx=%s" % logscale_transition_idx(scales)
             if scaletype == 'log-piecewise' else "")
    print("n_scales={}, max(scales)={:.1f}{}".format(
        len(scales), scales.max(), extra))
    freq_wavelets = wavelet(scale=scales)[-show_last:]
    # x-limit: just past where the widest wavelet decays to 10% of its peak
    widest = freq_wavelets[0]
    peak = np.argmax(widest)
    cutoff = np.where(widest[peak:] < widest.max() * .1)[0][0] + peak + 2
    plot(freq_wavelets.T[:cutoff], color='tab:blue', show=1,
         title="Last %s largest scales" % show_last)
#%%# EDIT HERE ###############################################################
# signal length
N = 2048
# your signal here
t = np.linspace(0, 1, N, endpoint=False)
x = np.cos(2*np.pi * 16 * t) + np.sin(2*np.pi * 64 * t)
# choose wavelet
wavelet = 'gmw'
# choose padding scheme for CWT (doesn't affect scales selection)
padtype = 'reflect'
# one of: 'log', 'log-piecewise', 'linear'
# 'log-piecewise' lowers low-frequency redundancy; see
# https://github.com/OverLordGoldDragon/ssqueezepy/issues/29#issuecomment-778526900
scaletype = 'log-piecewise'
# one of: 'minimal', 'maximal', 'naive' (not recommended)
preset = 'maximal'
# number of voices (wavelets per octave); more = more scales
nv = 32
# downsampling factor for higher scales (used only if `scaletype='log-piecewise'`)
downsample = 4
# show this many of lowest-frequency wavelets
show_last = 20
#%%## Make scales ############################################################
# `cwt` uses `p2up`'d N internally
M = p2up(N)[0]
wavelet = Wavelet(wavelet, N=M)
min_scale, max_scale = cwt_scalebounds(wavelet, N=len(x), preset=preset)
scales = make_scales(N, min_scale, max_scale, nv=nv, scaletype=scaletype,
wavelet=wavelet, downsample=downsample)
#%%# Visualize scales ########################################################
viz(wavelet, scales, scaletype, show_last, nv)
wavelet.viz('filterbank', scales=scales)
#%%# Show applied ############################################################
Tx, Wx, ssq_freqs, scales, *_ = ssq_cwt(x, wavelet, scales=scales,
padtype=padtype)
imshow(Wx, abs=1, title="abs(CWT)")
imshow(Tx, abs=1, title="abs(SSQ_CWT)")
| 2,939
| 37.181818
| 83
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/benchmarks.py
|
# -*- coding: utf-8 -*-
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import os
import numpy as np
import gc
import pandas as pd
import scipy.signal as sig
import librosa
from pywt import cwt as pcwt
from timeit import timeit as _timeit
from ssqueezepy import cwt, stft, ssq_cwt, ssq_stft, Wavelet
from ssqueezepy.utils import process_scales, padsignal
from ssqueezepy.ssqueezing import _compute_associated_frequencies
def timeit(fn, number=10):
    """Average wall-clock time of one `fn()` call over `number` runs (sec)."""
    total = _timeit(fn, number=number)
    return total / number
#%%# Bench funcs #############################################################
def print_report(header, times):
    """Print the four most recent timing entries of `times` under `header`."""
    last_four = list(times.values())[-4:]
    template = ("{}\n"
                "CWT: {:.3f} sec\n"
                "STFT: {:.3f} sec\n"
                "SSQ_CWT: {:.3f} sec\n"
                "SSQ_STFT: {:.3f} sec\n")
    print(template.format(header, *last_four))
def time_ssq_cwt(x, dtype, scales, cache_wavelet, ssq_freqs):
    # Average runtime of ssq_cwt. When caching the wavelet, run 3 warmups
    # first so cache construction is excluded from the measured time.
    wavelet = Wavelet(dtype=dtype)
    kw = dict(wavelet=wavelet, scales=scales, ssq_freqs=ssq_freqs)
    if cache_wavelet:
        for _ in range(3):  # warmup run
            _ = ssq_cwt(x, cache_wavelet=True, **kw)
        del _; gc.collect()  # free warmup outputs before timing
    return timeit(lambda: ssq_cwt(x, cache_wavelet=cache_wavelet, **kw))
def time_ssq_stft(x, dtype, n_fft):
    # Average runtime of ssq_stft after 3 warmup runs (presumably to
    # populate internal caches — TODO confirm) and a GC pass.
    for _ in range(3):
        _ = ssq_stft(x, dtype=dtype, n_fft=n_fft)
    del _; gc.collect()
    return timeit(lambda: ssq_stft(x, dtype=dtype, n_fft=n_fft))
def time_cwt(x, dtype, scales, cache_wavelet):
    # Average runtime of cwt; warm up 3x only when the wavelet is cached,
    # so cache construction isn't part of the measurement.
    wavelet = Wavelet(dtype=dtype)
    if cache_wavelet:
        for _ in range(3):  # warmup run
            _ = cwt(x, wavelet, scales=scales, cache_wavelet=True)
        del _; gc.collect()  # free warmup outputs before timing
    return timeit(lambda: cwt(x, wavelet, scales=scales,
                              cache_wavelet=cache_wavelet))
def time_stft(x, dtype, n_fft):
    # Average runtime of stft after 3 warmup runs and a GC pass,
    # mirroring time_ssq_stft for comparable numbers.
    for _ in range(3):
        _ = stft(x, dtype=dtype, n_fft=n_fft)
    del _; gc.collect()
    return timeit(lambda: stft(x, dtype=dtype, n_fft=n_fft))
def time_all(x, dtype, scales, cache_wavelet, ssq_freqs, n_fft):
    """Time all four transforms on `x`; keys are prefixed by input length."""
    num = str(len(x))[:-3] + 'k'  # e.g. 160000 -> '160k'
    results = {num: ''}  # blank separator row for the report table
    results[f'{num}-cwt'] = time_cwt(x, dtype, scales, cache_wavelet)
    results[f'{num}-stft'] = time_stft(x, dtype, n_fft)
    results[f'{num}-ssq_cwt'] = time_ssq_cwt(x, dtype, scales, cache_wavelet,
                                             ssq_freqs)
    results[f'{num}-ssq_stft'] = time_ssq_stft(x, dtype, n_fft)
    return results
#%%# Setup ###################################################################
# warmup
x = np.random.randn(1000)
for dtype in ('float32', 'float64'):
wavelet = Wavelet(dtype=dtype)
_ = ssq_cwt(x, wavelet, cache_wavelet=False)
_ = ssq_stft(x, dtype=dtype)
del _, wavelet
#%%# Prepare reusable parameters such that STFT & CWT output shapes match ####
N0, N1 = 10000, 160000 # selected such that CWT pad length ratios are same
n_rows = 300
n_fft = n_rows * 2 - 2
wavelet = Wavelet()
scales = process_scales('log-piecewise', N1, wavelet=wavelet)[:n_rows]
ssq_freqs = _compute_associated_frequencies(
scales, N1, wavelet, 'log-piecewise', maprange='peak',
was_padded=True, dt=1, transform='cwt')
kw = dict(scales=scales, ssq_freqs=ssq_freqs, n_fft=n_fft)
t_all = {}
#%%# Baseline ################################################################
print("// BASELINE (dtype=float32, cache_wavelet=True)")
os.environ['SSQ_PARALLEL'] = '0'
os.environ['SSQ_GPU'] = '0'
t_all['base'] = {}
dtype = 'float32'
for N in (N0, N1):
x = np.random.randn(N)
t_all['base'].update(time_all(x, dtype=dtype, cache_wavelet=True, **kw))
print_report(f"/ N={N}", t_all['base'])
#%%# Parallel + wavelet cache ################################################
print("// PARALLEL + CACHE (dtype=float32, cache_wavelet=True)")
os.environ['SSQ_PARALLEL'] = '1'
os.environ['SSQ_GPU'] = '0'
t_all['parallel'] = {}
for N in (N0, N1):
x = np.random.randn(N)
t_all['parallel'].update(time_all(x, dtype='float32', cache_wavelet=True,
**kw))
print_report(f"/ N={N}", t_all['parallel'])
#%%# GPU + wavelet cache #####################################################
print("// GPU + CACHE (dtype=float32, cache_wavelet=True)")
os.environ['SSQ_GPU'] = '1'
t_all['gpu'] = {}
for N in (N0, N1):
x = np.random.randn(N)
t_all['gpu'].update(time_all(x, dtype='float32', cache_wavelet=True, **kw))
print_report(f"/ N={N}", t_all['gpu'])
#%%
df = pd.DataFrame(t_all)
print(df)
#%% PyWavelets ###############################################################
for N in (N0, N1):
x = np.random.randn(N)
xp = padsignal(x)
t = timeit(lambda: pcwt(xp, wavelet='cmor1.5-1.0', scales=scales,
method='fft'))
print("pywt_cwt-%s:" % N, t)
#%% Scipy
for N in (N0, N1):
x = np.random.randn(N)
xp = padsignal(x)
t = timeit(lambda: sig.cwt(xp, wavelet=sig.morlet,
widths=np.arange(4, 4 + len(scales))))
print("scipy_cwt-%s:" % N, t)
#%%
for N in (N0, N1):
x = np.random.randn(N)
t = timeit(lambda: sig.stft(x, nperseg=n_fft, nfft=n_fft, noverlap=n_fft-1))
print("scipy_stft-%s:" % N, t)
#%% Librosa
# NOTE: we bench here with float64 since float32 is slower for librosa as of 0.8.0
for N in (N0, N1):
x = np.random.randn(N)
t = timeit(lambda: librosa.stft(x, n_fft=n_fft, hop_length=1, dtype='float64'))
print("librosa_stft-%s:" % N, t)
#%%#
"""
i7-7700HQ, GTX 1070
base parallel gpu
10k
10k-cwt 0.126293 0.046184 0.003928
10k-stft 0.1081 0.038459 0.005337
10k-ssq_cwt 0.372002 0.147907 0.009412
10k-ssq_stft 0.282463 0.146660 0.027790
160k
160k-cwt 2.985540 1.252456 0.036721
160k-stft 1.657803 0.418435 0.064341
160k-ssq_cwt 8.384496 3.157575 0.085638
160k-ssq_stft 4.649919 2.483205 0.159171
pywt_cwt-10000: 3.5802361100000097
pywt_cwt-160000: 12.683934910000016
scipy_cwt-10000: 0.5228888900000129
scipy_cwt-160000: 10.741505060000009
scipy_stft-10000: 0.11830254000001332
scipy_stft-160000: 1.92775223000001
librosa_stft-10000: 0.09094287000000259
librosa_stft-160000: 1.383814400000001
"""
| 6,211
| 32.219251
| 83
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/se_ans0.py
|
# -*- coding: utf-8 -*-
"""Code for https://dsp.stackexchange.com/a/71399/50076
"""
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
import matplotlib.pyplot as plt
from ssqueezepy import ssq_cwt, cwt
from ssqueezepy.visuals import plot, imshow
#%%# Signal generators #######################################################
def _t(min, max, N):
return np.linspace(min, max, N, False)
def cos_f(freqs, N=128, phi=0):
    """Concatenate one N-sample segment of cos(2*pi*f*(t+phi)) per f in freqs.

    Segment i spans t in [i, i+1), endpoint excluded (helper inlined).
    """
    segments = []
    for i, f in enumerate(freqs):
        t = np.linspace(i, i + 1, N, endpoint=False)
        segments.append(np.cos(2 * np.pi * f * (t + phi)))
    return np.concatenate(segments)
#%%## Configure, compute, plot ###############################################
wavelet = ('morlet', {'mu': 5})
f, N = 12, 512
x = cos_f([f], N=N)
Wx, scales, *_ = cwt(x, wavelet, fs=N)
#%%# Show, print max row
imshow(Wx, abs=1, yticks=scales, title="f=%d, N=%d" % (f, N), show=1,
cmap='bone')
mxidx = np.where(np.abs(Wx) == np.abs(Wx).max())[0][0]
print("Max row idx:", mxidx, flush=True)
#%%# Plot aroundI max row
idxs = slice(mxidx - 30, mxidx + 20)
Wxz = Wx[idxs]
imshow(Wxz, abs=1, title="abs(CWT), zoomed", show=0, cmap='bone')
plt.axhline(30, color='r')
plt.show()
#%%## Animate rows ###########################################################
def row_anim(Wxz, idxs, scales, superposed=False):
    # Plot CWT rows one at a time; the row attaining the global maximum is
    # drawn red, the rest blue. With superposed=False each row gets its own
    # annotated figure; with superposed=True the x-range is clipped instead.
    # NOTE(review): indentation reconstructed from a flattened dump — the
    # per-iteration plt.show() placement should be confirmed upstream.
    mx = np.max(np.abs(Wxz))
    for scale, row in zip(scales[idxs], Wxz):
        if row.max() == Wxz.max():
            plt.plot(row.real, color='r')
        else:
            plt.plot(row.real, color='tab:blue')
        plt.ylim(-1.05*mx, 1.05*mx)
        if not superposed:
            plt.annotate("scale=%.1f" % scale, weight='bold', fontsize=14,
                         xy=(.85, .93), xycoords='axes fraction')
            plt.show()
        else:
            plt.xlim(0, len(row) // 4)
            plt.show()
#%%
row_anim(Wxz, idxs, scales)
#%%## Superimpose ####
row_anim(Wxz, idxs, scales, superposed=True)
#%%## Synchrosqueeze
Tx, _, ssq_freqs, *_ = ssq_cwt(x, wavelet, t=_t(0, 1, N))
#%%
imshow(Tx, abs=1, title="abs(SSWT)", yticks=ssq_freqs, show=1)
#%%# Damped pendulum example ################################################
N, w0 = 4096, 25
t = _t(0, 6, N)
s = np.exp(-t) * np.cos(w0 * t)
w = np.linspace(-40, 40, N)
S = (1 + 1j * w) / ((1 + 1j * w)**2 + w0**2)
#%%# Plot ####
plot(s, title="s(t)", show=1)
plot(w, np.abs(S), title="abs(FT(s(t)))", show=1)
#%%# Now SSWT ##
wavelet = ('morlet', {'mu': 5})
Tx, *_ = ssq_cwt(s, wavelet, t=t)
#%%# 'cheat' a little; could use boundary wavelets instead (not implemented)
aTxz = np.abs(Tx)[:, len(t) // 8:]
imshow(aTxz, abs=1, title="abs(SSWT(s(t)))", show=1, cmap='bone')
#%%
mxidx = np.where(np.abs(aTxz) == np.abs(aTxz).max())[0][0]
plot(aTxz[mxidx], title="max row of abs(SSWT(s(t)))", show=1)
| 2,775
| 31.27907
| 78
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/cwt_higher_order.py
|
# -*- coding: utf-8 -*-
"""Show CWT with higher-order Generalized Morse Wavelets on parallel reflect-added
linear chirps, with and without noise, and show GMW waveforms.
"""
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
from ssqueezepy import cwt, TestSignals
from ssqueezepy.visuals import viz_cwt_higher_order, viz_gmw_orders
#%%# CWT with higher-order GMWs #############################################
N = 1024
order = 2
tsigs = TestSignals()
x, t = tsigs.par_lchirp(N=N)
x += x[::-1]
for noise in (False, True):
if noise:
x += np.random.randn(len(x))
Wx_k, scales = cwt(x, 'gmw', order=range(order + 1), average=False)
viz_cwt_higher_order(Wx_k, scales, 'gmw')
print("=" * 80)
#%%# Higher-order GMWs #######################################################
gamma, beta, norm = 3, 60, 'bandpass'
n_orders = 3
scale = 5
viz_gmw_orders(N, n_orders, scale, gamma, beta, norm)
| 959
| 27.235294
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/ridge_chirp.py
|
# -*- coding: utf-8 -*-
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
from numpy.fft import rfft
from ssqueezepy import ssq_cwt, issq_cwt, cwt
from ssqueezepy.toolkit import lin_band, cos_f, mad_rms
from ssqueezepy.visuals import imshow, plot, scat
#%%###########################################################################
def echirp(N):
    """Exponential chirp cos(2*pi*exp(t/3)) on t in [0, 10), N samples.

    Returns (signal, t).
    """
    t = np.linspace(0, 10, N, endpoint=False)
    phase = 2 * np.pi * np.exp(t / 3)
    return np.cos(phase), t
#%%## Configure signal #######################################################
N = 2048
noise_var = 6 # noise variance; compare error against = 12
x, ts = echirp(N)
x *= (1 + .3 * cos_f([1], N)) # amplitude modulation
xo = x.copy()
np.random.seed(4)
x += np.sqrt(noise_var) * np.random.randn(len(x))
#### Show signal & its global spectrum #######################################
axf = np.abs(rfft(x))
plot(xo); scat(xo, s=8, show=1)
plot(x); scat(x, s=8, show=1)
plot(axf, show=1)
#%%# Synchrosqueeze ##########################################################
kw = dict(wavelet=('morlet', {'mu': 4.5}), nv=32, scales='log')
Tx, *_ = ssq_cwt(x, t=ts, **kw)
Wx, *_ = cwt(x, t=ts, **kw)
#%%# Visualize ###############################################################
pkw = dict(abs=1, cmap='bone')
_Tx = np.pad(Tx, [[4, 4]]) # improve display of top- & bottom-most freqs
imshow(Wx, **pkw)
imshow(_Tx, norm=(0, 4e-1), **pkw)
#%%# Estimate inversion ridge ###############################################
bw, slope, offset = .035, -.46, -1.8
Cs, freqband = lin_band(Tx, slope, offset, bw, norm=(0, 4e-1))
#%%###########################################################################
xrec = issq_cwt(Tx, kw['wavelet'], Cs, freqband)[0]
plot(xo)
plot(xrec, show=1)
axof = np.abs(rfft(xo))
axrecf = np.abs(rfft(xrec))
plot(axof)
plot(axrecf, show=1)
print("signal MAD/RMS: %.6f" % mad_rms(xo, xrec))
print("spectrum MAD/RMS: %.6f" % mad_rms(axof, axrecf))
| 1,949
| 30.967213
| 78
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/phase_ssqueeze.py
|
# -*- coding: utf-8 -*-
"""Experimental feature example."""
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
from ssqueezepy import TestSignals, ssq_cwt, Wavelet
from ssqueezepy.visuals import imshow
from ssqueezepy.experimental import phase_ssqueeze
#%%
x = TestSignals(N=2048).par_lchirp()[0]
x += x[::-1]
wavelet = Wavelet()
Tx0, Wx, _, scales, *_ = ssq_cwt(x, wavelet, get_dWx=1)
Tx1, *_ = phase_ssqueeze(Wx, wavelet=wavelet, scales=scales, flipud=1)
adiff = np.abs(Tx0 - Tx1)
print(adiff.mean(), adiff.max(), adiff.sum())
#%%
# main difference near boundaries; see `help(trigdiff)` w/ `rpadded=False`
imshow(Tx1, abs=1)
| 678
| 27.291667
| 74
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/test_transforms.py
|
# -*- coding: utf-8 -*-
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
import scipy.signal as sig
from ssqueezepy import Wavelet, TestSignals
from ssqueezepy.utils import window_resolution
tsigs = TestSignals(N=2048)
#%%# Viz signals #############################################################
# set `dft` to 'rows' or 'cols' to also plot signals' DFT, along rows or columns
dft = (None, 'rows', 'cols')[0]
tsigs.demo(dft=dft)
#%%# How to specify `signals` ################################################
signals = [
'am-cosine',
('hchirp', dict(fmin=.2)),
('sine:am-cosine', (dict(f=32, phi0=1), dict(amin=.3))),
]
tsigs.demo(signals, N=2048)
#%%# With `dft` ##################
tsigs.demo(signals, dft='rows')
tsigs.demo(signals, dft='cols')
#%%# Viz CWT & SSQ_CWT with different wavelets ###############################
tsigs = TestSignals(N=2048)
wavelets = [
Wavelet(('gmw', {'beta': 60})),
Wavelet(('gmw', {'beta': 5})),
]
tsigs.wavcomp(wavelets, signals='all')
#%%#
tsigs.wavcomp(wavelets, signals=[('#echirp', dict(fmin=.1))], N=2048)
#%%# Viz CWT vs STFT (& SSQ'd) ###############################################
# (N, beta, NW): (512, 42.5, 255); (256, 21.5, 255)
N = 2048
signals = 'all'
n_fft = N
win_len = 720
tsigs = TestSignals(N=N)
wavelet = Wavelet(('GMW', {'beta': 60}))
NW = win_len//2 - 1
window = np.abs(sig.windows.dpss(win_len, NW))
window = np.pad(window, (N - len(window))//2)
assert len(window) == N
window_name = 'DPSS'
config_str = 'NW=%s, win_len=%s, win_pad_len=%s' % (
NW, win_len, len(window) - win_len)
# ensure `wavelet` and `window` have ~same time & frequency resolutions
print("std_w, std_t, harea\nwavelet: {:.4f}, {:.4f}, {:.8f}"
"\nwindow: {:.4f}, {:.4f}, {:.8f}".format(
wavelet.std_w, wavelet.std_t, wavelet.harea,
*window_resolution(window)))
#%%
kw = dict(wavelet=wavelet, window=window, win_len=None, n_fft=n_fft,
window_name=window_name, config_str=config_str)
tsigs.cwt_vs_stft(N=N, signals=signals, **kw)
#%%# Noisy example ###########################################################
N = 2048
snr = -2 # in dB
signals = 'packed-poly'
tsigs = TestSignals(N=N, snr=snr)
tsigs.cwt_vs_stft(N=N, signals=signals, **kw)
#%%# Ridge extraction ########################################################
N = 512
signals = 'poly-cubic'
snr = None
n_ridges = 3
penalty = 25
tsigs = TestSignals(N=N, snr=snr)
kw = dict(N=N, signals=signals, n_ridges=n_ridges, penalty=penalty)
tsigs.ridgecomp(transform='cwt', **kw)
tsigs.ridgecomp(transform='stft', **kw)
| 2,620
| 29.476744
| 80
|
py
|
ssqueezepy
|
ssqueezepy-master/examples/extracting_ridges.py
|
# -*- coding: utf-8 -*-
"""Authors: David Bondesson, OverLordGoldDragon
Ridge extraction on signals with varying time-frequency characteristics.
"""
if __name__ != '__main__':
raise Exception("ran example file as non-main")
import numpy as np
import scipy.signal as sig
from ssqueezepy import ssq_cwt, ssq_stft, extract_ridges, TestSignals
from ssqueezepy.visuals import plot, imshow
#%%## Visual methods #########################################################
def viz(x, Tf, ridge_idxs, yticks=None, ssq=False, transform='cwt', show_x=True):
    # Plot the signal (optionally) and overlay the extracted ridge indices
    # (dashed black) on the time-frequency magnitude image `Tf`.
    if show_x:
        plot(x, title="x(t)", show=1,
             xlabel="Time [samples]", ylabel="Signal Amplitude [A.U.]")
    # y-axis label: CWT rows are scales; synchrosqueezed/STFT rows are Hz
    ylabel = ("Frequency scales [1/Hz]" if (transform == 'cwt' and not ssq) else
              "Frequencies [Hz]")
    title = "abs({}{}) w/ ridge_idxs".format("SSQ_" if ssq else "",
                                             transform.upper())
    ikw = dict(abs=1, cmap='turbo', yticks=yticks, title=title)
    pkw = dict(linestyle='--', color='k', xlabel="Time [samples]", ylabel=ylabel,
               xlims=(0, Tf.shape[1]))
    imshow(Tf, **ikw, show=0)
    plot(ridge_idxs, **pkw, show=1)
def tf_transforms(x, t, wavelet='morlet', window=None, padtype='wrap',
                  penalty=.5, n_ridges=2, cwt_bw=15, stft_bw=15,
                  ssq_cwt_bw=4, ssq_stft_bw=4):
    # Compute CWT, STFT, and both synchrosqueezed variants of `x`, extract
    # `n_ridges` ridges from each (per-transform bandwidths), and visualize
    # all four with ridge overlays via viz().
    kw_cwt = dict(t=t, padtype=padtype)
    kw_stft = dict(fs=1/(t[1] - t[0]), padtype=padtype, flipud=1)
    Twx, Wx, ssq_freqs_c, scales, *_ = ssq_cwt(x, wavelet, **kw_cwt)
    Tsx, Sx, ssq_freqs_s, Sfs, *_ = ssq_stft(x, window, **kw_stft)
    # reverse STFT rows/frequencies — presumably to match the flipud=1
    # orientation used for Tsx above; confirm against ssq_stft docs
    Sx, Sfs = Sx[::-1], Sfs[::-1]
    ckw = dict(penalty=penalty, n_ridges=n_ridges, transform='cwt')
    skw = dict(penalty=penalty, n_ridges=n_ridges, transform='stft')
    cwt_ridges = extract_ridges(Wx, scales, bw=cwt_bw, **ckw)
    ssq_cwt_ridges = extract_ridges(Twx, ssq_freqs_c, bw=ssq_cwt_bw, **ckw)
    stft_ridges = extract_ridges(Sx, Sfs, bw=stft_bw, **skw)
    ssq_stft_ridges = extract_ridges(Tsx, ssq_freqs_s, bw=ssq_stft_bw, **skw)
    viz(x, Wx, cwt_ridges, scales, ssq=0, transform='cwt', show_x=1)
    viz(x, Twx, ssq_cwt_ridges, ssq_freqs_c, ssq=1, transform='cwt', show_x=0)
    viz(x, Sx, stft_ridges, Sfs, ssq=0, transform='stft', show_x=0)
    viz(x, Tsx, ssq_stft_ridges, ssq_freqs_s, ssq=1, transform='stft', show_x=0)
#%%# Basic example ###########################################################
# Example ridge from similar example as can be found at MATLAB:
# https://www.mathworks.com/help/wavelet/ref/wsstridge.html#bu6we25-penalty
test_matrix = np.array([[1, 4, 4], [2, 2, 2], [5, 5, 4]])
fs_test = np.exp([1, 2, 3])
ridge_idxs, *_ = extract_ridges(test_matrix, fs_test, penalty=2.0,
get_params=True)
print("Ridge follows indexes:", ridge_idxs)
assert np.allclose(ridge_idxs, np.array([[2, 2, 2]]))
#%%# sin + cos ###############################################################
N, f1, f2 = 513, 5, 20
padtype = 'wrap'
penalty = 20
t = np.linspace(0, 1, N, endpoint=True)
x1 = np.sin(2*np.pi * f1 * t)
x2 = np.cos(2*np.pi * f2 * t)
x = x1 + x2
tf_transforms(x, t, padtype=padtype, penalty=penalty)
#%%# Linear + quadratic chirp ################################################
N = 513
penalty = 20
padtype = 'reflect'
t = np.linspace(0, 20, N, endpoint=True)
x1 = sig.chirp(t, f0=2, f1=8, t1=20, method='linear')
x2 = sig.chirp(t, f0=.4, f1=4, t1=20, method='quadratic')
x = x1 + x2
tf_transforms(x, t, padtype=padtype, stft_bw=4, penalty=penalty)
#%%# Cubic polynomial frequency variation + pure tone ########################
N, f = 257, 0.5
padtype = 'wrap'
penalty = 20
t = np.linspace(0, 10, N, endpoint=True)
p1 = np.poly1d([0.025, -0.36, 1.25, 2.0])
p3 = np.poly1d([0.01, -0.25, 1.5, 4.0])
x1 = sig.sweep_poly(t, p1)
x3 = sig.sweep_poly(t, p3)
x2 = np.sin(2*np.pi * f * t)
x = x1 + x2 + x3
# x += np.sqrt(1) * np.random.randn(len(x))
tf_transforms(x, t, n_ridges=3, padtype=padtype, stft_bw=4, ssq_stft_bw=4,
penalty=penalty)
#%%# Reflect-added linear chirps #############################################
N = 512
penalty = 2
tsigs = TestSignals(N)
x, t = tsigs.lchirp(N)
x += x[::-1]
tf_transforms(x, t, penalty=penalty, cwt_bw=10)
#%%# Parallel F.M. linear chirps ############################################
N = 512
penalty = 2
padtype = 'reflect'
tsigs = TestSignals(N)
x, t = tsigs.par_lchirp(N)
tf_transforms(x, t, penalty=penalty, padtype=padtype)
| 4,536
| 35.296
| 81
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/fft_test.py
|
# -*- coding: utf-8 -*-
"""Fast Fourier Transform, CPU parallelization, and GPU execution tests:
- multi-thread CPU & GPU outputs match that of single-thread CPU
- batched (multi-input) outputs match single for-looped
- `ssqueezepy.FFT` outputs match `scipy`'s
- unified synchrosqueezing pipelines outputs match that of v0.6.0
Note that GPU tests are skipped in CI (Travis), and are instead done locally.
"""
import os
import pytest
import warnings
import numpy as np
from scipy.fft import fft as sfft, rfft as srfft, ifft as sifft, irfft as sirfft
from scipy.fft import ifftshift
import ssqueezepy
from ssqueezepy import TestSignals, Wavelet, ssq_stft, ssq_cwt
from ssqueezepy import fft, rfft, ifft, irfft, cwt
from ssqueezepy.algos import indexed_sum, indexed_sum_onfly, ssqueeze_fast
from ssqueezepy.algos import phase_cwt_cpu, phase_cwt_gpu, replace_under_abs
from ssqueezepy.algos import phase_stft_gpu, phase_stft_cpu
from ssqueezepy.configs import gdefaults
from ssqueezepy.utils import process_scales, buffer
# no visuals here but 1 runs as regular script instead of pytest, for debugging
VIZ = 0
try:
import torch
torch.tensor(1, device='cuda')
CAN_GPU = True
except:
CAN_GPU = False
warnings.warn("SKIPPED TESTS in `fft_test.py`, GPU not found.")
def _wavelet(name='gmw', **kw):
    """Build a `Wavelet` from a name plus keyword configuration."""
    spec = (name, kw)
    return Wavelet(spec)
def test_1D():
    """1D FFTs: `ssqueezepy` fft/rfft/ifft/irfft match `scipy.fft` for even
    and odd lengths, across planner `patience` settings."""
    os.environ['SSQ_GPU'] = '0'
    for N in (128, 129):
        x = np.random.randn(N)
        # half-spectrum complex input for the inverse transforms
        xf = x[:N//2 + 1] * (1 + 2j)
        souts = dict(fft=sfft(x), rfft=srfft(x), ifft=sifft(xf),
                     irfft1=sirfft(xf), irfft2=sirfft(xf, n=len(x)))
        for patience in (0, (1, 1), (2, 1)):
            qouts = dict(
                fft=fft(x, patience=patience),
                rfft=rfft(x, patience=patience),
                ifft=ifft(xf, patience=patience),
                irfft1=irfft(xf, patience=patience),
                irfft2=irfft(xf, patience=patience, n=len(x)),
            )
            for name, qout in qouts.items():
                assert np.allclose(qout, souts[name]), (
                    "{}: N={}, patience={}".format(name, N, patience))
def test_2D():
    """2D FFTs: `ssqueezepy` outputs match `scipy.fft` along both axes,
    for even/odd shapes and multiple planner `patience` settings."""
    os.environ['SSQ_GPU'] = '0'
    for N in (128, 129):
        for M in (64, 65):
            for axis in (0, 1):
                x = np.random.randn(N, M)
                # half-spectrum complex input along the transformed axis
                if axis == 0:
                    xf = x[:N//2 + 1] * (1 + 2j)
                else:
                    xf = x[:, :M//2 + 1] * (1 + 2j)
                souts = dict(
                    fft=sfft(x, axis=axis),
                    rfft=srfft(x, axis=axis),
                    ifft=sifft(xf, axis=axis),
                    irfft1=sirfft(xf, axis=axis),
                    irfft2=sirfft(xf, axis=axis, n=x.shape[axis]),
                )
                for patience in (0, (1, .5), (2, .5)):
                    kw = dict(axis=axis, patience=patience)
                    qouts = dict(
                        fft=fft(x, **kw),
                        rfft=rfft(x, **kw),
                        ifft=ifft(xf, **kw),
                        irfft1=irfft(xf, **kw),
                        irfft2=irfft(xf, **kw, n=x.shape[axis]),
                    )
                    for name, qout in qouts.items():
                        assert np.allclose(qout, souts[name]), (
                            "{}: (N, M)=({}, {}), patience={}".format(
                                name, N, M, patience))
def test_exhaustive():
    """Ensure exhaustive case works."""
    os.environ['SSQ_GPU'] = '0'
    # patience=(2, None): presumably the exhaustive planner with no time
    # limit -- TODO confirm against `ssqueezepy.fft` docs
    fft(np.random.randn(4), patience=(2, None))
def test_indexed_sum():
    """Parallel `indexed_sum` matches the serial implementation."""
    os.environ['SSQ_GPU'] = '0'
    Wx = np.random.randn(1000, 1000).astype('complex64')
    k = np.random.randint(0, len(Wx), Wx.shape)
    out1 = indexed_sum(Wx, k, parallel=False)
    out2 = indexed_sum(Wx, k, parallel=True)
    assert np.allclose(out1, out2), "MAE: %s" % np.mean(np.abs(out1 - out2))
def test_parallel_setting():
    """Assert
        1. ssqueezepy is parallel by default
        2. `configs.ini` includes parallel config
        3. os.environ flag overrides `configs.ini`
    """
    os.environ['SSQ_GPU'] = '0'
    assert ssqueezepy.IS_PARALLEL()
    parallel = gdefaults('configs.IS_PARALLEL', parallel=None)
    assert parallel is not None
    assert parallel == 1
    os.environ['SSQ_PARALLEL'] = '0'
    # `try/finally` alone suffices: the original caught AssertionError only to
    # re-raise a blank one, which destroyed the failure message/traceback.
    try:
        assert not ssqueezepy.IS_PARALLEL()
    finally:
        # ensure `os.environ` is cleaned even if assert fails
        os.environ.pop('SSQ_PARALLEL')
def _noninf_mean(x):
x[np.isinf(x) | np.isnan(x)] = 0
return x.mean()
def test_phase_cwt():
os.environ['SSQ_GPU'] = '0'
x = TestSignals(N=1000).par_lchirp()[0]
x += x[::-1]
wavelet = Wavelet()
scales = process_scales('log', len(x), wavelet, nv=32)[:240]
Wx, _, dWx = cwt(x, wavelet, scales=scales, derivative=True, cache_wavelet=1)
for dtype in ('complex128', 'complex64'):
# Wx = np.random.randn(100, 8192).astype(dtype) * (1 + 2j)
# dWx = np.random.randn(100, 8192).astype(dtype) * (2 - 1j)
Wx, dWx = Wx.astype(dtype), dWx.astype(dtype)
if CAN_GPU:
Wxt = torch.tensor(Wx, device='cuda')
dWxt = torch.tensor(dWx, device='cuda')
gamma = 1e-2
_out = (dWx / Wx).imag / (2 * np.pi)
_out[np.abs(Wx) < gamma] = np.inf
_out = np.abs(_out)
out0 = phase_cwt_cpu(Wx, dWx, gamma, parallel=False)
out1 = phase_cwt_cpu(Wx, dWx, gamma, parallel=True)
if CAN_GPU:
out2 = phase_cwt_gpu(Wxt, dWxt, gamma).cpu().numpy()
with np.errstate(invalid='ignore'):
mape0_ = _noninf_mean(np.abs(_out - out0) / np.abs(_out))
mape01 = _noninf_mean(np.abs(out0 - out1) / np.abs(out0))
if CAN_GPU:
mape02 = _noninf_mean(np.abs(out0 - out2) / np.abs(out0))
assert np.allclose(out0, _out), ("base", dtype, mape0_)
assert np.allclose(out0, out1), ("parallel", dtype, mape01)
if CAN_GPU:
assert np.allclose(out0, out2), ("gpu", dtype, mape02)
def test_phase_stft():
atol = 1e-7
np.random.seed(0)
for dtype in ('float64', 'float32'):
Wx = np.random.randn(100, 1028).astype(dtype) * (1 + 2j)
dWx = np.random.randn(100, 1028).astype(dtype) * (2 - 1j)
Sfs = np.linspace(0, .5, len(Wx)).astype(dtype)
if CAN_GPU:
Wxt = torch.as_tensor(Wx, device='cuda')
dWxt = torch.as_tensor(dWx, device='cuda')
Sfst = torch.as_tensor(Sfs, device='cuda')
gamma = 1e-2
_out = Sfs[:, None] - (dWx / Wx).imag / (2*np.pi)
_out[np.abs(Wx) < gamma] = np.inf
_out = np.abs(_out)
out0 = phase_stft_cpu(Wx, dWx, Sfs, gamma, parallel=False)
out1 = phase_stft_cpu(Wx, dWx, Sfs, gamma, parallel=True)
if CAN_GPU:
out2 = phase_stft_gpu(Wxt, dWxt, Sfst, gamma).cpu().numpy()
with np.errstate(invalid='ignore'):
mape0_ = _noninf_mean(np.abs(_out - out0) / np.abs(_out))
mape01 = _noninf_mean(np.abs(out0 - out1) / np.abs(out0))
if CAN_GPU:
mape02 = _noninf_mean(np.abs(out0 - out2) / np.abs(out0))
assert np.allclose(out0, _out, atol=atol), ("base", dtype, mape0_)
assert np.allclose(out0, out1, atol=atol), ("parallel", dtype, mape01)
if CAN_GPU:
assert np.allclose(out0, out2, atol=atol), ("gpu", dtype, mape02)
def test_replace_under_abs():
np.random.seed(0)
gamma = 1e-2
for dtype in ('float32', 'float64'):
w0 = np.random.randn(100, 200).astype(dtype)
Wx = np.random.randn(100, 200).astype(dtype) * (2 - 1j)
w1 = w0.copy()
if CAN_GPU:
wt = torch.tensor(w0, device='cuda')
Wxt = torch.tensor(Wx, device='cuda')
replace_under_abs(w0, Wx, gamma, np.inf, parallel=False)
replace_under_abs(w1, Wx, gamma, np.inf, parallel=True)
if CAN_GPU:
replace_under_abs(wt, Wxt, gamma, np.inf)
wt = wt.cpu().numpy()
assert np.allclose(w0, w1), ("parallel", dtype)
if CAN_GPU:
assert np.allclose(w0, wt), ("gpu", dtype)
def _make_ssq_freqs(M, scaletype):
if scaletype == 'log-piecewise':
sf = np.logspace(0, np.log10(M), 2*M)
sf1 = sf[:M//2]
sf2 = sf[M//2 + 3 - 1::3]
ssq_freqs = np.hstack([sf1, sf2])
elif scaletype == 'log':
ssq_freqs = np.logspace(0, np.log10(M), M)
elif scaletype == 'linear':
ssq_freqs = np.linspace(0, M, M)
return ssq_freqs
def test_indexed_sum_onfly():
np.random.seed(0)
for dtype in ('float32', 'float64'):
Wx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
w = np.abs(np.random.randn(*Wx.shape).astype(dtype))
w *= (2*len(Wx) / w.max())
if CAN_GPU:
Wxt, wt = [torch.tensor(g, device='cuda') for g in (Wx, w)]
for scaletype in ('log-piecewise', 'log', 'linear'):
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Wx), scaletype)
ssq_logscale = scaletype.startswith('log')
const = (np.log(2) / 32 if 1 else
ssq_freqs)
out0 = indexed_sum_onfly(Wx, w, ssq_freqs, const, ssq_logscale,
flipud=flipud, parallel=False)
out1 = indexed_sum_onfly(Wx, w, ssq_freqs, const, ssq_logscale,
flipud=flipud, parallel=True)
if CAN_GPU:
out2 = indexed_sum_onfly(Wxt, wt, ssq_freqs, const, ssq_logscale,
flipud=flipud).cpu().numpy()
adiff01 = np.abs(out0 - out1).mean()
if CAN_GPU:
adiff02 = np.abs(out0 - out2).mean()
# this is due to `const` varying rather than 'linear'
th = ((1e-16 if dtype == 'float64' else 1e-8) if ssq_logscale else
(1e-13 if dtype == 'float64' else 1e-5))
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
if CAN_GPU:
assert adiff02 < th, (scaletype, dtype, flipud, adiff02)
def test_ssqueeze_cwt():
np.random.seed(0)
gamma = 1e-2
for dtype in ('float32', 'float64'):
Wx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
dWx = np.random.randn(100, 512).astype(dtype) * (2 - 1j)
if CAN_GPU:
Wxt, dWxt = [torch.tensor(g, device='cuda') for g in (Wx, dWx)]
for scaletype in ('log-piecewise', 'log', 'linear'):
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Wx), scaletype)
ssq_logscale = scaletype.startswith('log')
const = (np.log(2) / 32 if ssq_logscale else
ssq_freqs)
args = (ssq_freqs, const, ssq_logscale)
kw = dict(flipud=flipud, gamma=gamma)
out0 = ssqueeze_fast(Wx, dWx, *args, **kw, parallel=False)
out1 = ssqueeze_fast(Wx, dWx, *args, **kw, parallel=True)
if CAN_GPU:
out2 = ssqueeze_fast(Wxt, dWxt, *args, **kw).cpu().numpy()
adiff01 = np.abs(out0 - out1).mean()
if CAN_GPU:
adiff02 = np.abs(out0 - out2).mean()
# this is due to `const` varying rather than 'linear'
th = ((1e-16 if dtype == 'float64' else 1e-8) if ssq_logscale else
(1e-13 if dtype == 'float64' else 1e-5))
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
if CAN_GPU:
assert adiff02 < th, (scaletype, dtype, flipud, adiff02)
def test_ssqueeze_stft():
np.random.seed(0)
scaletype = 'linear'
ssq_logscale = False
gamma = 1e-2
const = np.log(2) / 32
for dtype in ('float32', 'float64'):
Sx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
dSx = np.random.randn(100, 512).astype(dtype) * (2 - 1j)
if CAN_GPU:
Sxt, dSxt = [torch.tensor(g, device='cuda') for g in (Sx, dSx)]
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Sx), scaletype)
args = (ssq_freqs, const, ssq_logscale)
kw = dict(flipud=flipud, gamma=gamma)
out0 = ssqueeze_fast(Sx, dSx, *args, **kw, parallel=False)
out1 = ssqueeze_fast(Sx, dSx, *args, **kw, parallel=True)
if CAN_GPU:
out2 = ssqueeze_fast(Sxt, dSxt, *args, **kw).cpu().numpy()
adiff01 = np.abs(out0 - out1).mean()
if CAN_GPU:
adiff02 = np.abs(out0 - out2).mean()
# this is due to `const` varying rather than 'linear'
th = (1e-16 if dtype == 'float64' else 1e-8)
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
if CAN_GPU:
assert adiff02 < th, (scaletype, dtype, flipud, adiff02)
def test_ssqueeze_vs_indexed_sum():
"""Computing `Tx` in one loop vs. first computing `w` then summing."""
np.random.seed(0)
gamma = 1e-2
for dtype in ('float32', 'float64'):
Wx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
dWx = np.random.randn(100, 512).astype(dtype) * (2 - 1j)
w = np.abs((dWx / Wx).imag / (2*np.pi))
w[np.abs(Wx) < gamma] = np.inf
for scaletype in ('log-piecewise', 'log', 'linear'):
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Wx), scaletype)
ssq_logscale = scaletype.startswith('log')
const = (np.log(2) / 32 if ssq_logscale else
ssq_freqs)
args = (ssq_freqs, const, ssq_logscale)
kw = dict(parallel=False, flipud=flipud)
out0 = indexed_sum_onfly(Wx, w, *args, **kw)
out1 = ssqueeze_fast(Wx, dWx, *args, **kw, gamma=gamma)
adiff01 = np.abs(out0 - out1).mean()
# this is due to `const` varying rather than 'linear'
th = ((1e-16 if dtype == 'float64' else 1e-8) if ssq_logscale else
(1e-13 if dtype == 'float64' else 1e-5))
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
def test_buffer():
"""Test that CPU & GPU outputs match for `modulated=True` & `=False`,
and that `modulated=True` matches `ifftshift(buffer(modulated=False))`.
Also that single- & multi-thread CPU outputs agree.
Test both single and batched input.
"""
N = 128
tsigs = TestSignals(N=N)
for dtype in ('float64', 'float32'):
for ndim in (1, 2):
x = (tsigs.cosine()[0].astype(dtype) if ndim == 1 else
np.random.randn(4, N))
xt = torch.as_tensor(x, device='cuda') if CAN_GPU else 0
for modulated in (False, True):
for seg_len in (N//2, N//2 - 1):
for n_overlap in (N//2 - 1, N//2 - 2, N//2 - 3):
if seg_len == n_overlap:
continue
out0 = buffer(x, seg_len, n_overlap, modulated, parallel=True)
if modulated:
out00 = buffer(x, seg_len, n_overlap, modulated=False,
parallel=False)
out00 = ifftshift(out00, axes=0 if ndim == 1 else 1)
if CAN_GPU:
out1 = buffer(xt, seg_len, n_overlap, modulated).cpu().numpy()
assert_params = (dtype, modulated, seg_len, n_overlap)
if modulated:
adiff000 = np.abs(out0 - out00).mean()
assert adiff000 == 0, (*assert_params, adiff000)
if CAN_GPU:
adiff01 = np.abs(out0 - out1).mean()
assert adiff01 == 0, (*assert_params, adiff01)
def test_ssq_stft():
N = 256
tsigs = TestSignals(N=N)
gpu_atol = 1e-5
for dtype in ('float64', 'float32'):
x = tsigs.par_lchirp()[0].astype(dtype)
kw = dict(modulated=1, n_fft=128, dtype=dtype, astensor=False)
os.environ['SSQ_GPU'] = '0'
Tx00 = ssq_stft(x, **kw, get_w=1)[0]
Tx01 = ssq_stft(x, **kw, get_w=0)[0]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx10 = ssq_stft(x, **kw, get_w=1)[0]
Tx11 = ssq_stft(x, **kw, get_w=0)[0]
adiff0001 = np.abs(Tx00 - Tx01).mean()
assert np.allclose(Tx00, Tx01), (dtype, adiff0001)
if CAN_GPU:
adiff0010 = np.abs(Tx00 - Tx10).mean()
adiff0011 = np.abs(Tx00 - Tx11).mean()
assert np.allclose(Tx00, Tx10, atol=gpu_atol), (dtype, adiff0010)
assert np.allclose(Tx00, Tx11, atol=gpu_atol), (dtype, adiff0011)
def test_ssq_cwt():
N = 256
tsigs = TestSignals(N=N)
for dtype in ('float64', 'float32'):
gpu_atol = 1e-8 if dtype == 'float64' else 6e-3
x = tsigs.par_lchirp()[0].astype(dtype)
kw = dict(astensor=False)
os.environ['SSQ_GPU'] = '0'
Tx00 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=1)[0]
Tx01 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=0)[0]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx10 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=1)[0]
Tx11 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=0)[0]
adiff0001 = np.abs(Tx00 - Tx01).mean()
if dtype == 'float64':
assert np.allclose(Tx00, Tx01), (dtype, adiff0001)
else:
assert adiff0001 < 4e-5, (dtype, adiff0001)
if CAN_GPU:
adiff0010 = np.abs(Tx00 - Tx10).mean()
adiff0011 = np.abs(Tx00 - Tx11).mean()
assert np.allclose(Tx00, Tx10, atol=gpu_atol), (dtype, adiff0010)
assert np.allclose(Tx00, Tx11, atol=gpu_atol), (dtype, adiff0011)
os.environ['SSQ_GPU'] = '0'
def test_wavelet_dtype_gmw():
"""Ensure `Wavelet.fn` output is of specified `dtype` for GMW wavelet,
and that `.info()` is computable.
"""
for SSQ_GPU in ('0', '1'):
if SSQ_GPU == '1' and not CAN_GPU:
continue
for order in (0, 1):
for norm in ('bandpass', 'energy'):
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = SSQ_GPU
kw = dict(order=order, norm=norm, dtype=dtype)
wavelet = _wavelet('gmw', **kw)
if norm == 'energy':
dtype = 'float64'
assert wavelet.dtype == dtype, (
"GPU={}, order={}, norm={}, dtype={}, wavelet.dtype={}".format(
SSQ_GPU, order, norm, dtype, wavelet.dtype))
wavelet.info()
os.environ['SSQ_GPU'] = '0'
def test_wavelet_dtype():
"""Ensure `Wavelet.fn` output is of specified `dtype` for non-GMW wavelets,
and that `.info()` is computable.
"""
for SSQ_GPU in ('0', '1'):
if SSQ_GPU == '1' and not CAN_GPU:
continue
for name in ('morlet', 'bump', 'cmhat', 'hhhat'):
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = SSQ_GPU
wavelet = _wavelet(name, dtype=dtype)
assert wavelet.dtype == dtype, (
"GPU={}, name={}, dtype={}, wavelet.dtype={}".format(
SSQ_GPU, name, dtype, wavelet.dtype))
wavelet.info()
os.environ['SSQ_GPU'] = '0'
def test_higher_order():
"""`cwt` & `ssq_cwt` CPU & GPU outputs agreement."""
if not CAN_GPU:
return
tsigs = TestSignals(N=256)
x = tsigs.par_lchirp()[0]
x += x[::-1]
kw = dict(order=range(3), astensor=False)
for dtype in ('float32', 'float64'):
os.environ['SSQ_GPU'] = '0'
Tx0, Wx0, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
os.environ['SSQ_GPU'] = '1'
Tx1, Wx1, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
adiff_Tx = np.abs(Tx0 - Tx1).mean()
adiff_Wx = np.abs(Wx0 - Wx1).mean()
# less should be possible for float64, but didn't investigate
th = 2e-7 if dtype == 'float64' else 1e-4
assert adiff_Tx < th, (dtype, adiff_Tx, th)
assert adiff_Wx < th, (dtype, adiff_Wx, th)
os.environ['SSQ_GPU'] = '0'
def test_cwt_for_loop():
"""Ensure `vectorized=False` runs on GPU and outputs match `=True`."""
if not CAN_GPU:
return
np.random.seed(0)
x = np.random.randn(256)
kw = dict(derivative=True, astensor=False)
os.environ['SSQ_GPU'] = '1'
for dtype in ('float64', 'float32'):
Wx0, _, dWx0 = cwt(x, _wavelet(dtype=dtype), vectorized=False, **kw)
Wx1, _, dWx1 = cwt(x, _wavelet(dtype=dtype), vectorized=True, **kw)
adiff_Wx = np.abs(Wx0 - Wx1)
adiff_dWx = np.abs(dWx0 - dWx1)
atol = 1e-12 if dtype == 'float64' else 1e-6
assert np.allclose(Wx0, Wx1, atol=atol), (dtype, adiff_Wx.mean())
assert np.allclose(dWx0, dWx1, atol=atol), (dtype, adiff_dWx.mean())
def test_ssq_cwt_batched():
"""Ensure batched (2D `x`) inputs output same as if samples fed separately,
and agreement between CPU & GPU.
"""
np.random.seed(0)
x = np.random.randn(4, 256)
kw = dict(astensor=False)
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = '0'
Tx0, Wx0, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
Tx00 = np.zeros(Tx0.shape, dtype=Tx0.dtype)
Wx00 = Tx00.copy()
for i, _x in enumerate(x):
out = ssq_cwt(_x, _wavelet(dtype=dtype), **kw)
Tx00[i], Wx00[i] = out[0], out[1]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx1, Wx1, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
atol = 1e-12 if dtype == 'float64' else 1e-2
adiff_Tx000 = np.abs(Tx00 - Tx0).mean()
adiff_Wx000 = np.abs(Wx00 - Wx0).mean()
assert np.allclose(Wx00, Wx0), (dtype, adiff_Wx000)
assert np.allclose(Tx00, Tx0), (dtype, adiff_Tx000)
if CAN_GPU:
adiff_Tx01 = np.abs(Tx0 - Tx1).mean()
adiff_Wx01 = np.abs(Wx0 - Wx1).mean()
assert np.allclose(Wx0, Wx1, atol=atol), (dtype, adiff_Wx01)
assert np.allclose(Tx0, Tx1, atol=atol), (dtype, adiff_Tx01)
# didn't investigate float32, and `allclose` threshold is pretty bad,
# so check MAE
if dtype == 'float32':
assert adiff_Tx01 < 2.5e-5, (dtype, adiff_Tx01)
def test_ssq_stft_batched():
"""Ensure batched (2D `x`) inputs output same as if samples fed separately,
and agreement between CPU & GPU.
"""
np.random.seed(0)
x = np.random.randn(4, 256)
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = '0'
kw = dict(astensor=False, dtype=dtype)
Tx0, Sx0, *_ = ssq_stft(x, **kw)
Tx00 = np.zeros(Tx0.shape, dtype=Tx0.dtype)
Sx00 = Tx00.copy()
for i, _x in enumerate(x):
out = ssq_stft(_x, **kw)
Tx00[i], Sx00[i] = out[0], out[1]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx1, Sx1, *_ = ssq_stft(x, **kw)
atol = 1e-12 if dtype == 'float64' else 1e-6
adiff_Tx000 = np.abs(Tx00 - Tx0).mean()
adiff_Sx000 = np.abs(Sx00 - Sx0).mean()
assert np.allclose(Sx00, Sx0), (dtype, adiff_Sx000)
assert np.allclose(Tx00, Tx0), (dtype, adiff_Tx000)
if CAN_GPU:
adiff_Tx01 = np.abs(Tx0 - Tx1)
adiff_Sx01 = np.abs(Sx0 - Sx1)
assert np.allclose(Sx0, Sx1, atol=atol), (dtype, adiff_Sx01)
assert np.allclose(Tx0, Tx1, atol=atol), (dtype, adiff_Tx01)
def test_cwt_batched_for_loop():
"""Ensure basic batched cwt works with both `vectorized`."""
os.environ['SSQ_GPU'] = '0'
np.random.seed(0)
x = np.random.randn(4, 256)
for dtype in ('float64', 'float32'):
Wx0, *_ = cwt(x, _wavelet(dtype=dtype), vectorized=True)
Wx1, *_ = cwt(x, _wavelet(dtype=dtype), vectorized=False)
adiff_Wx01 = np.abs(Wx0 - Wx1)
assert np.allclose(Wx0, Wx1), (dtype, adiff_Wx01.mean())
if __name__ == '__main__':
if VIZ:
test_1D()
test_2D()
test_indexed_sum()
test_parallel_setting()
test_phase_cwt()
test_phase_stft()
test_replace_under_abs()
test_indexed_sum_onfly()
test_ssqueeze_cwt()
test_ssqueeze_stft()
test_ssqueeze_vs_indexed_sum()
test_buffer()
test_ssq_stft()
test_ssq_cwt()
test_wavelet_dtype_gmw()
test_wavelet_dtype()
test_higher_order()
test_cwt_for_loop()
test_ssq_cwt_batched()
test_ssq_stft_batched()
test_cwt_batched_for_loop()
else:
pytest.main([__file__, "-s"])
| 24,286
| 35.195231
| 81
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/misc_test.py
|
# -*- coding: utf-8 -*-
"""Utilities & others
"""
import pytest
from ssqueezepy.wavelets import Wavelet
from ssqueezepy.utils import cwt_scalebounds
# no visuals here but 1 runs as regular script instead of pytest, for debugging
VIZ = 0
def test_bounds():
    """`cwt_scalebounds` should run without error across a range of `N`."""
    wavelet = Wavelet(('morlet', {'mu': 6}))
    for N in (4096, 2048, 1024, 512, 256, 128, 64):
        try:
            cwt_scalebounds(wavelet, N=N)
        except Exception as e:
            # chain the original exception instead of discarding its traceback
            raise Exception(f"N={N} failed; errmsg:\n{e}") from e
if __name__ == '__main__':
if VIZ:
test_bounds()
else:
pytest.main([__file__, "-s"])
| 613
| 21.740741
| 79
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/conftest.py
|
def pytest_configure(config):
    # Detect Spyder's test runner by the bottom frame of the call stack, and
    # switch matplotlib to a non-rendering backend so tests don't pop plots.
    import traceback
    t = traceback.extract_stack()
    if 'pytestworker.py' in t[0][0]:
        import matplotlib
        matplotlib.use('template')  # suppress plots when Spyder unit-testing
| 227
| 27.5
| 77
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/z_all_test.py
|
# -*- coding: utf-8 -*-
"""Lazy tests just to ensure nothing breaks.
`z_` to ensure test runs last per messing with module namespaces.
"""
#### Disable Numba JIT during testing, as pytest can't measure its coverage ##
# TODO find shorter way to do this
def njit(fn):
    """Pass-through stand-in for `numba.njit`: wraps `fn` in a plain
    forwarder so JIT compilation is disabled during testing."""
    def forward(*args, **kwargs):
        return fn(*args, **kwargs)
    return forward
def jit(*args, **kw):
    """Pass-through stand-in for `numba.jit`: ignores every decorator
    option and returns the decorated function unchanged."""
    def identity(fn):
        return fn
    return identity
import numba
njit_orig = numba.njit
jit_orig = numba.jit
##############################################################################
import os
import pytest
import numpy as np
from ssqueezepy._cwt import _icwt_norm
from ssqueezepy.configs import gdefaults
from ssqueezepy import Wavelet, TestSignals, ssq_cwt, issq_cwt, cwt, icwt
from ssqueezepy import ssq_stft, issq_stft, ssqueeze, get_window, extract_ridges
from ssqueezepy import _gmw, utils, visuals, wavelets, toolkit
from ssqueezepy.utils.common import find_closest_parallel_is_faster
from ssqueezepy.ssqueezing import _check_ssqueezing_args
#### Ensure cached imports reloaded ##########################################
from types import ModuleType
from imp import reload
import ssqueezepy
def reload_all():
    """Reload `ssqueezepy` and its top-level submodules so monkeypatched
    dependencies (e.g. numba) take effect in already-imported modules."""
    reload(ssqueezepy)
    for name in dir(ssqueezepy):
        obj = getattr(ssqueezepy, name)
        # only reload modules the package declares as top-level
        if isinstance(obj, ModuleType) and name in ssqueezepy._modules_toplevel:
            reload(obj)
##############################################################################
# no visuals here but 1 runs as regular script instead of pytest, for debugging
VIZ = 0
def test_numba_monke():
"""Run this *at test time* rather than collection so changes
don't apply to other test files. This is for coverage of @jit'd funcs.
"""
numba.njit = njit
numba.jit = jit
print("numba.njit is now monke")
print("numba.jit is now monke")
reload(numba)
numba.njit = njit
numba.jit = jit
reload_all()
def test_ssq_cwt():
os.environ['SSQ_GPU'] = '0' # in case concurrent tests set it to '1'
np.random.seed(5)
x = np.random.randn(64)
for wavelet in ('morlet', ('morlet', {'mu': 20}), 'bump'):
Tx, *_ = ssq_cwt(x, wavelet)
issq_cwt(Tx, wavelet)
kw = dict(x=x, wavelet='morlet')
params = dict(
squeezing=('lebesgue',),
scales=('linear', 'log:minimal', 'linear:naive',
np.power(2**(1/8), np.arange(1, 32))),
difftype=('phase', 'numeric'),
padtype=('zero', 'replicate'),
maprange=('maximal', 'energy', 'peak', (1, 32)),
)
for name in params:
for value in params[name]:
errored = True
try:
if name == 'maprange' and value in ('maximal', (1, 32)):
_ = ssq_cwt(**kw, **{name: value}, scales='log', get_w=1)
else:
_ = ssq_cwt(**kw, **{name: value}, get_w=1)
errored = False
finally:
if errored:
print(f"\n{name}={value} failed\n")
_ = ssq_cwt(x, wavelet, fs=2, difftype='numeric', difforder=2, get_w=1)
_ = ssq_cwt(x, wavelet, fs=2, difftype='numeric', difforder=1, get_w=1)
def test_cwt():
    """`cwt` for-loop vs vectorized agreement, `icwt` smoke tests, and
    non-finite input handling."""
    os.environ['SSQ_GPU'] = '0'
    x = np.random.randn(64)
    Wx, *_ = cwt(x, 'morlet', vectorized=True)
    _ = icwt(Wx, 'morlet', one_int=True)
    _ = icwt(Wx, 'morlet', one_int=False)
    Wx2, *_ = cwt(x, 'morlet', vectorized=False)
    mae = np.mean(np.abs(Wx - Wx2))
    assert mae <= 1e-16, f"MAE = {mae} > 1e-16 for for-loop vs vectorized `cwt`"
    _ = utils.est_riskshrink_thresh(Wx, nv=32)
    _ = _icwt_norm(scaletype='linear', l1_norm=False)
    # non-finite inputs should still run without raising
    x[0] = np.nan
    x[1] = np.inf
    x[2] = -np.inf
    _ = cwt(x, 'morlet', vectorized=False, derivative=True, l1_norm=False)
def test_ssq_stft():
os.environ['SSQ_GPU'] = '0'
Tsx = np.random.randn(128, 128)
pass_on_error(issq_stft, Tsx, modulated=False)
pass_on_error(issq_stft, Tsx, hop_len=2)
def test_wavelets():
os.environ['SSQ_GPU'] = '0'
for wavelet in ('morlet', ('morlet', {'mu': 4}), 'bump'):
wavelet = Wavelet(wavelet)
wavelet = Wavelet(('morlet', {'mu': 5}))
wavelet.viz(name='overview')
wavelet.info(nondim=1)
wavelet.info(nondim=0)
#### Visuals #############################################################
for name in wavelet.VISUALS:
if 'anim:' in name: # heavy-duty computations, skip animating
kw = {'testing': True}
else:
kw = {}
try:
wavelet.viz(name, N=256, **kw)
except TypeError as e:
if "positional argument" not in str(e):
raise TypeError(e)
try:
wavelet.viz(name, scale=10, N=256, **kw)
except TypeError as e:
if "positional argument" not in str(e):
raise TypeError(e)
wavelet.viz(name, scales='log', N=256, **kw)
_ = utils.cwt_scalebounds(wavelet, N=512, viz=3)
#### misc ################################################################
wavelet = Wavelet(lambda x: x)
_ = wavelets._xifn(scale=10, N=128)
def test_toolkit():
    """Smoke-test `toolkit` helpers on random data."""
    Tx = np.random.randn(20, 20)
    Cs, freqband = toolkit.lin_band(Tx, slope=1, offset=.1, bw=.025)
    _ = toolkit.cos_f([1], N=64)
    _ = toolkit.sin_f([1], N=64)
    _ = toolkit.where_amax(Tx)
    _ = toolkit.mad_rms(np.random.randn(10), np.random.randn(10))
def test_visuals():
os.environ['SSQ_GPU'] = '0'
x = np.random.randn(10)
visuals.hist(x, show=1, stats=1)
y = x * (1 + 1j)
visuals.plot(y, complex=1, c_annot=1, vlines=1, ax_equal=1,
xticks=np.arange(len(y)), yticks=y)
visuals.plot(y, abs=1, vert=1, dx1=1, ticks=0)
visuals.scat(x, vlines=1, hlines=1)
visuals.scat(y, complex=1, ticks=0)
visuals.plotscat(y, show=1, xlims=(-1, 1), dx1=1, ylabel="5")
visuals.plots([y, y], tight=1, show=1)
visuals.plots([y, y], nrows=2)
visuals.plots([y, y], ncols=2)
g = np.random.randn(4, 4)
visuals.imshow(g * (1 + 2j), complex=1)
visuals.imshow(g, ridge=1, ticks=0)
pass_on_error(visuals.plot, None, None)
pass_on_error(visuals.wavelet_tf, 'morlet', notext=True)
def test_utils():
os.environ['SSQ_GPU'] = '0'
_ = utils.buffer(np.random.randn(20), 4, 1)
wavelet = Wavelet(('morlet', {'mu': 6}))
_ = wavelets.center_frequency(wavelet, viz=1)
_ = wavelets.freq_resolution( wavelet, viz=1, scale=3, force_int=0)
_ = wavelets.time_resolution( wavelet, viz=1)
xh = np.random.randn(128)
xhs = np.zeros(xh.size)
wavelets._aifftshift_even(xh, xhs)
wavelets._afftshift_even(xh, xhs)
_ = utils.padsignal(xh, padlength=len(xh)*2, padtype='symmetric')
_ = utils.padsignal(xh, padlength=len(xh)*2, padtype='wrap')
x2d = np.random.randn(4, 64)
_ = utils.padsignal(x2d, padlength=96, padtype='symmetric')
g = np.ones((128, 200))
utils.unbuffer(g, xh, 1, n_fft=len(xh), N=None, win_exp=0)
utils.unbuffer(g, xh, 1, n_fft=len(xh), N=g.shape[1], win_exp=2)
scales = utils.process_scales('log', 1024, Wavelet())
_ = utils.find_downsampling_scale(Wavelet(), scales, method='any',
viz_last=1)
_ = utils.find_downsampling_scale(Wavelet(), scales, method='all')
#### errors / warnings ###################################################
pass_on_error(utils.find_max_scale, 1, 1, -1, -1)
pass_on_error(utils.cwt_scalebounds, 1, 1, preset='etc', min_cutoff=0)
pass_on_error(utils.cwt_scalebounds, 1, 1, min_cutoff=-1)
pass_on_error(utils.cwt_scalebounds, 1, 1, min_cutoff=.2, max_cutoff=.1)
pass_on_error(utils.cwt_scalebounds, 1, 1, cutoff=0)
pass_on_error(utils.cwt_utils._assert_positive_integer, -1, 'w')
pass_on_error(utils.infer_scaletype, 1)
pass_on_error(utils.infer_scaletype, np.array([1]))
pass_on_error(utils.infer_scaletype, np.array([1., 2., 5.]))
pass_on_error(utils._process_fs_and_t, 1, np.array([1]), 2)
pass_on_error(utils._process_fs_and_t, 1, np.array([1., 2, 4]), 3)
pass_on_error(utils._process_fs_and_t, -1, None, 1)
pass_on_error(utils.make_scales, 128, scaletype='banana')
pass_on_error(utils.padsignal, np.random.randn(3, 4, 5))
def test_anim():
# bare minimally (still takes long, but covers many lines of code)
wavelet = Wavelet(('morlet', {'mu': 6}))
wavelet.viz('anim:time-frequency', N=8, scales=np.linspace(10, 20, 3))
def test_ssqueezing():
os.environ['SSQ_GPU'] = '0'
Wx = np.random.randn(4, 4)
w = np.abs(Wx)
pass_on_error(ssqueeze, Wx, w, transform='greenland')
pass_on_error(ssqueeze, Wx, w, transform='cwt', scales=None)
pass_on_error(ssqueeze, Wx, w, transform='cwt', wavelet=None,
maprange='maximal')
pass_on_error(ssqueeze, Wx, w, transform='stft', maprange='minimal')
pass_on_error(ssqueeze, Wx, w, transform='stft', ssq_freqs='linear')
pass_on_error(ssqueeze, Wx, w, transform='abs')
pass_on_error(ssqueeze, Wx, w, squeezing='big_bird')
pass_on_error(ssqueeze, Wx, w, squeezing=lambda x: x**2)
pass_on_error(ssqueeze, Wx, w, squeezing='abs')
def test_get_window():
    """`get_window` runs for a valid name; invalid input path only exercised."""
    _ = get_window('hann', win_len=128, n_fft=None)
    pass_on_error(get_window, 1, 2)
def test_windows():
    """Smoke-test window area/resolution utilities on a default window."""
    window = get_window(None, win_len=100, n_fft=128)
    utils.window_area(window, time=True, frequency=True)
    utils.window_area(window, time=True, frequency=False)
    utils.window_area(window, time=False, frequency=True)
    utils.window_resolution(window)
def test_morse_utils():
"""Test miscellaneous utility funcs."""
_gmw.morseafun(3, 60, 1, 'bandpass')
_gmw.morseafun(3, 60, 1, 'energy')
for n_out in range(1, 5):
_gmw.morsefreq(3, 60, n_out=n_out)
_gmw._morsemom(1, 3, 60, n_out=n_out)
_gmw._moments_to_cumulants(np.random.uniform(0, 1, 5))
pass_on_error(_gmw._check_args, gamma=-1)
pass_on_error(_gmw._check_args, beta=-1)
pass_on_error(_gmw._check_args, norm='cactus')
pass_on_error(_gmw._check_args, scale=-1)
def test_test_signals():
os.environ['SSQ_GPU'] = '0'
tsigs = TestSignals()
pass_on_error(tsigs, dft='doot')
fn = lambda *args, **kw: (np.random.randn(100, 100), {})
tsigs.test_transforms(fn)
pass_on_error(tsigs._process_input, 'etc:t')
pass_on_error(tsigs._process_input, ['a', 1])
pass_on_error(tsigs._process_input, ['a', (1, 2)])
backup = tsigs.default_args.copy()
tsigs.default_args['am-cosine'] = dict(amin=.1)
pass_on_error(tsigs._process_input, 'am-cosine')
tsigs.default_args['am-cosine'] = 2
pass_on_error(tsigs._process_input, 'am-cosine')
tsigs.default_args.update(backup)
def test_cwt_higher_order():
os.environ['SSQ_GPU'] = '0'
N = 256
tsigs = TestSignals()
x, t = tsigs.par_lchirp(N=N)
x += x[::-1]
for noise in (False, True):
if noise:
x += np.random.randn(len(x))
Wx_k, scales = cwt(x, 'gmw', order=range(3), average=False)
visuals.viz_cwt_higher_order(Wx_k, scales, 'gmw')
print("=" * 80)
_ = cwt(x, ('gmw', {'norm': 'energy'}), order=(0, 1), average=True,
l1_norm=False)
_ = cwt(x, 'gmw', order=1, average=False, derivative=True)
def test_viz_gmw_orders():
os.environ['SSQ_GPU'] = '0'
N = 256
gamma, beta, norm = 3, 60, 'bandpass'
n_orders = 3
scale = 5
visuals.viz_gmw_orders(N, n_orders, scale, gamma, beta, norm)
def test_trigdiff():
"""Ensure `trigdiff` matches `cwt(derivative=True)`."""
os.environ['SSQ_GPU'] = '0'
N = 256
x = np.random.randn(N)
Wx, _, dWx = cwt(x, derivative=True, rpadded=True)
_, n1, _ = utils.p2up(N)
dWx2 = utils.trigdiff(Wx, rpadded=True, N=N, n1=n1)
dWx = dWx[:, n1:n1+N]
mae = np.mean(np.abs(dWx - dWx2))
th = 1e-15 if dWx.dtype == np.cfloat else 1e-7
assert mae < th, mae
def test_logscale_transition_idx():
"""Ensure the function splits `idx` such that `scales` are split as
`[scales[:idx], scales[idx:]]`
"""
scales = np.exp(np.linspace(0, 5, 512))
idx = 399
for downsample in (2, 3, 4):
scales1 = scales[:idx]
scales2 = scales[idx + downsample - 1::downsample]
scales = np.hstack([scales1, scales2])
tidx = utils.logscale_transition_idx(scales)
assert idx == tidx, "{} != {}".format(idx, tidx)
def test_dtype():
"""Ensure `cwt` and `ssq_cwt` compute at appropriate precision depending
on `Wavelet.dtype`, returning float32 & complex64 arrays for single precision.
"""
os.environ['SSQ_GPU'] = '0'
wav32, wav64 = Wavelet(dtype='float32'), Wavelet(dtype='float64')
x = np.random.randn(256)
outs32 = ssq_cwt(x, wav32)
outs64 = ssq_cwt(x, wav64)
outs32_o2 = ssq_cwt(x, wav32, order=2)
names = ('Tx', 'Wx', 'ssq_freqs', 'scales', 'w', 'dWx')
outs32 = {k: v for k, v in zip(names, outs32)}
outs32_o2 = {k: v for k, v in zip(names, outs32_o2)}
outs64 = {k: v for k, v in zip(names, outs64)}
for k, v in outs32.items():
if k == 'ssq_freqs':
assert v.dtype == np.float64, ("float32", k, v.dtype)
continue
assert v.dtype in (np.float32, np.complex64), ("float32", k, v.dtype)
for k, v in outs32_o2.items():
if k == 'ssq_freqs':
assert v.dtype == np.float64, ("float32", k, v.dtype)
continue
assert v.dtype in (np.float32, np.complex64), ("float32", k, v.dtype)
for k, v in outs64.items():
if k == 'ssq_freqs':
assert v.dtype == np.float64, ("float32", k, v.dtype)
continue
assert v.dtype in (np.float64, np.complex128), ("float64", k, v.dtype)
def test_find_closest_parallel_is_faster():
    # Thin wrapper: delegates to the shared helper (defined elsewhere in this
    # module) with a small (rows, cols) shape to keep the test fast.
    find_closest_parallel_is_faster((50, 200))
def test_wavelet_info():
    """Exercise `Wavelet.info()` across GMW configs and builtin wavelets,
    under both serial and parallel modes."""
    gmw_configs = ({'norm': 'bandpass'},
                   {'norm': 'energy'},
                   {'norm': 'bandpass', 'order': 1},
                   {'norm': 'energy', 'order': 1})
    for parallel in ('0', '1'):
        os.environ['SSQ_PARALLEL'] = parallel
        for cfg in gmw_configs:
            Wavelet(('gmw', cfg)).info()
        for name in ('morlet', 'bump', 'cmhat', 'hhhat'):
            Wavelet(name).info()
def test_ridge_extraction():
    """For @jit coverage."""
    Wx, scales = cwt(np.random.randn(128))
    for parallel in (False, True):
        _ = extract_ridges(Wx, scales, transform='cwt', parallel=parallel)
def test_check_ssqueezing_args():
    # Calls wrapped in `pass_on_error` exercise error paths: any exception
    # raised is swallowed, so these lines only provide coverage.
    pass_on_error(_check_ssqueezing_args, 1)
    pass_on_error(_check_ssqueezing_args, 'sum', maprange=('a', 'b'))
    pass_on_error(_check_ssqueezing_args, 'sum', maprange=dict(a=1))
    pass_on_error(_check_ssqueezing_args, 'sum', maprange='peak')
    pass_on_error(_check_ssqueezing_args, 'sum', difftype='o')
    pass_on_error(_check_ssqueezing_args, 'sum', difftype='phase', get_w=0)
    pass_on_error(_check_ssqueezing_args, 'sum', difftype='phase', difforder=4,
                  get_w=1)
    pass_on_error(_check_ssqueezing_args, 'sum', difftype='numeric', difforder=3,
                  get_w=1)

    # Unwrapped calls: these argument combinations must not raise.
    _check_ssqueezing_args('sum', difftype='phase', difforder=4, get_w=1)
    _check_ssqueezing_args('sum', maprange='peak', transform='stft')
def test_misc():
    # Coverage for wavelet caching, non-vectorized path, and `get_w`/`get_dWx`.
    _ = cwt(np.random.randn(128), 'gmw', cache_wavelet=True)
    _ = cwt(np.random.randn(128), Wavelet(), cache_wavelet=True, vectorized=False)
    _ = ssq_stft(np.random.randn(100), get_w=1, get_dWx=1)

    # Invalid inputs (3D array, scalar); errors are swallowed by the helper.
    pass_on_error(cwt, np.random.randn(2, 2, 2))
    pass_on_error(cwt, 5)
    pass_on_error(ssq_stft, np.random.randn(2, 2, 2), get_w=1)
def test_configs():
    # `gdefaults(None)` is an invalid call; error is tolerated for coverage.
    pass_on_error(gdefaults, None)
def pass_on_error(fn, *args, **kw):
    """Call `fn(*args, **kw)`, suppressing any `Exception` it raises.

    Used to exercise error paths for coverage. Catches `Exception` rather
    than using a bare `except:` so `KeyboardInterrupt` / `SystemExit`
    still propagate (per PEP 8).
    """
    try:
        fn(*args, **kw)
    except Exception:
        pass
if __name__ == '__main__':
    if VIZ:
        # Run tests directly as a script (shows plots).
        test_ssq_cwt()
        test_cwt()
        test_ssq_stft()
        test_wavelets()
        test_toolkit()
        test_visuals()
        test_utils()
        test_anim()
        test_ssqueezing()
        test_get_window()
        test_windows()
        test_morse_utils()
        test_test_signals()
        test_cwt_higher_order()
        test_viz_gmw_orders()
        test_trigdiff()
        test_logscale_transition_idx()
        test_dtype()
        test_find_closest_parallel_is_faster()
        test_wavelet_info()
        test_ridge_extraction()
        test_check_ssqueezing_args()
        test_misc()
        test_configs()
    else:
        pytest.main([__file__, "-s"])

# restore original in case it matters for future testing
# NOTE(review): module-level placement assumed (indentation lost in source);
# `njit_orig`/`jit_orig`/`reload_all` are defined earlier in this module.
reload(numba)
numba.njit = njit_orig
numba.jit = jit_orig
reload_all()
print("numba.njit is no longer monke")
print("numba.jit is no longer monke")
| 16,821
| 31.919765
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/ridge_extraction_test.py
|
# -*- coding: utf-8 -*-
"""Coverage-mainly tests; see examples/extracting_ridges.py for more test cases.
"""
import os
import pytest
import numpy as np
import scipy.signal as sig
from ssqueezepy import ssq_cwt, ssq_stft, extract_ridges, cwt
from ssqueezepy.visuals import plot, imshow
# set to 1 to run tests as functions, showing plots
VIZ = 0
os.environ['SSQ_GPU'] = '0' # in case concurrent tests set it to '1'
def test_basic():
    """Example ridge from similar example as can be found at MATLAB:
    https://www.mathworks.com/help/wavelet/ref/wsstridge.html#bu6we25-penalty
    """
    energies = np.array([[1, 4, 4], [2, 2, 2], [5, 5, 4]])
    freqs = np.exp([1, 2, 3])
    ridge_idxs, *_ = extract_ridges(energies, freqs, penalty=2.0,
                                    get_params=True)
    expected = np.array([[2, 2, 2]])
    assert np.allclose(ridge_idxs, expected)
def test_poly():
    """Cubic polynomial frequency variation + pure tone."""
    N, f = 257, 0.5
    t = np.linspace(0, 10, N, endpoint=True)
    poly = np.poly1d([0.025, -0.36, 1.25, 2.0])
    chirp = sig.sweep_poly(t, poly)
    tone = np.sin(2*np.pi * f * t)

    tf_transforms(chirp + tone, t, padtype='wrap', stft_bw=4, penalty=2.0)
def viz(x, Tf, ridge_idxs, yticks=None, ssq=False, transform='cwt', show_x=True):
    """Plot time-frequency representation `Tf` with extracted ridges overlaid.
    No-op unless the module-level `VIZ` flag is set.
    """
    if not VIZ:
        return
    if show_x:
        plot(x, title="x(t)", show=1,
             xlabel="Time [samples]", ylabel="Signal Amplitude [A.U.]")

    if transform == 'cwt' and not ssq:
        # flip CWT rows for display, and mirror ridge indices to match
        Tf = np.flipud(Tf)
        ridge_idxs = len(Tf) - ridge_idxs

    ylabel = ("Frequency scales [1/Hz]" if (transform == 'cwt' and not ssq) else
              "Frequencies [Hz]")
    title = "abs({}{}) w/ ridge_idxs".format("SSQ_" if ssq else "",
                                             transform.upper())

    ikw = dict(abs=1, cmap='turbo', yticks=yticks, title=title)
    pkw = dict(linestyle='--', color='k', xlabel="Time [samples]", ylabel=ylabel,
               xlims=(0, Tf.shape[1]), ylims=(0, len(Tf)))
    imshow(Tf, **ikw, show=0)
    plot(ridge_idxs, **pkw, show=1)
def tf_transforms(x, t, wavelet='morlet', window=None, padtype='wrap',
                  penalty=.5, n_ridges=2, cwt_bw=15, stft_bw=15,
                  ssq_cwt_bw=4, ssq_stft_bw=4):
    """Run CWT, SSQ-CWT, STFT & SSQ-STFT on `x`, extract ridges from each,
    and visualize (visualization is a no-op unless `VIZ` is set)."""
    os.environ['SSQ_GPU'] = '0'
    kw_cwt = dict(t=t, padtype=padtype)
    kw_stft = dict(fs=1/(t[1] - t[0]), padtype=padtype)
    Twx, Wx, ssq_freqs_c, scales, *_ = ssq_cwt(x, wavelet, **kw_cwt)
    Tsx, Sx, ssq_freqs_s, Sfs, *_ = ssq_stft(x, window, **kw_stft)

    # per-transform ridge-extraction configs (only `bw` differs per transform)
    ckw = dict(penalty=penalty, n_ridges=n_ridges, transform='cwt')
    skw = dict(penalty=penalty, n_ridges=n_ridges, transform='stft')
    cwt_ridges = extract_ridges(Wx, scales, bw=cwt_bw, **ckw)
    ssq_cwt_ridges = extract_ridges(Twx, ssq_freqs_c, bw=ssq_cwt_bw, **ckw)
    stft_ridges = extract_ridges(Sx, Sfs, bw=stft_bw, **skw)
    ssq_stft_ridges = extract_ridges(Tsx, ssq_freqs_s, bw=ssq_stft_bw, **skw)

    # only the first plot shows the raw signal itself
    viz(x, Wx, cwt_ridges, scales, ssq=0, transform='cwt', show_x=1)
    viz(x, Twx, ssq_cwt_ridges, ssq_freqs_c, ssq=1, transform='cwt', show_x=0)
    viz(x, Sx, stft_ridges, Sfs, ssq=0, transform='stft', show_x=0)
    viz(x, Tsx, ssq_stft_ridges, ssq_freqs_s, ssq=1, transform='stft', show_x=0)
def test_parallel():
    """Ensure `parallel=True` output matches that of `=False`."""
    for N in (255, 512):
        Wx, scales = cwt(np.random.randn(N))

        serial_out = extract_ridges(Wx, scales, parallel=False)
        parallel_out = extract_ridges(Wx, scales, parallel=True)

        adiff = np.abs(serial_out - parallel_out)
        assert np.allclose(serial_out, parallel_out), (
            "N=%s, Max err: %s" % (N, adiff.max()))
if __name__ == '__main__':
    if VIZ:
        # run as a plain script with plots shown
        test_basic()
        test_poly()
        test_parallel()
    else:
        pytest.main([__file__, "-s"])
| 3,910
| 34.554545
| 81
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/adm_coef_test.py
|
# -*- coding: utf-8 -*-
"""Test accuracy and stability of computing `adm_cwt` and `adm_ssq`
via numeric integration.
Unstable integration will yield values close to zero, whereass admissibility
coefficients for majority of wavelet configurations will evaluate to a
'not very small' value (set to 1e-3 here).
"""
import pytest
import numpy as np
from ssqueezepy.utils import adm_cwt, adm_ssq
VIZ = 0 # set to 1 to visualize wavelet adm-coef dependence on their params
def _test(make_wavelet, params, th=1e-3):
    """Compute `adm_cwt` & `adm_ssq` for wavelets built by `make_wavelet(param)`
    over each `param`, asserting every coefficient exceeds `th` (values near
    zero indicate unstable numeric integration).
    """
    acwt = np.zeros(len(params))
    assq = acwt.copy()
    for i, param in enumerate(params):
        wavelet = make_wavelet(param)
        acwt[i] = adm_cwt(wavelet)
        assq[i] = adm_ssq(wavelet)
    if VIZ:
        _viz(acwt, assq, params)

    # include the offending minimum so failures are diagnosable
    # (original message only reported `th`)
    if not np.all(acwt > th):
        raise AssertionError(f"adm_cwt: min={acwt.min():.2e} <= th={th}")
    if not np.all(assq > th):
        raise AssertionError(f"adm_ssq: min={assq.min():.2e} <= th={th}")
def test_morlet():
    """Sweep `mu` over [4, 30] for the morlet wavelet."""
    make_wavelet = lambda mu: ('morlet', {'mu': mu})
    _test(make_wavelet, params=np.linspace(4, 30, 200))
def test_bump():
    """Sweep `mu` over [4, 30] for the bump wavelet."""
    make_wavelet = lambda mu: ('bump', {'mu': mu})
    _test(make_wavelet, params=np.linspace(4, 30, 200))
def test_cmhat():
    """Sweep `mu` over [4, 30] for the complex mexican-hat wavelet."""
    make_wavelet = lambda mu: ('cmhat', {'mu': mu})
    _test(make_wavelet, params=np.linspace(4, 30, 200))
def test_hhhat():
    """Sweep `mu` over [4, 30] for the hhhat wavelet."""
    make_wavelet = lambda mu: ('hhhat', {'mu': mu})
    _test(make_wavelet, params=np.linspace(4, 30, 200))
def _viz(acwt, assq, params):
    # Plot both admissibility coefficients against the swept parameter.
    # NOTE: relies on `plt` being imported at module level by the `__main__`
    # block; only reachable when VIZ=1.
    plt.plot(params, acwt)
    plt.plot(params, assq)
    mx = max(acwt.max(), assq.max())
    plt.ylim(-.05 * mx, None)  # small margin below zero for readability
    plt.show()
if __name__ == '__main__':
    if VIZ:
        # matplotlib only needed for the interactive/visual run
        import matplotlib.pyplot as plt
        test_morlet()
        test_bump()
        test_cmhat()
        test_hhhat()
    else:
        pytest.main([__file__, "-s"])
| 1,855
| 24.081081
| 76
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/reconstruction_test.py
|
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from ssqueezepy import ssq_cwt, issq_cwt, ssq_stft, issq_stft
from ssqueezepy import cwt, icwt, stft, istft
from ssqueezepy._stft import get_window
from ssqueezepy.toolkit import lin_band
# librosa is an optional dependency, only used by `test_stft_vs_librosa`;
# that test silently skips itself if this import failed.
try:
    from librosa import stft as lstft
except Exception as e:
    import logging
    logging.warn("librosa import failed with:\n%s" % str(e))
VIZ = 0 # set to 1 to enable various visuals and run without pytest
os.environ['SSQ_GPU'] = '0' # in case concurrent tests set it to '1'
#### Helper methods ##########################################################
def _t(min, max, N):
return np.linspace(min, max, N, endpoint=False)
def cos_f(freqs, N=128, phi=0):
    """Concatenate unit-interval cosine segments, one per frequency in `freqs`,
    each sampled with `N` points and phase offset `phi`."""
    segments = []
    for i, f in enumerate(freqs):
        segments.append(np.cos(2 * np.pi * f * (_t(i, i + 1, N) + phi)))
    return np.concatenate(segments)
def mad_rms(x, xrec):
    """Reconstruction error metric; scale-invariant, robust to outliers
    and partly sparsity. https://stats.stackexchange.com/q/495242/239063"""
    mad = np.abs(x - xrec).mean()
    rms = np.sqrt((x**2).mean())
    return mad / rms
#### Test signals ############################################################
def echirp(N):
    """Exponential chirp of `N` samples over t in [0, 10)."""
    t = _t(0, 10, N)
    x = np.cos(2 * np.pi * 3 * np.exp(t / 3))
    return x, t
def lchirp(N):
    """Linear chirp of `N` samples over t in [0, 10)."""
    t = _t(0, 10, N)
    x = np.cos(np.pi * t**2)
    return x, t
def _freqs(N, freqs):
    """Piecewise-constant-frequency signal and its time vector."""
    x = cos_f(freqs, N // len(freqs))
    ts = _t(0, len(x) / N, len(x))
    return x, ts
def fast_transitions(N):
    """Signal with abrupt jumps between widely-spaced frequencies."""
    freqs = np.array([N/100, N/200, N/3, N/20,
                      N/3-1, N/50, N/4, N/150]) / 8
    return _freqs(N, freqs)
def low_freqs(N):
    """Signal composed of very low frequencies."""
    freqs = [.3, .3, 1, 1, 2, 2]
    return _freqs(N, freqs)
def high_freqs(N):
    """Signal composed of frequencies near Nyquist (relative to `N`)."""
    freqs = np.array([N/2, N/2-1, N/4, N/3]) / 4
    return _freqs(N, freqs)
#### Tests ###################################################################
# Shared fixtures: signals under test, default wavelet, and the maximum
# allowed MAD/RMS reconstruction error.
test_fns = (echirp, lchirp, fast_transitions, low_freqs, high_freqs)
wavelet = ('gmw', {'beta': 8, 'dtype': 'float64'})
th = .1
def test_ssq_cwt():
    """Round-trip `ssq_cwt` -> `issq_cwt` on every test signal and scale
    scheme; assert MAD/RMS reconstruction error stays below `th`."""
    errs = []
    for fn in test_fns:
        x, ts = fn(2048)  # may not return length 2048
        for scales in ('log', 'log-piecewise', 'linear'):
            if fn.__name__ == 'low_freqs':
                if scales == 'linear':
                    # 'linear' default can't handle low frequencies for large N
                    # 'log-piecewise' maps it too sparsely
                    continue
                else:
                    scales = f'{scales}:maximal'

            Tx, *_ = ssq_cwt(x, wavelet, scales=scales, nv=32, t=ts)
            xrec = issq_cwt(Tx, wavelet)

            errs.append(round(mad_rms(x, xrec), 5))
            title = "abs(SSQ_CWT) | {}, scales='{}'".format(fn.__qualname__,
                                                            scales)
            _maybe_viz(Tx, x, xrec, title, errs[-1])
            assert errs[-1] < th, (errs[-1], fn.__name__, scales)
    print("\nssq_cwt PASSED\nerrs:", ', '.join(map(str, errs)))
def test_cwt():
    """Round-trip `cwt` -> `icwt` on every test signal, for both L1 and L2
    norms; assert MAD/RMS reconstruction error stays below `th`.

    Removes the original's dead `title = f"..."` assignment that was
    immediately overwritten by the `.format(...)` version.
    """
    errs = []
    for fn in test_fns:
        x, ts = fn(2048)
        for l1_norm in (True, False):
            # 'linear' default can't handle low frequencies for large N
            scales = ('log:maximal' if fn.__name__ in ('low_freqs', 'high_freqs')
                      else 'log')
            kw = dict(wavelet=wavelet, scales=scales, l1_norm=l1_norm, nv=32)
            Wx, *_ = cwt(x, t=ts, **kw)
            xrec = icwt(Wx, one_int=True, **kw)

            errs.append(round(mad_rms(x, xrec), 5))
            title = "abs(CWT) | {}, l1_norm={}".format(fn.__qualname__,
                                                       l1_norm)
            _maybe_viz(Wx, x, xrec, title, errs[-1])
            assert errs[-1] < th, (errs[-1], fn.__name__, f"l1_norm: {l1_norm}")
    print("\ncwt PASSED\nerrs:", ', '.join(map(str, errs)))
def test_cwt_log_piecewise():
    """Round-trip both SSQ-CWT and plain CWT with 'log-piecewise' scales."""
    x, ts = echirp(1024)
    wav = 'gmw'  # local choice; does not touch the module-level `wavelet`
    Tx, Wx, ssq_freqs, scales, *_ = ssq_cwt(x, wav, scales='log-piecewise',
                                            t=ts, preserve_transform=True)
    xrec_from_ssq = issq_cwt(Tx, 'gmw')
    xrec_from_cwt = icwt(Wx, wav, scales=scales)

    err_ssq = round(mad_rms(x, xrec_from_ssq), 5)
    err_cwt = round(mad_rms(x, xrec_from_cwt), 5)
    assert err_ssq < .02, err_ssq
    assert err_cwt < .02, err_cwt
def test_component_inversion():
    """Extract a single component from noisy SSQ-CWT via a linear frequency
    band and invert it; compare against the clean signal in both time and
    frequency domains."""
    def echirp(N):
        # local variant (intentionally shadows the module-level `echirp`)
        t = np.linspace(0, 10, N, False)
        return np.cos(2 * np.pi * np.exp(t / 3)), t

    N = 2048
    noise_var = 6  # additive Gaussian noise variance

    x, ts = echirp(N)
    x *= (1 + .3 * cos_f([1], N))  # amplitude modulation
    xo = x.copy()  # keep clean copy for error measurement
    np.random.seed(4)
    x += np.sqrt(noise_var) * np.random.randn(len(x))

    wavelet = ('gmw', {'beta': 6})
    Tx, *_ = ssq_cwt(x, wavelet, scales='log:maximal', nv=32, t=ts, flipud=0)

    # hand-coded, subject to failure
    bw, slope, offset = .035, .44, .45
    Cs, freqband = lin_band(Tx, slope, offset, bw, norm=(0, 2e-1))

    xrec = issq_cwt(Tx, wavelet, Cs, freqband)[0]

    # compare magnitude spectra of clean vs reconstructed
    axof = np.abs(np.fft.rfft(xo))
    axrecf = np.abs(np.fft.rfft(xrec))

    err_sig = mad_rms(xo, xrec)
    err_spc = mad_rms(axof, axrecf)
    print("signal MAD/RMS: %.6f" % err_sig)
    print("spectrum MAD/RMS: %.6f" % err_spc)
    assert err_sig <= .40, f"{err_sig} > .40"
    assert err_spc <= .10, f"{err_spc} > .10"
def test_stft():
    """Ensure every combination of even & odd configs can be handled;
    leave window length unspecified to ensure unspecified inverts unspecified.
    """
    th = 1e-14  # round trip should be exact to ~float64 precision
    for N in (128, 129):
        x = np.random.randn(N)
        for n_fft in (120, 121):
            for hop_len in (1, 2, 3):
                for modulated in (True, False):
                    kw = dict(hop_len=hop_len, n_fft=n_fft, modulated=modulated)
                    Sx = stft(x, dtype='float64', **kw)
                    xr = istft(Sx, N=len(x), **kw)

                    # context string for failure messages
                    txt = ("\nSTFT: (N, n_fft, hop_len, modulated) = ({}, {}, "
                           "{}, {})").format(N, n_fft, hop_len, modulated)
                    assert len(x) == len(xr), "%s != %s %s" % (N, len(xr), txt)
                    mae = np.abs(x - xr).mean()
                    assert mae < th, "MAE = %.2e > %.2e %s" % (mae, th, txt)
def test_ssq_stft():
    """Same as `test_stft` except don't test `hop_len` or `modulated` since
    only `1` and `True` are invertible (by the library, and maybe theoretically).

    `window_scaling=.5` has >x2 greater MAE for some reason. May look into.
    """
    th = 1e-1
    for N in (128, 129):
        x = np.random.randn(N)
        for n_fft in (120, 121):
            for window_scaling in (1., .5):
                if window_scaling == 1:
                    window = None
                else:
                    # Request the default window explicitly. The original code
                    # passed `window` here, which only worked because the
                    # previous loop iteration had set it to None.
                    window = get_window(None, win_len=n_fft//1, n_fft=n_fft)
                    window *= window_scaling

                Sx, *_ = ssq_stft(x, window=window, n_fft=n_fft)
                xr = issq_stft( Sx, window=window, n_fft=n_fft)

                txt = ("\nSSQ_STFT: (N, n_fft, window_scaling) = ({}, {}, {})"
                       ).format(N, n_fft, window_scaling)
                assert len(x) == len(xr), "%s != %s %s" % (N, len(xr), txt)
                mae = np.abs(x - xr).mean()
                assert mae < th, "MAE = %.2e > %.2e %s" % (mae, th, txt)
def test_stft_vs_librosa():
    """Cross-check `stft` against librosa's over even/odd size combos."""
    try:
        lstft
    except:
        return  # librosa failed to import at module load; skip silently
    np.random.seed(0)
    # try all even/odd combos
    for N in (512, 513):
        for hop_len in (1, 2, 3):
            for n_fft in (512, 513):
                for win_len in (N//8, N//8 - 1):
                    x = np.random.randn(N)
                    Sx = stft( x, n_fft=n_fft, hop_len=hop_len, win_len=win_len,
                              window='hann', modulated=False, dtype='float64')
                    lSx = lstft(x, n_fft=n_fft, hop_length=hop_len, win_length=win_len,
                                window='hann', pad_mode='reflect')

                    if n_fft % 2 == 0:
                        # presumably librosa yields one extra frame in these
                        # configs (padding convention); trim to align shapes
                        # — TODO confirm against librosa docs
                        if hop_len == 1:
                            lSx = lSx[:, :-1]
                        elif (((N % 2 == 0) and hop_len == 2) or
                              ((N % 2 == 1) and hop_len == 3)):
                            lSx = lSx[:, :-1]

                    mae = np.abs(Sx - lSx).mean()
                    assert np.allclose(Sx, lSx), (
                        "N={}, hop_len={}, n_fft={}, win_len={}, MAE={}"
                        ).format(N, hop_len, n_fft, win_len, mae)
def _maybe_viz(Wx, xo, xrec, title, err):
    """Visualize the transform plus original-vs-reconstruction overlay,
    if the module-level `VIZ` flag is set; otherwise no-op."""
    if not VIZ:
        return
    mx = np.abs(Wx).max()
    if 'SSQ' in title:
        # flip & pad SSQ transforms for display; use a lower color ceiling
        Wx = np.pad(np.flipud(Wx), [[5], [0]])
        mx = .1*mx
    else:
        mx = .9*mx

    imshow(Wx, abs=1, norm=(0, mx), cmap='turbo', show=1, title=title)
    plot(xo, title="Original vs reconstructed | MAD/RMS=%.4f" % err)
    plot(xrec, show=1)
if __name__ == '__main__':
    if VIZ:
        # plotting helpers only needed for the interactive run
        from ssqueezepy.visuals import plot, imshow
        test_ssq_cwt()
        test_cwt()
        test_cwt_log_piecewise()
        test_component_inversion()
        test_stft()
        test_ssq_stft()
        test_stft_vs_librosa()
    else:
        pytest.main([__file__, "-s"])
| 8,954
| 32.665414
| 81
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/gmw_test.py
|
# -*- coding: utf-8 -*-
"""Generalized Morse Wavelets.
Tests:
- Implementations are `Wavelet`-compatible
- Consistency of `Wavelet`-compatible implems with that of full `morsewave`
- GMW L1 & L2 norms work as expected
"""
import os
import pytest
import numpy as np
from ssqueezepy.wavelets import Wavelet
from ssqueezepy._gmw import compute_gmw, morsewave
# no visuals here but 1 runs as regular script instead of pytest, for debugging
VIZ = 0
os.environ['SSQ_GPU'] = '0' # in case concurrent tests set it to '1'
def test_api_vs_full():
    """`compute_gmw` (the API path) must match `morsewave` (full implem)
    in both time and frequency domains, across norms/scales/lengths."""
    os.environ['SSQ_GPU'] = '0'
    for gamma, beta in [(3, 60), (4, 80)]:
        for norm in ('bandpass', 'energy'):
            for scale in (1, 2):
                for N in (512, 513):
                    kw = dict(N=N, gamma=gamma, beta=beta, norm=norm)
                    kw2 = dict(scale=scale, time=True, centered_scale=True,
                               norm_scale=True, dtype='float64')
                    psih_s, psi_s = compute_gmw(**kw, **kw2)
                    # `morsewave` parameterizes by frequency = 1/scale
                    psih_f, psi_f = morsewave(**kw, freqs=1 / scale)

                    mad_t = np.mean(np.abs(psi_s - psi_f))
                    mad_f = np.mean(np.abs(psih_s - psih_f))
                    assert np.allclose(psi_s, psi_f), errmsg(mad_t, **kw, **kw2)
                    assert np.allclose(psih_s, psih_f), errmsg(mad_f, **kw, **kw2)
def test_api_vs_full_higher_order():
    """Same as `test_api_vs_full`, but for higher-order GMWs; `morsewave`
    returns all K orders, so compare against its last column."""
    os.environ['SSQ_GPU'] = '0'
    for gamma, beta in [(3, 60), (4, 80)]:
        for order in (1, 2):
            for norm in ('bandpass', 'energy'):
                for scale in (1, 2):
                    for N in (512, 513):
                        kw = dict(N=N, gamma=gamma, beta=beta, norm=norm)
                        kw2 = dict(scale=scale, time=True, centered_scale=True,
                                   norm_scale=True, dtype='float64')
                        psih_s, psi_s = compute_gmw(**kw, **kw2, order=order)
                        psih_f, psi_f = morsewave(**kw, freqs=1/scale, K=order + 1)
                        # keep only the highest order returned by `morsewave`
                        psih_f, psi_f = psih_f[:, -1], psi_f[:, -1]

                        mad_t = np.mean(np.abs(psi_s - psi_f))
                        mad_f = np.mean(np.abs(psih_s - psih_f))
                        assert np.allclose(psi_s, psi_f), errmsg(mad_t, **kw, **kw2)
                        assert np.allclose(psih_s, psih_f), errmsg(mad_f, **kw, **kw2)
def test_norm():
    """Test that L1-normed time-domain wavelet's L1 norm is fixed at 2,
    and L2-normed freq-domain wavelet's L2 norm is fixed at `N`.
    """
    os.environ['SSQ_GPU'] = '0'
    th = 1e-3
    for gamma, beta in [(3, 60), (4, 80)]:
        for norm in ('bandpass', 'energy'):
            for scale in (2, 3):
                for N in (512, 513):
                    for centered_scale in (True, False):
                        kw = dict(N=N, scale=scale, gamma=gamma, beta=beta, norm=norm,
                                  centered_scale=centered_scale, time=True,
                                  norm_scale=True, dtype='float64')
                        psih, psi = compute_gmw(**kw)
                        if norm == 'bandpass':
                            # L1 norm measured in the time domain
                            l1_t = np.sum(np.abs(psi))
                            assert abs(l1_t - 2) < th, errmsg(abs(l1_t - 2), **kw)
                        elif norm == 'energy':
                            # L2 norm (energy) measured in the frequency domain
                            l2_f = np.sum(np.abs(psih)**2)
                            assert abs(l2_f - N) < th, errmsg(abs(l2_f - N), **kw)
def test_wavelet():
    """Test that `gmw` is a valid `Wavelet`."""
    os.environ['SSQ_GPU'] = '0'
    configs = ('gmw',
               ('gmw', {'gamma': 3, 'beta': 60, 'norm': 'energy',
                        'centered_scale': True}))
    for cfg in configs:
        wavelet = Wavelet(cfg)
        wavelet.info()
        wavelet.viz()
def errmsg(err, scale, gamma, beta, N, norm, centered_scale, **other):
    """Format a diagnostic string for a failed GMW comparison; extra kwargs
    are accepted (and ignored) so callers may pass whole config dicts."""
    params = (gamma, beta, scale, N, norm, centered_scale)
    return ("err={:.2e} (gamma, beta, scale, N, norm, centered_scale) = "
            "({}, {}, {}, {}, {}, {})").format(err, *params)
if __name__ == '__main__':
    if VIZ:
        # run directly as a script (useful for debugging)
        test_api_vs_full()
        test_api_vs_full_higher_order()
        test_norm()
        test_wavelet()
    else:
        pytest.main([__file__, "-s"])
| 3,959
| 34.675676
| 79
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/props_test.py
|
# -*- coding: utf-8 -*-
"""Test computation of various wavelet properties (time-frequency resolution,
center frequency, etc). Note that these don't grid-search but sweep one
parameter at a time while keeping others fixed.
Certain thresholds were set greater as they fail on Travis (but not on author's
machine).
"""
import pytest
import numpy as np
import matplotlib.pyplot as plt
from ssqueezepy.wavelets import Wavelet, time_resolution, freq_resolution
from ssqueezepy.wavelets import center_frequency
from ssqueezepy.visuals import scat
VIZ = 0 # set to 1 to enable various visuals and run without pytest
def test_energy_center_frequency():
    """If kind='energy' passes, long shot for 'peak' to fail, so just test
    former; would still be interesting to investigate 'peak' vs 'energy',
    esp. at scale extrema.
    """
    def _test_mu_dependence(wc0, mu0, scale0, N0, th):
        """wc ~ mu"""
        mus = np.arange(mu0 + 1, 21)
        errs = np.zeros(len(mus))
        for i, mu in enumerate(mus):
            wavelet = Wavelet(('morlet', {'mu': mu, 'dtype': 'float64'}))
            wc = center_frequency(wavelet, scale=scale0, N=N0, kind='energy')
            # deviation from exact proportionality against the reference
            errs[i] = abs((wc / wc0) - (mu / mu0))
        _assert_and_viz(th, errs, mus, 'mu',
                        "Center frequency (energy), morlet")

    def _test_scale_dependence(wc0, mu0, scale0, N0, th):
        """wc ~ 1/scale
        For small `scale`, the bell is trimmed and the (energy) center frequency
        is no longer at mode/peak (both of which are also trimmed), but is
        less; we don't test these to keep code clean.
        """
        scales = 2**(np.arange(16, 53) / 8)  # [4, ..., 90.5]
        errs = np.zeros(len(scales))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        for i, scale in enumerate(scales):
            wc = center_frequency(wavelet, scale=scale, N=N0, kind='energy')
            errs[i] = abs((wc / wc0) - (scale0 / scale))
        _assert_and_viz(th, errs, np.log2(scales), 'log2(scale)',
                        "Center frequency (energy), morlet")

    def _test_scale_dependence_high(wc0, mu0, scale0, N0, th):
        """wc ~ 1/scale
        High `scale` subject to more significant discretization error in
        frequency domain.
        """
        scales = 2**(np.arange(53, 81) / 8)  # [90.5, ..., 1024]
        errs = np.zeros(len(scales))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        for i, scale in enumerate(scales):
            wc = center_frequency(wavelet, scale=scale, N=N0, kind='energy')
            errs[i] = abs((wc / wc0) - (scale0 / scale))
        _assert_and_viz(th, errs, np.log2(scales), 'log2(scale)',
                        "Center frequency (energy), morlet | High scales")

    def _test_N_dependence(wc0, mu0, scale0, N0, th):
        """Independent"""
        Ns = (np.array([.25, .5, 2, 4, 9]) * N0).astype('int64')
        errs = np.zeros(len(Ns))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        for i, N in enumerate(Ns):
            wc = center_frequency(wavelet, scale=scale0, N=N, kind='energy')
            errs[i] = abs(wc - wc0)
        _assert_and_viz(th, errs, Ns, 'N',
                        "Center frequency (energy), morlet")

    # reference configuration against which each dependence is measured
    mu0 = 5
    scale0 = 10
    N0 = 1024
    wavelet0 = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
    wc0 = center_frequency(wavelet0, scale=scale0, N=N0, kind='energy')

    args = (wc0, mu0, scale0, N0)
    _test_mu_dependence(        *args, th=1e-7)
    _test_scale_dependence(     *args, th=1e-14)
    _test_scale_dependence_high(*args, th=1e-1)
    _test_N_dependence(         *args, th=1e-14)
def test_time_resolution():
    """Some thresholds are quite large per center_frequency(, kind='peak') as
    opposed to 'energy', with force_int=True, especially for large scales.
    These are per great deviations from continuous-time counterparts,
    but this is simply a reflection of discretization limitations (trimmed or
    unsmooth bell) and finite precision error.
    """
    def _test_mu_dependence(std_t_nd0, std_t_d0, mu0, scale0, N0, th):
        """Nondimensional: std_t ~ 1/mu -- Dimensional: independent"""
        mus = np.arange(mu0 + 1, 21)
        errs = np.zeros((2, len(mus)))  # row 0: nondim errors, row 1: dim
        for i, mu in enumerate(mus):
            wavelet = Wavelet(('morlet', {'mu': mu, 'dtype': 'float64'}))
            std_t_nd = time_resolution(wavelet, scale0, N0, nondim=True)
            std_t_d = time_resolution(wavelet, scale0, N0, nondim=False)
            errs[0, i] = abs((std_t_nd / std_t_nd0) - (mu / mu0))
            errs[1, i] = abs(std_t_d - std_t_d0)
        _assert_and_viz(th, errs, 2*[mus], 'mu',
                        "Time resolution, morlet")

    def _test_scale_dependence(std_t_nd0, std_t_d0, mu0, scale0, N0, th):
        """Nondimensional: independent* -- Dimensional: std_t ~ scale.
        *Nondimensional breaks down for low scales (<~3), where freq-domain
        wavelet is trimmed (even beyond mode), deviating center frequency from
        continuous-time counterpart.

        Particularly large `th` per default `(min_decay, max_mult) = (1e6, 2)`
        in `time_resolution`, which reasonably limits the extended wavelet
        duration in time-domain in (paddded) CWT (but this limitation might be
        undue; I'm unsure. https://dsp.stackexchange.com/q/70810/50076).

        _nd here is just an extra division by 'peak' center_frequency.
        """
        scales = 2**(np.arange(16, 81) / 8)  # [4, ..., 1024]
        errs = np.zeros((2, len(scales)))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        kw = dict(wavelet=wavelet, N=N0, force_int=False)
        for i, scale in enumerate(scales):
            std_t_nd = time_resolution(**kw, scale=scale, nondim=True)
            std_t_d = time_resolution(**kw, scale=scale, nondim=False)
            errs[0, i] = abs(std_t_nd - std_t_nd0)
            errs[1, i] = abs((std_t_d / std_t_d0) - (scale / scale0))
        _assert_and_viz(th, errs, 2*[np.log2(scales)], 'log2(scale)',
                        "Time resolution, morlet")

    def _test_N_dependence(std_t_nd0, std_t_d0, mu0, scale0, N0, th):
        """Independent
        `th` can be 1e-14 if dropping odd-sampled case where time-domain wavelet
        has suboptimal decay: https://github.com/jonathanlilly/jLab/issues/13
        (also dropping low-sampled case)
        """
        Ns = (np.array([.1, 1/3, .5, 2, 4, 9]) * N0).astype('int64')
        errs = np.zeros((2, len(Ns)))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        for i, N in enumerate(Ns):
            std_t_nd = time_resolution(wavelet, scale0, N, nondim=True)
            std_t_d = time_resolution(wavelet, scale0, N, nondim=False)
            errs[0, i] = abs(std_t_nd - std_t_nd0)
            errs[1, i] = abs(std_t_d - std_t_d0)
        _assert_and_viz(th, errs, [Ns, Ns], 'N',
                        "Time resolution, morlet")

    # reference configuration against which each dependence is measured
    mu0 = 5
    scale0 = 10
    N0 = 1024
    wavelet0 = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
    std_t_nd0 = time_resolution(wavelet0, scale0, N0, nondim=True)
    std_t_d0 = time_resolution(wavelet0, scale0, N0, nondim=False)

    args = (std_t_nd0, std_t_d0, mu0, scale0, N0)
    _test_mu_dependence(   *args, th=[1e-1, 1e-6])
    _test_scale_dependence(*args, th=[2e-0, 1e-1])
    _test_N_dependence(    *args, th=[1e-1, 1e-8])
def test_freq_resolution():
    """Frequency-domain analogue of `test_time_resolution`: sweep `mu`,
    `scale`, and `N` one at a time and check `freq_resolution` follows the
    expected (non)dimensional dependences relative to a reference config."""
    def _test_mu_dependence(std_w_nd0, std_w_d0, mu0, scale0, N0, th):
        """Nondimensional: std_w ~ mu -- Dimensional: independent"""
        mus = np.arange(mu0 + 1, 21)
        errs = np.zeros((2, len(mus)))  # row 0: nondim errors, row 1: dim
        for i, mu in enumerate(mus):
            wavelet = Wavelet(('morlet', {'mu': mu, 'dtype': 'float64'}))
            std_w_nd = freq_resolution(wavelet, scale0, N0, nondim=True)
            std_w_d = freq_resolution(wavelet, scale0, N0, nondim=False)
            errs[0, i] = abs((std_w_nd / std_w_nd0) - (mu0 / mu))
            errs[1, i] = abs(std_w_d - std_w_d0)
        _assert_and_viz(th, errs, 2*[mus], 'mu',
                        "Frequency resolution, morlet")

    def _test_scale_dependence(std_w_nd0, std_w_d0, mu0, scale0, N0, th):
        """Nondimensional: independent* -- Dimensional: std_w ~ 1/scale
        Particularly large `th` per nontrivial discretization finite-precision
        error for large scales, with small number of samples representing
        the non-zero region. We don't "fix" this to match continuous-time
        behavior again since it's more accurate per our CWT.
        """
        scales = 2**(np.arange(16, 81) / 8)  # [4, ..., 1024]
        errs = np.zeros((2, len(scales)))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        for i, scale in enumerate(scales):
            std_w_nd = freq_resolution(wavelet, scale, N0, nondim=True)
            std_w_d = freq_resolution(wavelet, scale, N0, nondim=False)
            errs[0, i] = abs(std_w_nd - std_w_nd0)
            errs[1, i] = abs((std_w_d / std_w_d0) - (scale0 / scale))
        _assert_and_viz(th, errs, 2*[np.log2(scales)], 'log2(scale)',
                        "Frequency resolution, morlet")

    def _test_N_dependence(std_w_nd0, std_w_d0, mu0, scale0, N0, th):
        """Independent"""
        Ns = (np.array([.25, .5, 2, 4, 9]) * N0).astype('int64')
        errs = np.zeros((2, len(Ns)))
        wavelet = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
        for i, N in enumerate(Ns):
            std_w_nd = freq_resolution(wavelet, scale0, N, nondim=True)
            std_w_d = freq_resolution(wavelet, scale0, N, nondim=False)
            errs[0, i] = abs(std_w_nd - std_w_nd0)
            errs[1, i] = abs(std_w_d - std_w_d0)
        _assert_and_viz(th, errs, 2*[Ns], 'N',
                        "Frequency resolution, morlet")

    # reference configuration against which each dependence is measured
    mu0 = 5
    scale0 = 10
    N0 = 1024
    wavelet0 = Wavelet(('morlet', {'mu': mu0, 'dtype': 'float64'}))
    std_w_nd0 = freq_resolution(wavelet0, scale0, N0, nondim=True)
    std_w_d0 = freq_resolution(wavelet0, scale0, N0, nondim=False)

    args = (std_w_nd0, std_w_d0, mu0, scale0, N0)
    _test_mu_dependence(   *args, th=(1e-2, 1e-6))
    _test_scale_dependence(*args, th=(6e-1, 1e-1))
    _test_N_dependence(    *args, th=(1e-2, 1e-13))
def _assert_and_viz(th, errs, params, pname, test_name, logscale=True):
    """Assert all `errs` fall below their thresholds, optionally visualizing.

    `th`/`errs`/`params` may be single arrays or sequences of arrays (one
    threshold + error series + param series each, colored separately).
    Raises AssertionError naming the first offending (err, param) pair.

    Removes the original's `_list_and_copy` inner helper, which was defined
    but never called (dead code). NOTE(review): when `VIZ` and `logscale`,
    the error arrays are log-scaled *in place*, mutating caller data — kept
    as-is to preserve behavior.
    """
    errs_all = np.atleast_2d(errs)
    params_all = np.atleast_2d(params)
    th_all = np.atleast_1d(th)

    # locate the first (err, param) pair exceeding its series' threshold
    had_error = False
    for th, errs, params in zip(th_all, errs_all, params_all):
        for err, param in zip(errs, params):
            if err > th:
                had_error = True
                break
        if had_error:
            ee, eparam, eth = err, param, th
            break

    if VIZ:
        title = f"{test_name}: abs(err) vs {pname}"
        if logscale:
            title = title.replace("abs(err)", "log10(abs(err))")
            for errs in errs_all:
                # floor at 1e-15 to keep log10 finite
                errs[errs < 1e-15] = 1e-15
                errs[:] = np.log10(errs)
            th_all = np.log10(th_all)

        colors = 'blue orange green red'.split()
        for i, (errs, params) in enumerate(zip(errs_all, params_all)):
            scat(params, errs, title=title)
            plt.axhline(th_all[i], color="tab:%s" % colors[i])
            plt.show()

        if logscale:
            th_all = 10**th_all  # undo for AssertionError
    if had_error:
        raise AssertionError("%.2e > %.1e, %s=%.2f" % (ee, eth, pname, eparam))
if __name__ == '__main__':
    if VIZ:
        # run directly with per-parameter error plots shown
        test_energy_center_frequency()
        test_freq_resolution()
        test_time_resolution()
    else:
        pytest.main([__file__, "-s"])
| 12,157
| 38.093248
| 80
|
py
|
ssqueezepy
|
ssqueezepy-master/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
ssqueezepy
|
ssqueezepy-master/tests/test_signals_test.py
|
# -*- coding: utf-8 -*-
"""Test ssqueezepy/_test_signals.py"""
import os
import pytest
import warnings
import numpy as np
import scipy.signal as sig
from ssqueezepy import Wavelet, TestSignals
from ssqueezepy.utils import window_resolution
VIZ = 0
os.environ['SSQ_GPU'] = '0' # in case concurrent tests set it to '1'
# Probe for a usable CUDA device; `test_gpu` is skipped when unavailable.
try:
    import torch
    torch.tensor(1, device='cuda')
    CAN_GPU = True
except:
    CAN_GPU = False
    warnings.warn("SKIPPED TESTS in `test_signals_test.py`, GPU not found.")
def test_demo():
    """Exercise `TestSignals.demo` with default and custom signal lists."""
    tsigs = TestSignals(N=256)
    dft = None  # alternatives: 'rows', 'cols'
    tsigs.demo(dft=dft)

    signals = [
        'am-cosine',
        ('hchirp', dict(fmin=.2)),
        ('sine:am-cosine', (dict(f=32, phi0=1), dict(amin=.3))),
    ]
    tsigs.demo(signals, N=256)
    tsigs.demo(signals, dft='rows')
    tsigs.demo(signals, dft='cols')
def test_wavcomp():
    """Compare wavelets via `TestSignals.wavcomp` on default & custom signals."""
    os.environ['SSQ_GPU'] = '0'
    tsigs = TestSignals(N=256)
    wavelets = [Wavelet(('gmw', {'beta': 5})),
                Wavelet(('gmw', {'beta': 22})),
                ]
    tsigs.wavcomp(wavelets)

    # test name-param pair, and ability to auto-set `N`
    N_all = [256, None]
    signals_all = [[('#echirp', dict(fmin=.1))],
                   [('lchirp', dict(fmin=1, fmax=60, tmin=0, tmax=5))]]
    for N, signals in zip(N_all, signals_all):
        tsigs.wavcomp(wavelets, signals=signals, N=N)
def test_cwt_vs_stft():
    """Side-by-side CWT vs STFT on all test signals, using a DPSS window
    intended to roughly match the wavelet's time-frequency resolution."""
    os.environ['SSQ_GPU'] = '0'
    # (N, beta, NW): (512, 42.5, 255); (256, 21.5, 255)
    N = 256#512
    signals = 'all'
    snr = 5
    n_fft = N
    win_len = n_fft#//2

    tsigs = TestSignals(N=N, snr=snr)
    wavelet = Wavelet(('GMW', {'beta': 21.5}))

    NW = win_len//2 - 1  # DPSS time-half-bandwidth parameter
    window = np.abs(sig.windows.dpss(win_len, NW))
    # window = np.pad(window, win_len//2)
    window_name = 'DPSS'
    config_str = '\nNW=%s' % NW

    # ensure `wavelet` and `window` have ~same time & frequency resolutions
    # TODO make function to auto-find matching wavelet given window & vice versa
    print("std_w, std_t, harea\nwavelet: {:.4f}, {:.4f}, {:.8f}"
          "\nwindow: {:.4f}, {:.4f}, {:.8f}".format(
              wavelet.std_w, wavelet.std_t, wavelet.harea,
              *window_resolution(window)))

    tsigs.cwt_vs_stft(wavelet, window, signals=signals, N=N, win_len=win_len,
                      n_fft=n_fft, window_name=window_name, config_str=config_str)
def test_ridgecomp():
    """Compare ridge extraction between CWT and STFT on a cubic-poly signal."""
    os.environ['SSQ_GPU'] = '0'
    N = 256
    tsigs = TestSignals(N=N)
    kw = dict(N=N, signals='poly-cubic', n_ridges=3, penalty=25)
    for transform in ('cwt', 'stft'):
        tsigs.ridgecomp(transform=transform, **kw)
def test_gpu():
    """Test that TestSignals can run on GPU."""
    if not CAN_GPU:
        return  # no CUDA device found at import time
    N = 256
    tsigs = TestSignals(N=N)
    window = np.abs(sig.windows.dpss(N, N//2 - 1))
    signals = 'par-lchirp'

    os.environ['SSQ_GPU'] = '1'  # switch library to the GPU path
    wavelet = Wavelet()
    tsigs.cwt_vs_stft(wavelet, window, signals=signals, N=N)
    os.environ['SSQ_GPU'] = '0'  # restore CPU for subsequent tests
if __name__ == '__main__':
    if VIZ:
        # run directly as a script with visuals
        test_demo()
        test_wavcomp()
        test_cwt_vs_stft()
        test_ridgecomp()
        test_gpu()
    else:
        pytest.main([__file__, "-s"])
| 3,282
| 26.358333
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/_cwt.py
|
# -*- coding: utf-8 -*-
import numpy as np
from .utils import fft, ifft, ifftshift, FFT_GLOBAL
from .utils import WARN, adm_cwt, adm_ssq, _process_fs_and_t
from .utils import padsignal, process_scales, logscale_transition_idx
from .utils import backend as S
from .utils.backend import Q
from .algos import replace_at_inf_or_nan
from .wavelets import Wavelet
def cwt(x, wavelet='gmw', scales='log-piecewise', fs=None, t=None, nv=32,
        l1_norm=True, derivative=False, padtype='reflect', rpadded=False,
        vectorized=True, astensor=True, cache_wavelet=None, order=0, average=None,
        nan_checks=None, patience=0):
    """Continuous Wavelet Transform, discretized, as described in
    Sec. 4.3.3 of [1] and Sec. IIIA of [2]. Uses FFT convolution via frequency-
    domain wavelets matching (padded) input's length.

    Uses `Wavelet.dtype` precision.

    # Arguments:
        x: np.ndarray / torch.Tensor
            Input vector(s), 1D / 2D.
            2D: does *not* do 2D CWT. Instead, treats dim0 as separate inputs,
            e.g. `(n_channels, time)`, improving speed & memory w.r.t. looping.

        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet sampled in Fourier frequency domain.
                - str: name of builtin wavelet. See `ssqueezepy.wavs()`
                  or `Wavelet.SUPPORTED`.
                - tuple: name of builtin wavelet and its configs.
                  E.g. `('morlet', {'mu': 5})`.
                - `wavelets.Wavelet` instance. Can use for custom wavelet.
                  See `help(wavelets.Wavelet)`.

        scales: str['log', 'log-piecewise', 'linear', 'log:maximal', ...]
                / np.ndarray
            CWT scales.
                - 'log': exponentially distributed scales, as pow of 2:
                  `[2^(1/nv), 2^(2/nv), ...]`
                - 'log-piecewise': 'log' except very high `scales` are
                  downsampled to prevent redundancy. This is recommended. See
                  https://github.com/OverLordGoldDragon/ssqueezepy/issues/
                  29#issuecomment-776792726
                - 'linear': linearly distributed scales.
                  !!! this scheme is not recommended; use with caution

            str assumes default `preset` of `'minimal'` for low scales and
            `'maximal'` for high, which can be changed via e.g. 'log:maximal'.
            See `preset` in `help(utils.cwt_scalebounds)`.

        nv: int
            Number of voices (wavelets per octave). Suggested >= 16.

        fs: float / None
            Sampling frequency of `x`. Defaults to 1, which for
            `maprange='maximal'` makes ssq frequencies range from 1/dT to
            0.5*fs, i.e. as fraction of reference sampling rate up to Nyquist
            limit; dT = total duration (N/fs).
            Used to compute `dt`, which is only used if `derivative=True`.
            Overridden by `t`, if provided.
            Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076

        t: np.ndarray / None
            Vector of times at which samples are taken
            (eg np.linspace(0, 1, n)). Must be uniformly-spaced.
            Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
            Used to compute `dt`, which is only used if `derivative=True`.
            Overrides `fs` if not None.

        l1_norm: bool (default True)
            Whether to L1-normalize the CWT, which yields a more representative
            distribution of energies and component amplitudes than L2 (see [3],
            [6]). If False (default True), uses L2 norm.

        derivative: bool (default False)
            Whether to compute and return `dWx`. Requires `fs` or `t`.

        padtype: str / None
            Pad scheme to apply on input. See `help(utils.padsignal)`.
            `None` -> no padding.

        rpadded: bool (default False)
            Whether to return padded Wx and dWx.
            `False` drops the added padding per `padtype` to return Wx and dWx
            of .shape[1] == len(x).

        vectorized: bool (default True)
            Whether to compute quantities for all scales at once, which is
            faster but uses more memory.

        astensor: bool (default True)
            If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
            or move them back to CPU & convert to Numpy arrays.

        cache_wavelet: bool (default None) / None
            If True, will store `wavelet` computations for all `scales` in
            `wavelet._Psih` (only if `vectorized`).
              - Defaults to True if `wavelet` is passed that's a `Wavelet`,
                throws warning if True with non-`Wavelet` `wavelet` and sets
                self to False (since the array's discarded at `return` anyway).
              - Ignored with `order > 2`, defaults to False.

        order: int (default 0) / tuple[int] / range
            > 0 computes `cwt` with higher-order GMWs. If tuple, computes
            `cwt` at each specified order. See `help(_cwt.cwt_higher_order)`.

        average: bool / None
            Only used for tuple `order`; see `help(_cwt.cwt_higher_order)`.

        nan_checks: bool / None
            Checks whether input has `nan` or `inf` values, and zeros them.
            `False` saves compute. Doesn't support torch inputs.
            Defaults to `True` for NumPy inputs, else `False`.

        patience: int / tuple[int, int]
            pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.

    # Returns:
        Wx: [na x n] np.ndarray (na = number of scales; n = len(x))
            CWT of `x`. (rows=scales, cols=timeshifts)
        scales: [na] np.ndarray
            Scales at which CWT was computed.
        dWx: [na x n] np.ndarray (if `derivative=True`)
            Time-derivative of the CWT of `x`, computed via frequency-domain
            differentiation (effectively, derivative of trigonometric
            interpolation; see [4]). Implements as described in Sec IIIB of [2].

    # References:
        1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
        https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf

        2. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications. G. Thakur,
        E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010

        3. How to validate a wavelet filterbank (CWT)? John Muradeli.
        https://dsp.stackexchange.com/a/86069/50076

        4. The Exponential Accuracy of Fourier and Chebyshev Differencing
        Methods. E. Tadmor.
        http://webhome.auburn.edu/~jzl0097/teaching/math_8970/Tadmor_86.pdf

        5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        cwt_fw.m

        6. Rectification of the Bias in the Wavelet Power Spectrum.
        Y. Liu, X. S. Liang, R. H. Weisberg.
        http://ocg6.marine.usf.edu/~liu/Papers/Liu_etal_2007_JAOT_wavelet.pdf
    """
    def _vectorized(xh, scales, wavelet, derivative, cache_wavelet):
        # multiply the full (na, n) matrix of freq-domain wavelets by `xh`,
        # then one batched inverse FFT
        if cache_wavelet:
            Psih_xh = wavelet.Psih(scale=scales, nohalf=False) * xh
        else:
            Psih_xh = wavelet(scale=scales, nohalf=False) * xh
        Wx = ifft(Psih_xh, axis=-1, astensor=True)
        if derivative:
            # freq-domain differentiation: multiply by i*w/dt (reuses Psih_xh)
            Psih_xh *= (1j * wavelet.xi / dt)
            dWx = ifft(Psih_xh, axis=-1, astensor=True)
        return (Wx, dWx) if derivative else (Wx, None)

    def _for_loop(xh, scales, wavelet, derivative, is_2D):
        # memory-lean variant: one scale at a time
        cdtype = 'complex128' if S.is_dtype(xh, 'complex128') else 'complex64'
        shape = ((len(scales), xh.shape[-1]) if not is_2D else
                 (len(xh), len(scales), xh.shape[-1]))
        Wx = S.zeros(shape, dtype=cdtype)
        if derivative:
            dWx = (Wx.copy() if isinstance(Wx, np.ndarray) else
                   Wx.detach().clone())
        for i, scale in enumerate(scales):
            idx = (slice(i, i + 1) if not is_2D else  # Wx[i]
                   (slice(None), slice(i, i + 1)))    # Wx[:, i]
            # sample FT of wavelet at scale `a`
            psih = wavelet(scale=scale, nohalf=False)
            Wx[idx] = ifft(psih * xh, axis=-1, astensor=True)
            if derivative:
                dpsih = (1j * wavelet.xi / dt) * psih
                dWx[idx] = ifft(dpsih * xh, axis=-1, astensor=True)
        return (Wx, dWx) if derivative else (Wx, None)

    def _process_args(x, scales, nv, fs, t, nan_checks, wavelet, cache_wavelet):
        # validate `x`, resolve `nan_checks`/`cache_wavelet` defaults,
        # and derive N, dt, is_2D
        if not hasattr(x, 'ndim'):
            raise TypeError("`x` must be a numpy array or torch Tensor "
                            "(got %s)" % type(x))
        elif x.ndim not in (1, 2):
            raise ValueError("`x` must be 1D or 2D (got x.ndim == %s)" % x.ndim)

        if nan_checks is None:
            nan_checks = bool(isinstance(x, np.ndarray))
        if nan_checks:
            if not isinstance(x, np.ndarray):
                raise ValueError("`nan_checks=True` requires NumPy input.")
            elif np.isnan(x.max()) or np.isinf(x.max()) or np.isinf(x.min()):
                WARN("found NaN or inf values in `x`; will zero")
                replace_at_inf_or_nan(x, replacement=0.)

        if cache_wavelet:
            if isinstance(wavelet, (str, tuple)):
                # only check str/tuple since it'll error anyway upon other types
                WARN("`cache_wavelet=True` requires a `wavelet` that's instance "
                     "of `Wavelet`; setting to False.")
                cache_wavelet = False
            elif not vectorized:
                WARN("`cache_wavelet=True` requires `vectorized=True`; "
                     "setting to False.")
                cache_wavelet = False
        elif cache_wavelet is None:
            cache_wavelet = (not isinstance(wavelet, (str, tuple)) and vectorized)

        if not isinstance(scales, str):
            nv = None  # `nv` is inferred from the explicit `scales` array
        N = x.shape[-1]
        dt, *_ = _process_fs_and_t(fs, t, N=N)
        is_2D = (x.ndim == 2)
        return N, nv, dt, is_2D, cache_wavelet

    # higher-order GMW path delegates entirely to `cwt_higher_order`
    if isinstance(order, (tuple, list, range)) or order > 0:
        kw = dict(wavelet=wavelet, scales=scales, fs=fs, t=t, nv=nv,
                  l1_norm=l1_norm, derivative=derivative, padtype=padtype,
                  rpadded=rpadded, vectorized=vectorized, patience=patience,
                  cache_wavelet=cache_wavelet)
        return cwt_higher_order(x, order=order, average=average,
                                astensor=astensor, **kw)

    (N, nv, dt, is_2D, cache_wavelet
     ) = _process_args(x, scales, nv, fs, t, nan_checks, wavelet, cache_wavelet)

    # process `wavelet`, get its `dtype`
    wavelet = _process_gmw_wavelet(wavelet, l1_norm)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    dtype = wavelet.dtype

    # cast to torch early if possible (keeps as NumPy if SSQ_GPU=0)
    torch_supports_padding = bool(padtype in ('zero', 'reflect', None))
    if torch_supports_padding:
        x = S.asarray(x, dtype)
    x = S.astype(x, dtype)

    # pad, ensure correct data type
    if padtype is not None:
        xp, _, n1, _ = padsignal(x, padtype, get_params=True)
    else:
        xp = x
    if not torch_supports_padding:
        # padding ran in NumPy; convert to backend array/dtype afterwards
        xp = S.asarray(xp, dtype)

    # take to freq-domain
    xh = fft(xp, axis=-1, astensor=True)
    if is_2D:
        xh = xh[:, None]  # insert dim1 to broadcast wavelet `scales` along

    # process `scales`
    scales = process_scales(scales, N, wavelet, nv=nv)
    scales = S.asarray(scales, dtype)

    # temporarily adjust `wavelet.N` & `FFT_GLOBAL.patience`
    wavelet_N_orig = wavelet.N
    wavelet.N = xp.shape[-1]
    # NOTE(review): restore assigns this 2-tuple back to `FFT_GLOBAL.patience`;
    # presumably its setter accepts (patience, planning_timelimit) — confirm
    patience_orig = (FFT_GLOBAL.patience, FFT_GLOBAL.planning_timelimit)
    FFT_GLOBAL.patience = patience

    # take CWT
    if vectorized:
        Wx, dWx = _vectorized(xh, scales, wavelet, derivative, cache_wavelet)
    else:
        Wx, dWx = _for_loop(xh, scales, wavelet, derivative, is_2D)

    # restore
    wavelet.N = wavelet_N_orig
    FFT_GLOBAL.patience = patience_orig

    # handle unpadding, normalization
    if not rpadded and padtype is not None:
        # Wx[:, n1:n1 + N] if 1D else Wx[:, :, n1:n1 + N]
        idx = ((slice(None), slice(n1, n1 + N)) if not is_2D else
               (slice(None), slice(None), slice(n1, n1 + N)))
        # shorten to pre-padded size
        Wx = Wx[idx]
        if derivative:
            dWx = dWx[idx]
        if S.is_tensor(Wx):
            # ensure indexing (strides) is same, else cupy will mess up
            Wx = Wx.contiguous()
            if derivative:
                dWx = dWx.contiguous()
    if not l1_norm:
        # normalize energy per L2 wavelet norm, else already L1-normalized
        Wx *= S.astype(Q.sqrt(scales), Wx.dtype)
        if derivative:
            dWx *= S.astype(Q.sqrt(scales), Wx.dtype)

    # postprocessing & return
    if not astensor and S.is_tensor(Wx):
        Wx, scales, dWx = [g.cpu().numpy() if S.is_tensor(g) else g
                           for g in (Wx, scales, dWx)]
    scales = scales.squeeze()

    return ((Wx, scales, dWx) if derivative else
            (Wx, scales))
def icwt(Wx, wavelet='gmw', scales='log-piecewise', nv=None, one_int=True,
         x_len=None, x_mean=0, padtype='reflect', rpadded=False, l1_norm=True):
    """The inverse Continuous Wavelet Transform of `Wx`, via double or
    single integral.

    # Arguments:
        Wx: np.ndarray
            CWT computed via `ssqueezepy.cwt`.

        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet sampled in Fourier frequency domain.
                - str: name of builtin wavelet. `ssqueezepy.wavs()`
                - tuple[str, dict]: name of builtin wavelet and its configs.
                  E.g. `('morlet', {'mu': 5})`.
                - `wavelets.Wavelet` instance. Can use for custom wavelet.

        scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
            See help(cwt).

        nv: int / None
            Number of voices. Suggested >= 32. Needed if `scales` isn't array
            (will default to `cwt`'s).

        one_int: bool (default True)
            Whether to use one-integral iCWT or double.
            Current one-integral implementation performs best.
                - True: Eq 2.6, modified, of [6]. Explained in [1].
                - False: Eq 4.67 of [3]. Explained in [2].

        x_len: int / None. Length of `x` used in forward CWT, if different
            from Wx.shape[1] (default if None).

        x_mean: float. mean of original `x` (not picked up in CWT since it's an
            infinite scale component). Default 0.

        padtype: str
            Pad scheme to apply on input, in case of `one_int=False`.
            See `help(utils.padsignal)`.

        rpadded: bool (default False)
            True if Wx is padded (e.g. if used `cwt(, rpadded=True)`).

        l1_norm: bool (default True)
            True if Wx was obtained via `cwt(, l1_norm=True)`.

    # Returns:
        x: np.ndarray
            The signal, as reconstructed from Wx.

    # References:
        1. One integral inverse CWT. John Muradeli.
        https://dsp.stackexchange.com/a/76239/50076

        2. Inverse CWT derivation. John Muradeli.
        https://dsp.stackexchange.com/a/71148/50076

        3. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
        https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf

        4. Why iCWT may be inexact. John Muradeli.
        https://dsp.stackexchange.com/a/87104/50076

        5. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications. G. Thakur,
        E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010

        6. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
        Decomposition. I. Daubechies, J. Lu, H.T. Wu.
        https://arxiv.org/pdf/0912.2437.pdf

        7. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        synsq_cwt_fw.m
    """
    #### Prepare for inversion ###############################################
    na, n = Wx.shape
    x_len = x_len or n
    if not isinstance(scales, np.ndarray) and nv is None:
        nv = 32  # must match forward's; default to `cwt`'s

    wavelet = _process_gmw_wavelet(wavelet, l1_norm)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    # will override `nv` to match `scales`'s
    scales, scaletype, _, nv = process_scales(scales, x_len, wavelet, nv=nv,
                                              get_params=True)
    assert (len(scales) == na), "%s != %s" % (len(scales), na)

    #### Handle piecewise scales case ########################################
    # `nv` must be left unspecified so it's inferred automatically from `scales`
    # in `process_scales` for each piecewise case
    if scaletype == 'log-piecewise':
        # invert each log-distributed segment separately, then sum
        # NOTE(review): `x_mean` is forwarded to both recursive calls, so it
        # appears to be added twice when nonzero — confirm intent
        kw = dict(wavelet=wavelet, one_int=one_int, x_len=x_len, x_mean=x_mean,
                  padtype=padtype, rpadded=rpadded, l1_norm=l1_norm)
        idx = logscale_transition_idx(scales)
        x = icwt(Wx[:idx], scales=scales[:idx], **kw)
        x += icwt(Wx[idx:], scales=scales[idx:], **kw)
        return x
    ##########################################################################

    #### Invert ##############################################################
    if one_int:
        x = _icwt_1int(Wx, scales, scaletype, l1_norm)
    else:
        x = _icwt_2int(Wx, scales, scaletype, l1_norm,
                       wavelet, x_len, padtype, rpadded)

    # admissibility coefficient
    Cpsi = (adm_ssq(wavelet) if one_int else
            adm_cwt(wavelet))
    if scaletype == 'log':
        # Eq 4.67 in [1]; Theorem 4.5 in [1]; below Eq 14 in [2]
        # ln(2**(1/nv)) == ln(2)/nv == diff(ln(scales))[0]
        x *= (2 / Cpsi) * np.log(2 ** (1 / nv))
    else:
        x *= (2 / Cpsi)

    x += x_mean  # CWT doesn't capture mean (infinite scale)
    return x
def _icwt_2int(Wx, scales, scaletype, l1_norm, wavelet, x_len,
               padtype='zero', rpadded=False):
    """Double-integral iCWT; works with any(?) wavelet.

    Filters each scale's row of `Wx` by the wavelet in the frequency domain,
    accumulates real parts weighted by the per-scale normalization, then strips
    the padding.

    NOTE(review): with `rpadded=True`, `n_up` and `n1` are never assigned and
    this raises `NameError` — looks like only `rpadded=False` is supported;
    confirm against callers.
    """
    # add CWT padding if it doesn't exist
    if not rpadded:
        Wx, n_up, n1, _ = padsignal(Wx, padtype=padtype, get_params=True)

    # see help(cwt) on `norm` and `pn`
    norm = _icwt_norm(scaletype, l1_norm)
    pn = (-1)**np.arange(n_up)  # alternating-sign vector, paired with ifftshift

    x = np.zeros(n_up)
    # TODO vectorize?
    for scale, Wx_scale in zip(scales, Wx):
        # TODO remove `*pn` & `ifftshift`?
        # sample FT of wavelet at scale `a`
        psih = wavelet(scale=scale, N=n_up) * pn
        xa = ifftshift(ifft(fft(Wx_scale) * psih))
        x += xa.real / norm(scale)

    x = x[n1:n1 + x_len]  # keep the unpadded part
    return x
def _icwt_1int(Wx, scales, scaletype, l1_norm):
    """One-integral iCWT; assumes an analytic wavelet (real part suffices)."""
    scale_norm = _icwt_norm(scaletype, l1_norm)
    weighted = Wx.real / scale_norm(scales)
    return weighted.sum(axis=0)
def _icwt_norm(scaletype, l1_norm):
if l1_norm:
norm = ((lambda scale: 1) if scaletype == 'log' else
(lambda scale: scale))
else:
if scaletype == 'log':
norm = lambda scale: scale**.5
elif scaletype == 'linear':
norm = lambda scale: scale**1.5
return norm
def _process_gmw_wavelet(wavelet, l1_norm):
"""Ensure `norm` for GMW is consistent with `l1_norm`."""
norm = 'bandpass' if l1_norm else 'energy'
if isinstance(wavelet, str) and wavelet.lower()[:3] == 'gmw':
wavelet = ('gmw', {'norm': norm})
elif isinstance(wavelet, tuple) and wavelet[0].lower()[:3] == 'gmw':
wavelet, wavopts = wavelet
wavopts['norm'] = wavopts.get('norm', norm)
wavelet = (wavelet, wavopts)
elif isinstance(wavelet, Wavelet):
if wavelet.name == 'GMW L2' and l1_norm:
raise ValueError("using GMW L2 wavelet with `l1_norm=True`")
elif wavelet.name == 'GMW L1' and not l1_norm:
raise ValueError("using GMW L1 wavelet with `l1_norm=False`")
return wavelet
def cwt_higher_order(x, wavelet='gmw', order=1, average=None, astensor=True,
                     **kw):
    """Compute `cwt` with GMW wavelets of order 0 to `order`. See `help(cwt)`.

    Yields lower variance and more noise robust representation. See VI in
    ref[1].

    # Arguments:
        x: np.ndarray
            Input, 1D/2D. See `help(cwt)`.

        wavelet: str / wavelets.Wavelet
            CWT wavelet.

        order: int / tuple[int] / range
            Order of GMW to use for CWT. If tuple, will compute for each
            order specified in tuple, subject to `average`.

        average: bool (default True if `order` is tuple)
            If True, will take arithmetic mean of resulting `Wx` (and `dWx`
            if `derivative=True`), else return as list. Note for phase
            transform, one should compute derivative of averaged `Wx` rather
            than take average of individual `dWx`s.
            Ignored with non-tuple `order`.

        kw: dict / kwargs
            Arguments to `cwt`.
            If `scales` is string, will reuse zeroth-order's; zeroth order
            isn't included in `order`, will set from wavelet at `order=0`.

    # References
        [1] Generalized Morse Wavelets. S. C. Olhede, A. T. Walden. 2002.
        https://spiral.imperial.ac.uk/bitstream/10044/1/1150/1/
        OlhedeWaldenGenMorse.pdf
    """
    def _process_wavelet(wavelet, order):
        # build one GMW `Wavelet` per requested order, sharing all other opts
        wavelet = Wavelet._init_if_not_isinstance(wavelet)
        if not wavelet.name.lower().startswith('gmw'):
            raise ValueError("`wavelet` must be GMW for higher-order transforms "
                             "(got %s)" % wavelet.name)
        wavopts = wavelet.config.copy()
        wavopts.pop('order')
        wavelets = [Wavelet(('gmw', dict(order=k, **wavopts))) for k in order]
        return wavelets, wavopts

    def _process_args(wavelet, order, average, kw):
        # normalize `order` into a tuple (multi) or 1-element list (single);
        # resolve string `scales` once from the order-0 wavelet and fix it in
        # `kw` so every per-order `cwt` call reuses the same scales
        if isinstance(order, (list, range)):
            order = tuple(order)
        if not isinstance(order, (list, tuple)):
            order = [order]
        if len(order) == 1 and average:
            WARN("`average` ignored with single `order`")
            average = False

        wavelets, wavopts = _process_wavelet(wavelet, order)

        scales = kw.get('scales', 'log-piecewise')
        if isinstance(scales, str):
            wav = Wavelet(('gmw', dict(order=0, **wavopts)))
            scales = process_scales(scales, len(x), wavelet=wav,
                                    nv=kw.get('nv', 32))
            scales = S.asarray(scales, wav.dtype)
        kw['scales'] = scales
        return wavelets, order, average

    wavelets, order, average = _process_args(wavelet, order, average, kw)

    Wx_all, dWx_all = [], []
    derivative = kw.get('derivative', False)

    # take the CWTs (order=0 here prevents infinite recursion back into us)
    for k in range(len(order)):
        out = cwt(x, wavelets[k], order=0, **kw)
        Wx_all.append(out[0])
        if derivative:
            dWx_all.append(out[-1])

    # handle averaging; strip `Wx_all` of list container if only one array
    if average or (average is None and isinstance(order, tuple)):
        Wx_all = Q.mean(S.vstack(Wx_all), axis=0)
        if derivative:
            dWx_all = Q.mean(S.vstack(dWx_all), axis=0)
    elif len(Wx_all) == 1:
        Wx_all = Wx_all[0]
        if derivative:
            dWx_all = dWx_all[0]

    scales = kw['scales']
    if not astensor and S.is_tensor(Wx_all):
        Wx_all, scales, dWx_all = [g.cpu().numpy() if S.is_tensor(g) else g
                                   for g in (Wx_all, scales, dWx_all)]
    return ((Wx_all, scales, dWx_all) if derivative else
            (Wx_all, scales))
| 23,966
| 39.691002
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/_test_signals.py
|
# -*- coding: utf-8 -*-
"""
Signals for testing effectiveness of time-frequency transforms against
variety of localization characteristics.
1. **sine**: pure sine or cosine at one frequency, `cos(2pi f t)`
a. sine
b. cosine
c. phase-shifted
d. trimmed (others complete exactly one cycle) (not implemented but is
trivial; do e.g. `x = x[20:-50]`)
2. **<name>:am**: <name> with amplitude modulation, i.e. `A(t) * fn(t)`
a. |sine|
b. |cosine|
c. exp
d. gauss
3. **#<name>**: superimpose reflected <name> onto itself, i.e. `x += x[::-1]`
4. **lchirp**: linear chirp, `cos(2pi t**2/2)`, spanning `fmin` to `fmax`
5. **echirp**: exponential chirp, `cos(2pi exp(t))`, spanning `fmin` to `fmax`
6. **hchirp**: hyperbolic chirp, `cos(2pi a/(b - t))`, spanning `fmin` to `fmax`
7, 8, 9: **par_lchirp, par_echirp, par_hchirp**: linear, exponential, hyperbolic
chirps, superposed, with frequency modulation in parallel,
spanning `fmin1` to `fmax1` and `fmin2` to `fmax2`.
10. **jumps**: large instant frequency transitions, `cos(2pi f*t), f=2 -> f=100`
11. **packed**: closely-spaced bands of sinusoids with majority overlap, e.g.
`cos(w*t[No:]) + cos((w+1)*t[-No:]) + cos((w+3)*t[No:]) + ...`,
`No = .8*len(t)`.
12. **packed_poly**: closely-packed polynomial frequency modulations
(non-configurable)
Generates https://www.desmos.com/calculator/swbhgezpjk with A.M.
13. **poly_cubic**: cubic polynomial frequency variation + pure tone
(non-configurable)
"""
import inspect
import numpy as np
import scipy.signal as sig
from numpy.fft import rfft
from . import plt
from ._ssq_cwt import ssq_cwt
from ._ssq_stft import ssq_stft
from .utils import WARN, _textwrap
from .wavelets import Wavelet
from .visuals import plot, plots, imshow
from .ridge_extraction import extract_ridges
pi = np.pi

# Fallback defaults consumed by `TestSignals.__init__` when left unspecified.
DEFAULT_N = 512
DEFAULT_SNR = None
DEFAULT_SEED = None
# Per-signal default keyword args; keys name generator methods. The
# 'sine:am-cosine' entry is a (carrier_kwargs, am_kwargs) pair.
DEFAULT_ARGS = {
    'cosine': dict(f=64, phi0=0),
    'sine': dict(f=64, phi0=0),
    'lchirp': dict(tmin=0, tmax=1, fmin=0, fmax=None),
    'echirp': dict(tmin=0, tmax=1, fmin=1, fmax=None),
    'hchirp': dict(tmin=0, tmax=1, fmin=1, fmax=None),
    'jumps': dict(),
    'low': dict(),
    'am-cosine': dict(amin=.1),
    'am-sine': dict(amin=.1),
    'am-exp': dict(amin=.1),
    'am-gauss': dict(amin=.01),
    'sine:am-cosine': (dict(f=16), dict(amin=.5)),
}
# Default time-vector parameters (tmin/tmax bounds, endpoint inclusion).
DEFAULT_TKW = dict(tmin=0, tmax=1, endpoint=True)
#### Test signals ############################################################
class TestSignals():
"""Signals of varying time-frequency characteristics. Convenience methods
to plot multiple signals and their transforms under varying wavelet / window
parameters.
`.demo(signals)` to visualize `signals`, `test_transforms(fn)` to apply `fn`
to and visualize output.
See `examples/` on Github, and
https://overlordgolddragon.github.io/test-signals/
Also see `help(ssqueezepy._test_signals)`, `TestSignals.SUPPORTED`,
`TestSignals.DEMO`.
**Sweep functions**
For `lchirp`, `echirp`, & `hchirp`, `N` will be determined automatically
if `tmin`, `tmax`, `fmin`, and `fmax` are provided, minimally such that
no aliasing occurs.
**Demo signals**
`TestSignals.DEMO` holds list of `signals` names invoked when passing
`signals='all'`, which can be changed.
# Arguments
N: int
Will use this as default `N` anytime `N` is left unspecified.
snr: float / None
If not None, will add random normal (White Gaussian) noise to
signal of SNR `snr` - computed as:
SNR = 10*log10(xo_var / noise_var)
noise_var = xo_var / 10^(SNR/10)
where `xo_var` = unnoised signal variance.
default_args: dict
`{<signal_name>: {'param_name': value}}` pairs, where `signal_name`
is one of `SUPPORTED`. See `test_signals.DEFAULT_ARGS`.
default_tkw: dict
Example with all key-value pairs: `dict(tmin=0, tmax=1)`.
warn_alias: bool (default True)
Whether to print warning if generated signal aliases (f > fs/2);
to disable, pass `warn_alias=False` to `__init__()`, or set directly
on instance (`TestSignals().warn_alias=False`).
seed: int / None
If not None, will `np.random.seed(seed)` before applying `snr` noise.
"""
SUPPORTED = ['cosine', 'sine', 'lchirp', 'echirp', 'echirp_pc', 'hchirp',
'par-lchirp', 'par-echirp', 'par-hchirp', 'jumps', 'packed',
'packed-poly', 'poly-cubic',
'am-sine', 'am-cosine', 'am-exp', 'am-gauss']
# what to show with `signal='all'`, and in what order
DEMO = ['cosine', 'sine',
'lchirp', 'echirp', 'hchirp',
'#lchirp', '#echirp', '#hchirp',
'par-lchirp', 'par-echirp', 'par-hchirp', '#par-lchirp',
'jumps', 'packed', 'packed-poly', 'poly-cubic',
'am-sine', 'am-cosine', 'am-exp', 'am-gauss']
def __init__(self, N=None, snr=None, default_args=None, default_tkw=None,
             warn_alias=True, seed=None):
    """Store configuration, falling back to module-level defaults, and fill
    in any per-signal / time-vector defaults the user didn't override."""
    self.N = N or DEFAULT_N
    self.snr = snr or DEFAULT_SNR
    self.default_args = default_args or DEFAULT_ARGS
    self.default_tkw = default_tkw or DEFAULT_TKW
    self.warn_alias = warn_alias
    self.seed = seed or DEFAULT_SEED

    # backfill unspecified entries from the module defaults
    for defaults, target in ((DEFAULT_ARGS, self.default_args),
                             (DEFAULT_TKW, self.default_tkw)):
        for name, value in defaults.items():
            target.setdefault(name, value)
#### test signals ########################################################
def _maybe_warn_alias(self, phi, tol=.02):
    """Warn when the phase's max increment exceeds pi (aliasing), naming the
    generator two frames up the stack. A small overshoot (`tol`) is allowed
    since it may occur benignly and isn't worth warning about."""
    if not self.warn_alias:
        return
    peak = np.diff(phi).max()
    if (peak - np.pi) > tol:
        WARN("`%s` has aliased w/ max(diff(phi))=%.6f>%.6f" % (
            inspect.stack()[2][3], peak, pi))
def sine(self, N=None, f=1, phi0=0, **tkw):
    """sin(2pi*f*t + phi0)"""
    tkw.setdefault('endpoint', False)
    t, *_ = self._process_params(N, tkw)
    phase = 2*pi * f * t + phi0
    self._maybe_warn_alias(phase)
    return np.sin(phase), t
def cosine(self, N=None, f=1, phi0=0, **tkw):
    """cos(2pi*f*t + phi0)"""
    tkw.setdefault('endpoint', False)
    t, *_ = self._process_params(N, tkw)
    phase = 2*pi * f * t + phi0
    self._maybe_warn_alias(phase)
    return np.cos(phase), t
def _generate(self, fn, N, fmin, fmax, **tkw):
    """Shared chirp driver: resolve time/frequency params, evaluate the phase
    via `fn`, warn on aliasing, and return the cosine carrier with `t`."""
    t, tmin, tmax, fmax = self._process_params(N, tkw, fn, fmin, fmax)
    phase = fn(t, tmin, tmax, fmin, fmax)
    self._maybe_warn_alias(phase)
    return np.cos(phase), t
def lchirp(self, N=None, fmin=0, fmax=None, **tkw):
    """Linear chirp spanning `fmin` to `fmax`.

    >>> f(t)   = a*t + b
    >>> phi(t) = (a/2)*(t^2 - tmin^2) + b*(t - tmin)
    >>> a = (fmin - fmax) / (tmin - tmax)
        b = (fmin*tmax - fmax*tmin) / (tmax - tmin)
    """
    out = self._generate(self._lchirp_fn, N, fmin, fmax, **tkw)
    return out
def _lchirp_fn(self, t, tmin, tmax, fmin, fmax, get_w=False):
a = (fmin - fmax) / (tmin - tmax)
b = (fmin*tmax - fmax*tmin) / (tmax - tmin)
phi = (a/2)*(t**2 - tmin**2) + b*(t - tmin)
phi *= (2*pi)
if get_w:
w = a*t + b
w *= (2*pi)
return (phi, w) if get_w else phi
def echirp(self, N=None, fmin=1, fmax=None, **tkw):
    """Exponential (geometric) chirp spanning `fmin` to `fmax`.

    >>> f(t)   = a*b^t
    >>> phi(t) = (a/ln(b)) * (b^t - b^tmin)
    >>> a = (fmin^tmax / fmax^tmin) ^ 1/(tmax - tmin)
        b = fmax^(1/tmax) * (1/a)^(1/tmax)
    """
    out = self._generate(self._echirp_fn, N, fmin, fmax, **tkw)
    return out
def _echirp_fn(self, t, tmin, tmax, fmin, fmax, get_w=False):
a = (fmin**tmax / fmax**tmin) ** (1/(tmax - tmin))
b = fmax**(1/tmax) * (1/a)**(1/tmax)
phi = (a/np.log(b)) * (b**t - b**tmin)
phi *= (2*pi)
if get_w:
w = a*b**t
w *= (2*pi)
return (phi, w) if get_w else phi
def echirp_pc(self, N=None, fmin=0, fmax=None, **tkw):
    """Alternate design that keeps f'(t) fixed at `e`, but is no longer
    geometric in the sense `f(t2) / f(t1) = const.`. "echirp plus constant".

    >>> f(t)   = a*exp(t) + b
    >>> phi(t) = a*(exp(t) - exp(tmin)) + b*(t - tmin)
    >>> a = (fmax - fmin)/(exp(tmax) - exp(tmin))
        b = (fmin*exp(tmax) - fmax*exp(tmin)) / (exp(tmax) - exp(tmin))
    """
    out = self._generate(self._echirp_pc_fn, N, fmin, fmax, **tkw)
    return out
def _echirp_pc_fn(self, t, tmin, tmax, fmin, fmax, get_w=False):
a, b, c, d = fmin, fmax, tmin, tmax
A = (b - a) / (np.exp(d) - np.exp(c))
B = (a*np.exp(d) - b*np.exp(c)) / (np.exp(d) - np.exp(c))
phi = A*(np.exp(t) - np.exp(tmin)) + B*(t - tmin)
phi *= (2*pi)
if get_w:
w = A*np.exp(t) + B
w *= (2*pi)
return (phi, w) if get_w else phi
def hchirp(self, N=None, fmin=.1, fmax=None, **tkw):
    """Hyperbolic chirp spanning `fmin` to `fmax`.

    >>> f(t)   = A / (B - t)^2
    >>> phi(t) = A * (1/(B - t) + 1/(tmin - B))
    >>> a, b, c, d = fmin, fmax, tmin, tmax

        A = AN / AD,  B = BN / BD,
        AN = 2*sqrt(a^3*b^3*(c - d)^4) + a^2*b*(c - d)^2 + a*b^2*(c - d)^2
        AD = (a - b)^2
        BN = sqrt(a^3*b^3*(c-d)^4) + a^2*b*c*(c-d) + a*b^2*d*(d - c)
        BD = a*b*(a - b)*(c - d)
    """
    out = self._generate(self._hchirp_fn, N, fmin, fmax, **tkw)
    return out
def _hchirp_fn(self, t, tmin, tmax, fmin, fmax, get_w=False):
a, b, c, d = fmin, fmax, tmin, tmax
AN = (2*np.sqrt(a**3*b**3*(c - d)**4) + a**2*b*(c - d)**2
+ a*b**2*(c - d)**2)
AD = (a - b)**2
BN = np.sqrt(a**3*b**3*(c - d)**4) + a**2*b*c*(c - d) + a*b**2*d*(d - c)
BD = a*b*(a - b)*(c - d)
A = AN / AD
B = BN / BD
phi = A * (1/(B - t) + 1/(tmin - B))
phi *= (2*pi)
if get_w:
w = A / (B - t)**2
w *= (2*pi)
return (phi, w) if get_w else phi
def par_lchirp(self, N=None, fmin1=None, fmax1=None, fmin2=None, fmax2=None,
               **tkw):
    """Two linear chirps summed, sweeping in parallel. Should have
    `fmax2 > fmax1`, `fmin2 > fmin1`, and shared `tmin`, `tmax`.
    Unspecified bounds are offset by N/10 and capped at Nyquist (N/2).
    """
    N = N or self.N
    offset = N/10
    if fmin1 is None:
        fmin1 = self.default_args['lchirp'].get('fmin', 0)
    if fmin2 is None:
        fmin2 = fmin1 + offset
    if fmax1 is None:
        # with fmax1 unset, fmax2 is always recomputed from Nyquist
        fmax2 = N/2
        fmax1 = fmax2 - offset
    elif fmax2 is None:
        fmax2 = min(N/2, fmax1 + offset)

    x1, t = self.lchirp(N, fmin1, fmax1, **tkw)
    x2, _ = self.lchirp(N, fmin2, fmax2, **tkw)
    return x1 + x2, t
def par_echirp(self, N=None, fmin1=None, fmax1=None, fmin2=None, fmax2=None,
               **tkw):
    """Two exponential chirps summed, sweeping in parallel. Should have
    `fmax2 > fmax1`, `fmin2 > fmin1`, and shared `tmin`, `tmax`.
    Unspecified bounds are related by a 1.5x ratio, capped at Nyquist (N/2).
    """
    N = N or self.N
    ratio = 1.5
    if fmin1 is None:
        fmin1 = self.default_args['echirp'].get('fmin', 1)
    if fmin2 is None:
        fmin2 = fmin1 * ratio
    if fmax1 is None:
        # with fmax1 unset, fmax2 is always recomputed from Nyquist
        fmax2 = N/2
        fmax1 = fmax2 / ratio
    elif fmax2 is None:
        fmax2 = min(N/2, fmax1 * ratio)

    x1, t = self.echirp(N, fmin1, fmax1, **tkw)
    x2, _ = self.echirp(N, fmin2, fmax2, **tkw)
    return x1 + x2, t
def par_hchirp(self, N=None, fmin1=None, fmax1=None, fmin2=None, fmax2=None,
               **tkw):
    """Two hyperbolic chirps summed, sweeping in parallel. Should have
    `fmax2 > fmax1`, `fmin2 > fmin1`, and shared `tmin`, `tmax`.
    Unspecified bounds are related by a 3x ratio, capped at Nyquist (N/2).
    """
    N = N or self.N
    ratio = 3
    if fmin1 is None:
        fmin1 = self.default_args['hchirp'].get('fmin', 1)
    if fmin2 is None:
        fmin2 = fmin1 * ratio
    if fmax1 is None:
        # with fmax1 unset, fmax2 is always recomputed from Nyquist
        fmax2 = N/2
        fmax1 = fmax2 / ratio
    elif fmax2 is None:
        fmax2 = min(N/2, fmax1 * ratio)

    x1, t = self.hchirp(N, fmin1, fmax1, **tkw)
    x2, _ = self.hchirp(N, fmin2, fmax2, **tkw)
    return x1 + x2, t
def am_sine(self, N=None, f=1, amin=0, amax=1, phi=0, **tkw):
    """Sine amplitude modulation: (sin + 1)/2 rescaled into [amin, amax]."""
    N = N or self.N
    carrier, t = self.sine(N, f, phi, **tkw)
    unit = (carrier + 1) / 2  # map [-1, 1] -> [0, 1]
    return amin + (amax - amin) * unit, t
def am_cosine(self, N=None, f=1, amin=0, amax=1, phi=0, **tkw):
    """Cosine amplitude modulation: (cos + 1)/2 rescaled into [amin, amax]."""
    N = N or self.N
    carrier, t = self.cosine(N, f, phi, **tkw)
    unit = (carrier + 1) / 2  # map [-1, 1] -> [0, 1]
    return amin + (amax - amin) * unit, t
def am_exp(self, N=None, amin=.1, amax=1, **tkw):
    """Exponential amplitude envelope from `amin` to `amax`, reusing
    `_echirp_fn`'s frequency expression (divided out of its 2*pi scaling)."""
    N = N or self.N
    t, tmin, tmax = self._process_params(N, tkw)
    _, w = self._echirp_fn(t, tmin, tmax, amin, amax, get_w=True)
    return w / (2*pi), t
def am_gauss(self, N=None, amin=.1, amax=1, **tkw):
    """Gaussian bump centered at the middle sample, scaled to [amin, amax]."""
    N = N or self.N
    t = _t(-1, 1, N)
    bump = np.exp(-5 * (t - t.mean())**2)
    return amin + (amax - amin)*bump, t
def jumps(self, N=None, freqs=None, **tkw):
    """Concatenated constant-frequency segments — abrupt instantaneous
    frequency transitions, e.g. `cos(2pi f*t), f=2 -> f=100`."""
    N = N or self.N
    t, tmin, tmax = self._process_params(N, tkw)
    n_freqs = 4 if freqs is None else len(freqs)
    M = N // n_freqs  # samples per segment
    if freqs is None:
        freqs = [1, M/4, M/2, M/16]

    endpoint = tkw.get('endpoint', self.default_tkw.get('endpoint', False))
    # one continuous time vector spanning all segments
    t_all = _t(tmin, (tmax - tmin) * len(freqs), M * len(freqs), endpoint)
    segments = [np.cos(2*pi * f * t_all[i*M : (i+1)*M])
                for i, f in enumerate(freqs)]
    return np.hstack(segments), t_all
def packed(self, N=None, freqs=None, overlap=.8, **tkw):
    """Closely-spaced sinusoid bands, alternately anchored to the start and
    end of `t`, each spanning an `overlap` fraction of the samples so the
    majority of every pair overlaps."""
    N = N or self.N
    t, *_ = self._process_params(N, tkw)
    if freqs is None:
        freqs = [.5, 1, 2, N/10, N/10 + N/50, N/10 + N/25,
                 N/5, N/4, N/3, N/3 + N/10]

    n_ov = int(overlap * len(t))
    x = np.zeros(len(t))
    for i, f in enumerate(freqs):
        # even-indexed bands hug the start; odd-indexed hug the end
        sl = slice(0, n_ov) if i % 2 == 0 else slice(-n_ov, None)
        x[sl] += np.cos(2*pi * f * t[sl])
    return x, t
def packed_poly(self, N=None, **tkw):
    """Closely-packed polynomial frequency modulations (non-configurable;
    adjusts with N to keep bands approx unmoved in time-frequency plane).
    Generates https://www.desmos.com/calculator/swbhgezpjk with A.M.
    """
    if not N:
        N = self.N
    t = np.linspace(0, 10, N)
    adj = N / 512  # keep FMs around same part of time-freq plane
    k1, k2, k3 = 10 * adj, 2.4 * adj, 4.8 * adj  # frequency offsets
    phase1 = k1*t - 0.3*adj*np.sin(t) - 1.8*adj*t**1.5
    phase2 = k2*t + 0.5*adj*t**1.2 + .3*np.sin(t)
    phase3 = k3*t + .2*adj*t**1.3
    c1 = (1 + .3 * np.cos(t)) * np.cos(2*np.pi * phase1)
    c2 = (1 + .2 * np.cos(2*t)) * np.exp(-t/15) * np.cos(2*np.pi * phase2)
    c3 = np.cos(2*np.pi * phase3)
    return c1 + c2 + c3, t
def poly_cubic(self, N=None, **tkw):
    """Cubic polynomial frequency variation + pure tone (non-configurable;
    adjusts with N to keep bands approx unmoved in time-frequency plane).
    """
    if not N:
        N = self.N
    t = np.linspace(0, 10, N, endpoint=True)
    scale = N / 256  # keep bands approx fixed in the time-freq plane
    p1 = np.poly1d([0.025, -0.36, 1.25, 2.0]) * scale
    p3 = np.poly1d([0.01, -0.25, 1.5, 4.0]) * scale
    tone = np.sin(2*np.pi * (.5 * scale) * t)
    x = sig.sweep_poly(t, p1) + tone + sig.sweep_poly(t, p3)
    return x, t
#### Test functions ######################################################
def demo(self, signals='all', N=None, dft=None):
    """Plots signal waveforms, and optionally their DFTs.
    # Arguments:
        signals: str / [str] / [(str, dict)]
            'all' will set `signals = TestSignals.DEMO`, and plot in
            that order. Else, strings must be in `TestSignals.SUPPORTED`.
            Can also be `(str, dict)` pairs in a list, dict passed as
            keyword arguments to the generating function.
            Also see `help(ssqueezepy._test_signals)`, and
            `help(TestSignals.make_signals)`.
        N: int
            Length (# of samples) of generated signals.
        dft: None / str['rows', 'cols']
            If not None, will also plot DFT of each signal along the signal.
            If `'cols'`, will stack horizontally - if `'rows'`, vertically.
    """
    data = self.make_signals(signals, N, get_params=True)
    if dft not in (None, 'rows', 'cols'):
        raise ValueError(f"`dft` must be 'rows', 'cols', or None (got {dft})")
    dft_kw = (dict(ncols=2, h=.55, w=1) if dft == 'cols' else
              dict(nrows=2) if dft == 'rows' else
              None)
    for name, (x, t, (fparams, aparams)) in data.items():
        title = self._title(name, len(x), fparams, aparams)
        if not dft:
            plot(t, x, show=1, title=title)
            continue
        axrf = np.abs(rfft(x))
        pkw = [{'title': title}, {'title': f"rDFT({name})"}]
        plots([t, None], [x, axrf], pkw=pkw, show=1, **dft_kw)
def test_transforms(self, fn, signals='all', N=None):
    """Make `fn` return `None` to skip visuals (e.g. if already done by `fn`).
    Input signature is `fn(x, t, params, ...)`, where
    `params = (name, fparams, aparams)`. Output, if not None, must be
    `(Tf, pkw)`, where `Tf` is a 2D np.ndarray time-frequency transform,
    and `pkw` is keyword arguments to `ssqueezepy.visuals.imshow`
    (can be empty dict).
    Also see `help(ssqueezepy._test_signals)`, and
    `help(TestSignals.make_signals)`.
    """
    data = self.make_signals(signals, N, get_params=True)
    base_pkw = dict(abs=1, show=1)
    for name, (x, t, (fparams, aparams)) in data.items():
        out = fn(x, t, (name, fparams, aparams))
        if out is None:
            continue
        out, pkw = out
        base_pkw['title'] = self._title(name, len(x), fparams, aparams)
        for k, v in base_pkw.items():
            pkw.setdefault(k, v)  # user-supplied `pkw` wins
        transforms = out if isinstance(out, (tuple, list)) else [out]
        for Tf in transforms:
            imshow(Tf, **pkw)
#### utils ###############################################################
def make_signals(self, signals='all', N=None, get_params=False):
    """Generates `signals` signals of length `N`.
    Returns list of signals `[x0, x1, ...]` (or if `get_params`, dictionary
    of `{name: x, t, (fparams, aparams)}`), where `x` is the signal,
    `t` is its time vector, `fparams` is a dict of keyword args to the carrier,
    and `aparams` to the amplitude modulator (if applicable, e.g.
    `lchirp:am-sine').
    `fparams` may additionally contain a special kwarg: `snr`, not passed to
    carrier `fn`, that adds random normal noise of SNR `snr` to signal.
    Also see `help(ssqueezepy._test_signals)`.
    """
    def _process_args(name, fparams, aparams):
        # Split 'carrier:modulator' into the two generating-method names;
        # dashes alias underscores ('am-sine' -> method `am_sine`), and a
        # leading '#' (reversed-superposition marker) is stripped.
        fname, aname = (name.split(':') if ':' in name else
                        (name, ''))
        fname, aname = fname.replace('-', '_'), aname.replace('-', '_')
        fname = fname.lstrip('#')  # in case present
        # missing carrier/modulator -> all-ones (identity) generator
        fn = (getattr(self, fname) if fname else
              lambda *args, **kw: (np.ones(args[0]), None))
        afn = (getattr(self, aname) if aname else
               lambda *args, **kw: (np.ones(args[0]), None))
        # gather the time-grid kwargs shared by carrier & modulator
        tkw = {}
        for dc in (fparams, aparams):  # `aparams` take precedence
            for k, v in dc.items():
                if k in ('tmin', 'tmax', 'endpoint'):
                    tkw[k] = v
        return fn, afn, fname, aname, tkw
    names, params_all = self._process_input(signals)
    data = {}
    for name, (fparams, aparams) in zip(names, params_all):
        fn, afn, *_, tkw = _process_args(name, fparams, aparams)
        # `snr` is consumed here, never forwarded to the carrier
        snr = fparams.pop('snr', self.snr)
        x, t = fn(N, **fparams)
        # NOTE(review): if `aparams` itself carries 'tmin'/'tmax'/'endpoint',
        # the key is passed twice here (via **aparams and **tkw) and raises
        # TypeError -- confirm modulator params never include these keys.
        x *= afn(len(x), **aparams, **tkw)[0]
        if name[0] == '#':
            # '#' prefix: superimpose the time-reversed signal
            x += x[::-1]
        if snr:
            # noise variance for the requested SNR (dB), from pre-noise x
            noise_var = x.var() / 10**(snr/10)
            if self.seed is not None:
                np.random.seed(self.seed)
            noise = np.sqrt(noise_var) * np.random.randn(len(x))
            # use actual values
            fparams['snr'] = 10*np.log10(x.var() / noise.var())
            x += noise
        data[name] = (x, t, (fparams, aparams))
    if not get_params:
        data = [d[0] for d in data.values()]
        if len(data) == 1:
            # single signal: return it bare rather than in a list
            data = data[0]
    return data
@classmethod
def _title(self, signal, N, fparams, aparams, x=None, wrap_len=70):
    """Build a wrapped plot title, `"name | N=..., k=v, ..."`, from the
    signal name and its carrier / modulator parameters."""
    fparams = self._process_varname_alias(signal, N, fparams)
    snr = fparams.pop('snr', None)
    if snr:
        fparams = dict(N=N, SNR="{:.1f}dB".format(snr), **fparams)
    else:
        fparams = dict(N=N, **fparams)
    # drop `.0` from integer floats
    fparams = {key: (int(val) if (isinstance(val, float) and
                                  val.is_integer()) else val)
               for key, val in fparams.items()}
    ptxt = ', '.join(f"{k}={v}" for k, v in fparams.items())
    title = "{} | {}".format(signal, ptxt)
    if aparams:
        title += ', %s' % ', '.join(f"{k}={v}" for k, v in aparams.items())
    return _textwrap(title, wrap_len)
@staticmethod
def _process_varname_alias(signal, N, fparams):
fparams = fparams.copy()
for k, v in fparams.items():
if (k == 'fmax' and v is None and
any(s in signal for s in ('lchirp', 'echirp', 'hchirp'))):
fparams['fmax'] = N / 2
return fparams
def _process_params(self, N, tkw, fn=None, fmin=None, fmax=None):
    """Fill `tkw` with instance defaults, resolve `N` (estimating a
    non-aliasing length when possible) and `fmax`, and build the time grid.
    Returns `(t, tmin, tmax)` -- plus `fmax` when `fn` is given."""
    tkw = tkw.copy()
    for k, v in self.default_tkw.items():
        tkw.setdefault(k, v)
    tmin, tmax = tkw['tmin'], tkw['tmax']
    if N is None:
        if any(var is None for var in (tmin, tmax, fmin, fmax)):
            N = self.N  # can't estimate; fall back to instance default
        else:
            # frequency trajectory of `fn`, for aliasing estimate
            f_fn = lambda *args, **kw: fn(*args, **kw, get_w=True)[1]
            N = self._est_N_nonalias(f_fn, tmin, tmax, fmin, fmax)
    if fmax is None:
        fmax = N // 2
    t = _t(**tkw, N=N)
    if fn:
        return t, tmin, tmax, fmax
    return t, tmin, tmax
def _est_N_nonalias(self, f_fn, tmin, tmax, fmin, fmax):
"""Find smallest `N` (number of samples) such that signal generated
from `tmin` to `tmax` will not alias.
https://dsp.stackexchange.com/a/72942/50076
max_phi_increment = fmax_fn * (t[1] - t[0])
t[1] - t[0] = (tmax - tmin) / (N - 1) [[endpoint=True]]
max_phi_increment = pi
fmax_fn * (tmax - tmin) / (N - 1) = pi
1 + fmax_fn * (tmax - tmin) / pi = N
"""
# sample sufficiently finely
t = np.linspace(tmin, tmax, 50000, endpoint=True)
fmax_fn = np.max(f_fn(t, tmin, tmax, fmin, fmax))
min_nonalias_N = int(np.ceil(1 + fmax_fn*(tmax - tmin)/pi))
return min_nonalias_N
def _process_input(self, signals):
    """
    `signals`:
        - Ensure is string, or list/tuple of strings or of lists/tuples,
          each list/tuple being a (str, dict) or (str, (dict, dict)) pair.
        - Ensure each string is in `SUPPORTED`, and has an accompanying
          `params` pair (if not, set from `default_args`).
        - Loads parameters into two separate dictionaries, one for
          'carrier' / base function, other for (amplitude) 'modulator'.
          Defaults loaded according to precedence: `name:am-name` overrides
          `name` and `am-name`, but latter two are used if former isn't set.
    Returns `(names, params_all)`, where `params_all[i]` is the
    `[fparams, aparams]` pair for `names[i]`.
    """
    def raise_type_error(signal):
        raise TypeError("all tuple or list elements of `signals` "
                        "must be (str, dict) or (str, (dict, dict)) pairs "
                        "(got (%s))" % ', '.join(
                            map(lambda s: type(s).__name__, signal)))
    # -- validate input types & signal names -------------------------------
    if isinstance(signals, (str, tuple)):
        if signals != 'all':
            signals = [signals]
    elif not isinstance(signals, list):
        raise TypeError("`signals` must be string, list, or tuple "
                        "(got %s)" % type(signals))
    if isinstance(signals, list):
        for signal in signals:
            if isinstance(signal, str):
                if ':' in signal:
                    fname, aname = signal.split(':')
                else:
                    fname, aname = signal, ''
                # '#' (reversed-superposition marker) is not part of the name
                fname = fname.lstrip('#')
                for name in (fname, aname):
                    if name != '' and name not in self.SUPPORTED:
                        raise ValueError(f"'{name}' is not supported; "
                                         "must be one of: "
                                         + ", ".join(self.SUPPORTED))
            elif isinstance(signal, (list, tuple)):
                if not (isinstance(signal[0], str) and
                        isinstance(signal[1], (dict, list, tuple))):
                    raise_type_error(signal)
                elif (isinstance(signal[1], (list, tuple)) and
                      not (isinstance(signal[1][0], dict) and
                           isinstance(signal[1][1], dict))):
                    raise_type_error(signal)
            else:
                raise TypeError("all elements of `signals` must be string, "
                                "or tuple or list of (string, dict) or "
                                "(string, (dict, dict)) pairs "
                                "(found %s)" % type(signal))
    if signals == 'all':
        signals = self.DEMO.copy()
    elif not isinstance(signals, (list, tuple)):
        signals = [signals]
    # -- split entries into (name, fparams, aparams), fill defaults --------
    names, params_all = [], []
    for signal in signals:
        if isinstance(signal, (tuple, list)):
            name, params = signal
            if isinstance(params, (list, tuple)):
                fparams, aparams = params
            else:
                fparams, aparams = params, {}
        else:
            name, fparams, aparams = signal, {}, {}
        # strip '#' while resolving defaults; restored at the end
        if name[0] == '#':
            add_reversed = True
            name = name[1:]
        else:
            add_reversed = False
        if 'am-' in name:
            if name.startswith('am-'):
                if name.endswith(':'):
                    name = name.rstrip(':')
                # bare modulator: pair it with a 'cosine' carrier
                fname, aname = 'cosine', name
                defaults = (self.default_args.get(fname, {}),
                            self.default_args.get(aname, {}))
                name = fname + ':' + aname
            else:
                defaults = self.default_args.get(name, {})
                fname, aname = name.split(':')
            # defaults may be a (carrier, modulator) pair or a single dict
            if isinstance(defaults, (list, tuple)):
                fdefaults, adefaults = defaults
            elif isinstance(defaults, dict) and defaults != {}:
                fdefaults, adefaults = defaults, {}
            else:
                fdefaults, adefaults = self.default_args.get(fname, {}), {}
            if adefaults == {}:
                adefaults = self.default_args.get(aname, {})
            # user-supplied params win over defaults
            for k, v in fdefaults.items():
                fparams[k] = fparams.get(k, v)
            for k, v in adefaults.items():
                aparams[k] = aparams.get(k, v)
            # NOTE(review): `name` was rebased to 'cosine:...' above, so
            # this branch appears unreachable -- confirm intent
            if name.startswith('am-'):
                fdefaults, adefaults = adefaults, fdefaults
        else:
            for k, v in self.default_args.get(name, {}).items():
                fparams[k] = fparams.get(k, v)
        if add_reversed:
            name = '#' + name
        names.append(name)
        params_all.append([fparams, aparams])
    # store latest result for debug purposes
    self._names = names
    self._params_all = params_all
    return names, params_all
#### prebuilt test methods ##############################################
def wavcomp(self, wavelets, signals='all', N=None, w=1.2, h=None,
            tight_kw=None):
    """Plots CWT & SSQ_CWT taken with `wavelets` wavelets side by side,
    vertically.
    # Arguments:
        wavelets: wavelet / list of wavelets (str or `Wavelet` accepted)
        signals, N: see `help(TestSignals.make_signals)`
        w, h, tight_kw: figure width/height scalers & `subplots_adjust` kwargs
    """
    if not isinstance(wavelets, (list, tuple)):
        wavelets = [wavelets]
    # Fix: initialized instances were built into a dead local (`wavs`) while
    # the raw specs were forwarded; `_title_cwt` needs `wavelet.name` /
    # `.config_str`, so string specs would fail there. Forward instances.
    wavelets = [Wavelet._init_if_not_isinstance(wav) for wav in wavelets]
    fn = lambda x, t, params: self._wavcomp_fn(
        x, t, params, wavelets, w=w, h=h, tight_kw=tight_kw)
    self.test_transforms(fn, signals=signals, N=N)
def _wavcomp_fn(self, x, t, params, wavelets, w=1.2, h=None, tight_kw=None):
    """Plot abs(CWT) and abs(SSQ_CWT) of `x`, one row per wavelet,
    with grid spacing sized to the tallest title."""
    def _get_default_hspace():
        """Set dims based on maximum number of rows titles occupy."""
        title_nrows = []
        for wavelet in wavelets:
            name, fparams, aparams = params
            title1, title2 = self._title_cwt(wavelet, name, x,
                                             fparams, aparams)
            title_nrows.extend([title1.count('\n'), title2.count('\n')])
        max_rows = max(title_nrows) + 1
        return (.13 + .05*(max_rows - 2)) * (.9 / h)
    h = h or .45 * len(wavelets)
    fig, axes = plt.subplots(len(wavelets), 2, figsize=(w * 12, h * 12))
    for i, wavelet in enumerate(wavelets):
        # `flipud=1` puts low frequencies at the bottom
        Tx, Wx, *_ = ssq_cwt(x, wavelet, t=t, flipud=1, astensor=False)
        name, fparams, aparams = params
        title1, title2 = self._title_cwt(wavelet, name, x, fparams, aparams)
        pkw = dict(abs=1, ticks=0, fig=fig)
        imshow(Wx, **pkw, ax=axes[i, 0], show=0, title=title1)
        imshow(Tx, **pkw, ax=axes[i, 1], show=0, title=title2)
    tight_kw = tight_kw or {}
    default_hspace = _get_default_hspace()
    # NOTE(review): these defaults overwrite caller-supplied `tight_kw`
    # entries rather than filling gaps -- confirm intended
    defaults = dict(left=0, right=1, bottom=0, top=1, wspace=.01,
                    hspace=default_hspace)
    for k, v in defaults.items():
        tight_kw[k] = v
    plt.subplots_adjust(**tight_kw)
    plt.show()
def cwt_vs_stft(self, wavelet, window, signals='all', N=None,
                win_len=None, n_fft=None, window_name=None, config_str='',
                w=1.2, h=.9, tight_kw=None):
    """Plots CWT & SSQ_CWT, and STFT & SSQ_STFT of `signals` taken with
    `wavelet` and `window` along the rest of parameters.
    `window_name` & `config_str` are used to title STFT plots. `w` & `h`
    control plots' width & height. `tight_kw` is passed to
    `plt.subplots_adjust()`.
    """
    def fn(x, t, params):
        return self._cwt_vs_stft_fn(x, t, params, wavelet, window, win_len,
                                    n_fft, window_name, config_str, w, h,
                                    tight_kw)
    self.test_transforms(fn, signals=signals, N=N)
def _cwt_vs_stft_fn(self, x, t, params, wavelet, window, win_len=None,
                    n_fft=None, window_name=None, config_str='', w=1.2, h=.9,
                    tight_kw=None):
    """2x2 figure: abs(CWT) & abs(SSQ_CWT) top row, abs(STFT) &
    abs(SSQ_STFT) bottom row."""
    def _get_default_hspace():
        """Set dims based on maximum number of rows titles occupy."""
        max_rows = 1 + max(g.count('\n') for g in (ctitle1, ctitle2,
                                                   stitle1, stitle2))
        return (.13 + .05*(max_rows - 2)) * (.9 / h)
    fs = 1 / (t[1] - t[0])  # sampling frequency from the time grid
    Tsx, Sx, *_ = ssq_stft(x, window, n_fft=n_fft, win_len=win_len, fs=fs,
                           astensor=False)
    Twx, Wx, *_ = ssq_cwt(x, wavelet, t=t, flipud=1, astensor=False)
    # flip STFT outputs so orientation matches the (already flipped) CWT
    Tsx, Sx = np.flipud(Tsx), np.flipud(Sx)
    name, fparams, aparams = params
    ctitle1, ctitle2 = self._title_cwt( wavelet, name, x, fparams, aparams)
    stitle1, stitle2 = self._title_stft(window, name, x, fparams, aparams,
                                        win_len, n_fft, window_name,
                                        config_str)
    fig, axes = plt.subplots(2, 2, figsize=(w * 12, h * 12))
    pkw = dict(abs=1, ticks=0, fig=fig)
    imshow(Wx, **pkw, ax=axes[0, 0], show=0, title=ctitle1)
    imshow(Twx, **pkw, ax=axes[0, 1], show=0, title=ctitle2)
    imshow(Sx, **pkw, ax=axes[1, 0], show=0, title=stitle1)
    # special-cased color normalization for two hard-to-see demo signals
    norm = ((0, np.abs(Tsx).mean()*300) if ("packed-poly" in name)
            else None)
    norm = ((0, np.abs(Tsx).mean()*200) if ("#par-lchirp" in name)
            else norm)
    imshow(Tsx, **pkw, ax=axes[1, 1], show=0, title=stitle2, norm=norm)
    tight_kw = tight_kw or {}
    default_hspace = _get_default_hspace()
    # NOTE(review): defaults overwrite caller-supplied `tight_kw` entries
    # rather than filling gaps -- confirm intended
    defaults = dict(left=0, right=1, bottom=0, top=1, wspace=.01,
                    hspace=default_hspace)
    for k, v in defaults.items():
        tight_kw[k] = v
    plt.subplots_adjust(**tight_kw)
    plt.show()
@staticmethod
def _title_cwt(wavelet, name, x, fparams, aparams, wrap_len=53):
    """Titles for the CWT / SSQ_CWT panes."""
    base = TestSignals._title(name, len(x), fparams, aparams)
    # special case: GMW
    wname = wavelet.name.replace(' L1', '').replace(' L2', '')
    wav_info = '%s wavelet | %s' % (wname, wavelet.config_str)
    ctitle1 = _textwrap(base + '\nabs(CWT) | ' + wav_info, wrap_len)
    return ctitle1, 'abs(SSQ_CWT)'
@staticmethod
def _title_stft(window, name, x, fparams, aparams, win_len=None, n_fft=None,
                window_name='', config_str='', wrap_len=53):
    """Titles for the STFT / SSQ_STFT panes."""
    base = TestSignals._title(name, len(x), fparams, aparams)
    if win_len is not None:
        twin = "{} window | win_len={}, n_fft={}, {}".format(
            window_name, win_len, n_fft, config_str)
    else:
        twin = "{} window | n_fft={}, {}".format(window_name, n_fft,
                                                 config_str)
    stitle1 = _textwrap(base + '\nabs(STFT) | ' + twin, wrap_len)
    return stitle1, 'abs(SSQ_STFT)'
def ridgecomp(self, signals='all', N=None, penalty=20, n_ridges=2, bw=None,
              transform='cwt', w=1.2, h=.4, **transform_kw):
    """Plots extracted ridges from a CWT or STFT and them SSQ'd of `signals`,
    superimposed on the transform itself, passing in `transform_kw` to
    `ssq_cwt` or `ssq_stft`. `w` & `h` control plots' width & height.
    See `help(ridge_extraction.extract_ridges)`.
    """
    # Fix: `w` & `h` were accepted but never forwarded to `_ridgecomp_fn`,
    # so the documented size controls were silently ignored.
    fn = lambda x, t, params: self._ridgecomp_fn(
        x, t, params, penalty, n_ridges, bw, transform, w=w, h=h,
        **transform_kw)
    self.test_transforms(fn, signals=signals, N=N)
def _ridgecomp_fn(self, x, t, params, penalty=20, n_ridges=2, bw=None,
                  transform='cwt', w=1.2, h=.4, **transform_kw):
    """Extract ridges from the transform and its synchrosqueezing, and
    plot them dashed over both."""
    transform_fn = ssq_cwt if transform == 'cwt' else ssq_stft
    transform_kw = transform_kw.copy()
    transform_kw['astensor'] = False
    Tfs, Tf, ssq_freqs, scales, *_ = transform_fn(x, t=t, **transform_kw)
    # `bw`: scalar -> shared bandwidth; tuple -> (transform_bw, ssq_bw)
    if bw is None:
        tf_bw, ssq_bw = 10, 2
    elif isinstance(bw, tuple):
        tf_bw, ssq_bw = bw
    else:
        tf_bw = ssq_bw = bw
    rkw = dict(penalty=penalty, n_ridges=n_ridges, transform=transform)
    ridges = extract_ridges(Tf, scales, bw=tf_bw, **rkw)
    ssq_ridges = extract_ridges(Tfs, ssq_freqs, bw=ssq_bw, **rkw)
    name, fparams, aparams = params
    if transform == 'cwt':
        # flip CWT so low freqs sit at the bottom; mirror ridge indices
        # so they keep pointing at the same rows
        Tf = np.flipud(Tf)
        ridges = len(Tf) - ridges
        title, title_s = "abs(CWT) w/ ridges", "abs(SSQ_CWT) w/ ridges"
    else:
        title, title_s = "abs(STFT) w/ ridges", "abs(SSQ_STFT) w/ ridges"
    tridge = "\npenalty={}, n_ridges={}, tf_bw={}, ssq_bw={}".format(
        penalty, n_ridges, tf_bw, ssq_bw)
    title += tridge
    tbase = self._title(name, len(x), fparams, aparams)
    title = tbase + '\n' + title
    _, axes = plt.subplots(1, 2, figsize=(w * 12, h * 12))
    pkw = dict(color='k', linestyle='--', ylims=(0, len(Tf)),
               xlims=(0, Tf.shape[1]), ticks=0)
    plot(ridges, ax=axes[0], **pkw)
    imshow(Tf, abs=1, title=title, ax=axes[0], show=0)
    plot(ssq_ridges, ax=axes[1], **pkw)
    imshow(Tfs, abs=1, title=title_s, ax=axes[1], show=0)
    tight_kw = dict(left=0, right=1, bottom=0, top=1, wspace=.01, hspace=0)
    plt.subplots_adjust(**tight_kw)
    plt.show()
def _t(tmin, tmax, N, endpoint=False):
return np.linspace(tmin, tmax, N, endpoint=endpoint)
| 38,844
| 38.557026
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/_gmw.py
|
# -*- coding: utf-8 -*-
"""Generalized Morse Wavelets.
For complete functionality, utility functions have been ported from jLab, and
largely validated to match jLab's behavior. jLab tests not ported.
"""
import numpy as np
from numpy.fft import ifft
from numba import jit
from scipy.special import (gamma as gamma_fn,
gammaln as gammaln_fn)
from .algos import nCk
from .wavelets import _xifn, _process_params_dtype
from .configs import gdefaults, USE_GPU, IS_PARALLEL
from .utils.backend import torch
from .utils import backend as S
pi = np.pi
#### Base wavelets (`K=1`) ###################################################
def gmw(gamma=None, beta=None, norm=None, order=None, centered_scale=None,
        dtype=None):
    """Generalized Morse Wavelets. Returns function which computes GMW in the
    frequency domain.
    Assumes `beta != 0`; for full functionality use `_gmw.morsewave`.
    Unlike `morsewave`, works with scales rather than frequencies.
    Note that function for `norm='energy'` does *not* rescale freq-domain
    wavelet per `sqrt(scale)`, for consistency with `ssqueezepy.wavelets`.
    See `_gmw.compute_gmw` for code computing freq- and time-domain wavelets
    as arrays with proper scaling in.
    An overview: https://overlordgolddragon.github.io/generalized-morse-wavelets/
    Interactive: https://www.desmos.com/calculator/4gcaeqidxd (bandpass)
                 https://www.desmos.com/calculator/zfxnblqh8p (energy)
    # Arguments
        gamma, beta: float > 0, float > 0
            GMW parameters. See `help(_gmw.morsewave)`.
        norm: str['energy', 'bandpass']
            Normalization to use:
                'energy': L2 norm, keeps time-domain wavelet's energy at unity
                for all `freqs`, i.e. `sum(abs(psi)**2) == 1`.
                'bandpass': L1 norm, keeps freq-domain wavelet's peak value at
                2 for all `freqs`, i.e. `max(psih) == 2`,
                `w[argmax(psih)] == wc`.
            Additionally see `help(_gmw.morsewave)`.
        order: int (default 1)
            Order of the wavelet. `k+1`-th order wavelet is orthogonal to
            `k`-th. `k=0` will call a different but equivalent function for
            simpler code and compute efficiency.
        centered_scale: bool (default False)
            Unlike other `ssqueezepy.wavelets`, by default `scale=1` in
            `morsewave` (i.e. `freqs=1`) computes the wavelet at (peak) center
            frequency. This ensures exact equality between `scale` and
            `1 / center_frequency`, by multiplying input radians `w` by peak
            center freq.
            False by default for consistency with other `ssqueezepy` wavelets.
        dtype: str / type (np.dtype) / None
            See `help(wavelets.Wavelet)`.
    # Returns
        psihfn: function
            Function that computes GMWs, taking `w` (radian frequency)
            as argument.
    # Usage
        wavelet = gmw(3, 60)
        wavelet = Wavelet('gmw')
        wavelet = Wavelet(('gmw', {'beta': 60}))
        Wx, *_ = cwt(x, 'gmw')
    # Correspondence with Morlet
        Following pairs yield ~same frequency resolution, which is ~same
        time-frequency resolution for `mu > 5`, assuming `gamma=3` for all:
            `mu`, `beta`
            (1.70, 1.00),
            (3.00, 3.00),
            (4.00, 5.15),
            (6.00, 11.5),
            (8.00, 21.5),
            (10.0, 33.5),
            (12.0, 48.5),
            (13.4, 60.0),
        The default `beta=12` is hence to closely match Morlet's default
        `mu=6.`.
    # vs Morlet
        Differences grow significant when seeking excellent time localization
        (low `mu`, <4), where Morlet's approximate analyticity breaks down and
        negative frequencies are leaked, whereas GMW remains exactly analytic,
        with vanishing moments toward dc bin. Else, the two don't behave
        noticeably different for `gamma=3`.
    # References
        [1] Generalized Morse Wavelets. S. C. Olhede, A. T. Walden. 2002.
        https://spiral.imperial.ac.uk/bitstream/10044/1/1150/1/
        OlhedeWaldenGenMorse.pdf
        [2] Generalized Morse Wavelets as a Superfamily of Analytic Wavelets.
        J. M. Lilly, S. C. Olhede. 2012.
        https://sci-hub.st/10.1109/TSP.2012.2210890
        [3] Higher-Order Properties of Analytic Wavelets.
        J. M. Lilly, S. C. Olhede. 2009.
        https://sci-hub.st/10.1109/TSP.2008.2007607
        [4] (c) Lilly, J. M. (2021), jLab: A data analysis package for Matlab,
        v1.6.9, http://www.jmlilly.net/jmlsoft.html
        https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morsewave.m
    """
    _check_args(gamma=gamma, beta=beta, norm=norm, order=order)
    kw = gdefaults('_gmw.gmw', gamma=gamma, beta=beta, norm=norm, order=order,
                   centered_scale=centered_scale, dtype=dtype, as_dict=True)
    norm = kw.pop('norm')
    k = kw.pop('order')
    if norm == 'energy' and dtype in ('float32', np.float32):
        raise ValueError("`norm='energy'` w/ `dtype='float32'` is unsupported; "
                         "use 'float64' instead.")
    # `k=0` has dedicated, simpler implementations
    if k == 0:
        l1_fn, l2_fn = gmw_l1, gmw_l2
    else:
        l1_fn, l2_fn = gmw_l1_k, gmw_l2_k
        kw['k'] = k
    return l1_fn(**kw) if norm == 'bandpass' else l2_fn(**kw)
def compute_gmw(N, scale, gamma=3, beta=60, time=False, norm='bandpass',
                order=0, centered_scale=False, norm_scale=True, dtype=None):
    """Evaluates GMWs, returning as arrays. See `help(_gmw.gmw)` for full docs.
    # Arguments
        N: int > 0
            Number of samples to compute.
        scale: float > 0
            Scale at which to sample the freq-domain wavelet: `psih(s * w)`.
        gamma, beta, norm, order:
            See `help(_gmw.gmw)`.
        time: bool (default False)
            Whether to compute the time-domain wavelet, `psi`.
        centered_scale: bool (default False)
            See `help(_gmw.gmw)`.
        norm_scale: bool (default True)
            Whether to rescale as `sqrt(s) * psih(s * w)` for the
            `norm='energy'` case (no effect with `norm='bandpass'`).
    # Returns
        psih: np.ndarray [N]
            Frequency-domain wavelet.
        psi: np.ndarray [N]
            Time-domain wavelet, returned if `time=True`.
    """
    _check_args(gamma=gamma, beta=beta, norm=norm, scale=scale)
    psih_fn = gmw(gamma, beta, norm, order, centered_scale, dtype)
    w = _xifn(scale, N)
    # analytic wavelet: only non-negative-frequency bins are filled
    X = np.zeros(N)
    X[:N//2 + 1] = psih_fn(w[:N//2 + 1])
    if norm == 'energy' and norm_scale:
        wc = morsefreq(gamma, beta)
        X *= np.sqrt(wc * scale) if centered_scale else np.sqrt(scale)
    X[np.isinf(X) | np.isnan(X)] = 0.
    if not time:
        return X
    Xr = X.copy()
    if N % 2 == 0:
        # https://github.com/jonathanlilly/jLab/issues/13
        Xr[N//2] /= 2
    # (-1)**n factor centers the time-domain wavelet
    x = ifft(Xr * (-1)**np.arange(N))
    return X, x
def gmw_l1(gamma=3., beta=60., centered_scale=False, dtype='float64'):
    """Generalized Morse Wavelets, first order, L1(bandpass)-normalized.
    See `help(_gmw.gmw)`.
    """
    _check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
    wc = morsefreq(gamma, beta)
    # precompute log(wc) and cast everything to the working dtype
    gamma, beta, wc, wcl = _process_params_dtype(gamma, beta, wc, np.log(wc),
                                                dtype=dtype)
    if USE_GPU():
        fn = _gmw_l1_gpu
    elif IS_PARALLEL():
        fn = _gmw_l1_par
    else:
        fn = _gmw_l1
    if centered_scale:
        # scale `w` so `scale = 1 / center_frequency` exactly
        return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc, wcl)
    return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, wcl)
@jit(nopython=True, cache=True)
def _gmw_l1(w, gamma, beta, wc, wcl):
    """CPU kernel for first-order L1-normed GMW; `wcl = log(wc)` is
    precomputed by `gmw_l1`."""
    # NOTE: numba.jit, unlike numpy & torch, will promote to float64 with
    # array float32 and scalar float64
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    # 2 * (w/wc)**beta * exp(wc**gamma - w**gamma), computed in log-space;
    # trailing mask restores exact zeros at negative frequencies
    return 2 * np.exp(- beta * wcl + wc**gamma
                      + beta * np.log(w) - w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l1_par(w, gamma, beta, wc, wcl):
    """Parallel-numba variant of `_gmw_l1` (identical math)."""
    # NOTE: numba.jit, unlike numpy & torch, will promote to float64 with
    # array float32 and scalar float64
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    return 2 * np.exp(- beta * wcl + wc**gamma
                      + beta * np.log(w) - w**gamma) * w_nonneg
def _gmw_l1_gpu(w, gamma, beta, wc, wcl):
w_nonneg = (w >= 0)
w *= w_nonneg
return 2 * torch.exp(- beta * wcl + wc**gamma
+ beta * torch.log(w) - w**gamma) * w_nonneg
def gmw_l2(gamma=3., beta=60., centered_scale=False, dtype='float64'):
    """Generalized Morse Wavelets, first order, L2(energy)-normalized.
    See `help(_gmw.gmw)`.
    """
    _check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
    wc = morsefreq(gamma, beta)
    r = (2*beta + 1) / gamma
    # precompute gamma_fn(r) and cast all constants to the working dtype
    (gamma, beta, wc, r, rgamma
     ) = _process_params_dtype(gamma, beta, wc, r, gamma_fn(r), dtype=dtype)
    if USE_GPU():
        fn = _gmw_l2_gpu
    elif IS_PARALLEL():
        fn = _gmw_l2_par
    else:
        fn = _gmw_l2
    if centered_scale:
        return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc,
                            r, rgamma)
    return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, r, rgamma)
@jit(nopython=True, cache=True)
def _gmw_l2(w, gamma, beta, wc, r, rgamma):
    """CPU kernel for first-order L2-normed GMW; `r = (2*beta + 1)/gamma`
    and `rgamma = gamma_fn(r)` are precomputed by `gmw_l2`."""
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    return np.sqrt(2.*pi * gamma * 2.**r / rgamma
                   ) * w**beta * np.exp(-w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l2_par(w, gamma, beta, wc, r, rgamma):
    """Parallel-numba variant of `_gmw_l2` (identical math)."""
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    return np.sqrt(2.*pi * gamma * 2.**r / rgamma
                   ) * w**beta * np.exp(-w**gamma) * w_nonneg
def _gmw_l2_gpu(w, gamma, beta, wc, r, rgamma):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
return torch.sqrt(2.*pi * gamma * 2.**r / rgamma
) * w**beta * torch.exp(-w**gamma) * w_nonneg
def gmw_l1_k(gamma=3., beta=60., k=1, centered_scale=False, dtype='float64'):
    """Generalized Morse Wavelets, `k`-th order, L1(bandpass)-normalized.
    See `help(_gmw.gmw)`.
    """
    _check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
    wc = morsefreq(gamma, beta)
    # order-`k` Laguerre-polynomial constants, fixed for all `w`
    k_consts = _gmw_k_constants(gamma, beta, k, norm='bandpass', dtype=dtype)
    gamma, beta, wc = _process_params_dtype(gamma, beta, wc, dtype=dtype)
    if USE_GPU():
        fn = _gmw_l1_k_gpu
    elif IS_PARALLEL():
        fn = _gmw_l1_k_par
    else:
        fn = _gmw_l1_k
    if centered_scale:
        return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc,
                            k_consts)
    return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, k_consts)
@jit(nopython=True, cache=True)
def _gmw_l1_k(w, gamma, beta, wc, k_consts):
    """CPU kernel for k-th order L1-normed GMW; `k_consts` are the
    Laguerre-polynomial constants from `_gmw_k_constants`."""
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    C = np.zeros(w.shape, dtype=w.dtype)
    for m in range(len(k_consts)):
        # polynomial in (2*w**gamma)
        C += k_consts[m] * (2*w**gamma)**m
    return C * np.exp(- beta * np.log(wc) + wc**gamma
                      + beta * np.log(w) - w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l1_k_par(w, gamma, beta, wc, k_consts):
    """Parallel-numba variant of `_gmw_l1_k` (identical math)."""
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    C = np.zeros(w.shape, dtype=w.dtype)
    for m in range(len(k_consts)):
        C += k_consts[m] * (2*w**gamma)**m
    return C * np.exp(- beta * np.log(wc) + wc**gamma
                      + beta * np.log(w) - w**gamma) * w_nonneg
def _gmw_l1_k_gpu(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = w.new_zeros(w.shape)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * torch.exp(- beta * torch.log(wc) + wc**gamma
+ beta * torch.log(w) - w**gamma) * w_nonneg
def gmw_l2_k(gamma=3., beta=60., k=1, centered_scale=False, dtype='float64'):
    """Generalized Morse Wavelets, `k`-th order, L2(energy)-normalized.
    See `help(_gmw.gmw)`.
    """
    _check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
    wc = morsefreq(gamma, beta)
    # order-`k` Laguerre-polynomial constants, fixed for all `w`
    k_consts = _gmw_k_constants(gamma, beta, k, norm='energy', dtype=dtype)
    gamma, beta, wc = _process_params_dtype(gamma, beta, wc, dtype=dtype)
    if USE_GPU():
        fn = _gmw_l2_k_gpu
    elif IS_PARALLEL():
        fn = _gmw_l2_k_par
    else:
        fn = _gmw_l2_k
    if centered_scale:
        return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc,
                            k_consts)
    return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, k_consts)
@jit(nopython=True, cache=True)
def _gmw_l2_k(w, gamma, beta, wc, k_consts):
    """CPU kernel for k-th order L2-normed GMW; `k_consts` from
    `_gmw_k_constants` (normalization is baked into them)."""
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    C = np.zeros(w.shape, dtype=w.dtype)
    for m in range(len(k_consts)):
        # polynomial in (2*w**gamma)
        C += k_consts[m] * (2*w**gamma)**m
    return C * np.exp(beta * np.log(w) - w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l2_k_par(w, gamma, beta, wc, k_consts):
    """Parallel-numba variant of `_gmw_l2_k` (identical math)."""
    w_nonneg = (w >= 0)
    w *= w_nonneg  # zero negative `w` to avoid nans
    C = np.zeros(w.shape, dtype=w.dtype)
    for m in range(len(k_consts)):
        C += k_consts[m] * (2*w**gamma)**m
    return C * np.exp(beta * np.log(w) - w**gamma) * w_nonneg
def _gmw_l2_k_gpu(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = w.new_zeros(w.shape)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * torch.exp(beta * torch.log(w) - w**gamma) * w_nonneg
def _gmw_k_constants(gamma, beta, k, norm='bandpass', dtype='float64'):
"""Laguerre polynomial constants & `coeff` term.
Higher-order GMWs are coded such that constants are pre-computed and reused
for any `w` input, since they remain fixed for said order.
"""
r = (2 * beta + 1) / gamma
c = r - 1
# compute `coeff`
if norm == 'bandpass':
coeff = np.sqrt(np.exp(gammaln_fn(r) + gammaln_fn(k + 1) -
gammaln_fn(k + r)))
elif norm == 'energy':
coeff = np.sqrt(2*pi * gamma * (2**r) *
np.exp(gammaln_fn(k + 1) - gammaln_fn(k + r)))
# compute Laguerre polynomial constants
L_consts = np.zeros(k + 1, dtype=dtype)
for m in range(k + 1):
fact = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1) -
gammaln_fn(k - m + 1))
L_consts[m] = (-1)**m * fact / gamma_fn(m + 1)
k_consts = L_consts * coeff
if norm == 'bandpass':
k_consts *= 2
k_consts = k_consts.astype(dtype)
return k_consts
#### General order wavelets (any `K`) ########################################
def morsewave(N, freqs, gamma=3, beta=60, K=1, norm='bandpass'):
    """Generalized Morse wavelets of Olhede and Walden (2002).
    # Arguments:
        N: int > 0
            Number of samples / wavelet length
        freqs: float / list / np.ndarray
            (peak) center frequencies at which to generate wavelets,
            in *radians* (i.e. `w` in `w = 2*pi*f`).
        gamma, beta: float, float
            GMW parameters; `(gamma, beta) = (3, 60)` yields optimal
            time-frequency localization, and a good default for natural
            signals.
            - smaller `beta`: greater time resolution, lower freq resolution.
            - `gamma`: structurally alters the wavelet; 2 and 1 provide
              superior time localization but poor joint localization.
            See refs [2], [3].
        K: int > 0
            Will compute first `K` orthogonal GMWs, characterized by
            orders 0 through `K - 1`.
            Note this `K` is 1 greater than in original paper and than `order`
            throughout `ssqueezepy`, but is consistent with jLAB.
        norm: str['energy', 'bandpass']
            Normalization to use. See `help(_gmw.gmw)`, and below.
    # Returns:
        psih: np.ndarray [N x len(freqs) x K]
            Frequency-domain GMW, generated by sampling continuous-time
            function. Will collapse dims of length 1 (e.g. if `K=0` or
            `freqs` is integer).
        psi: np.ndarray [N x len(freqs) x K]
            Time-domain GMW, centered, generated via inverse DFT of `psih`.
    # References
        See `help(_gmw.gmw)`.
    __________________________________________________________________________
    **`beta==0` case**
    For BETA equal to zero, the generalized Morse wavelets describe
    a non-zero-mean function which is not in fact a wavelet. Only 'bandpass'
    normalization is supported for this case.
    In this case the frequency speficies the half-power point of the
    analytic lowpass filter.
    The frequency-domain definition of MORSEWAVE is not necessarily
    a good way to compute the zero-beta functions, however. You will
    probably need to take a very small DT.
    **Multiple orthogonal wavelets**
    MORSEWAVE can compute multiple orthogonal versions of the generalized
    Morse wavelets, characterized by the order K.
    PSI=MORSEWAVE(N,K,GAMMA,BETA,FS) with a fifth numerical argument K
    returns an N x LENGTH(FS) x K array PSI which contains time-domain
    versions of the first K orthogonal generalized Morse wavelets.
    These K different orthogonal wavelets have been employed in
    multiwavelet polarization analysis, see Olhede and Walden (2003a,b).
    Again either bandpass or energy normalization can be applied. With
    bandpass normalization, all wavelets are divided by a constant, setting
    the peak value of the first frequency-domain wavelet equal to 2.
    """
    _check_args(gamma=gamma, beta=beta, norm=norm)
    if not isinstance(freqs, (list, tuple, np.ndarray)):
        freqs = [freqs]
    psi = np.zeros((N, len(freqs), K), dtype='complex128')
    psif = np.zeros((N, len(freqs), K))
    for n, f in enumerate(freqs):
        psif[:, n:n+1, :], psi[:, n:n+1, :] = _morsewave1(N, abs(f), gamma,
                                                          beta, K, norm)
        if f < 0:
            # negative frequency: conjugate time-domain, mirror freq-domain
            psi[:, n:n+1, :] = psi[:, n, :].conj()
            psif[1:, n:n+1, :] = np.flip(psif[1:, n, :], axis=0)
    return psif.squeeze(), psi.squeeze()
def _morsewave1(N, f, gamma, beta, K, norm):
    """Compute the first `K` orthogonal GMWs at a single absolute frequency.

    See `help(_gmw.morsewave)`.

    # Arguments
        N: int
            Number of samples.
        f: float
            Target peak frequency; the radian grid is rescaled by
            `f / morsefreq(gamma, beta)` so the wavelet peaks at `f`
            (presumably cycles/sample, same convention as `morsewave` -- confirm).
        gamma, beta: float, float
            GMW shape parameters.
        K: int
            Number of orthogonal wavelets.
        norm: str['energy', 'bandpass']
            Normalization scheme.

    # Returns
        X: np.ndarray [N x 1 x K]
            Frequency-domain wavelets (shape set by `_morsewave_first_family`).
        x: np.ndarray [N x 1 x K]
            Time-domain wavelets, centered, via inverse DFT of `X`.
    """
    fo = morsefreq(gamma, beta)  # modal (peak) frequency for these gamma, beta
    fact = f / fo                # grid rescale factor placing the peak at `f`
    # radian frequency grid as a column vector (broadcasts over the K axis)
    w = 2*pi * np.linspace(0, 1, N, endpoint=False) / fact
    w = w.reshape(-1, 1)

    # log(w) at w=0 divides by zero on purpose; the nan/inf is zeroed below
    with np.errstate(divide='ignore', invalid='ignore'):
        if norm == 'energy':
            if beta == 0:
                psizero = np.exp(-w**gamma)
            else:
                # w**beta * exp(-w**gamma)
                psizero = np.exp(beta * np.log(w) - w**gamma)
        else:
            if beta == 0:
                psizero = 2 * np.exp(-w**gamma)
            else:
                # Alternate calculation to cancel things that blow up
                psizero = 2 * np.exp(- beta * np.log(fo) + fo**gamma
                                     + beta * np.log(w) - w**gamma)

    if beta == 0:
        # Ensure nice lowpass filters for beta=0;
        # Otherwise, doesn't matter since wavelets vanishes at zero frequency
        psizero[0] /= 2  # Due to unit-step function
    # zero the w=0 artifacts produced under `errstate` above
    psizero[np.isnan(psizero) | np.isinf(psizero)] = 0.

    X = _morsewave_first_family(fact, N, K, gamma, beta, w, psizero, norm)
    X[np.isinf(X)] = 0.

    Xr = X.copy()
    # center time-domain wavelet: (-1)^n modulation in frequency circularly
    # shifts the time-domain result by N/2
    Xr *= (-1)**np.arange(len(Xr)).reshape(-1, 1, 1)
    if len(Xr) % 2 == 0:
        Xr[len(Xr) // 2] /= 2  # halve the Nyquist bin for even N
    x = ifft(Xr, axis=0)
    return X, x
def _morsewave_first_family(fact, N, K, gamma, beta, w, psizero, norm):
    """Assemble the frequency-domain GMW family, orders `k = 0 .. K-1`.

    See `help(_gmw.morsewave)`.

    See Olhede and Walden, "Noise reduction in directional signals using
    multiple Morse wavelets", IEEE Trans. Bio. Eng., v50, 51--57.
    The equation at the top right of page 56 is equivalent to the
    used expressions. Morse wavelets are defined in the frequency
    domain, and so not interpolated in the time domain in the same way
    as other continuous wavelets.

    # Returns
        psif: np.ndarray [len(psizero) x 1 x K]
            k-th slice = normalizing coefficient * `psizero` * generalized
            Laguerre polynomial of order k, over non-negative frequencies only.
    """
    r = (2 * beta + 1) / gamma
    c = r - 1
    L = np.zeros(w.shape)
    psif = np.zeros((len(psizero), 1, K))

    for k in range(K):
        # Log of gamma function much better ... trick from Matlab's ``beta``
        # NOTE(review): no `else` for unrecognized `norm` -- callers are
        # expected to have validated via `_check_args`; confirm upstream.
        if norm == 'energy':
            A = morseafun(gamma, beta, k + 1, norm='energy')
            coeff = np.sqrt(1. / fact) * A
        elif norm == 'bandpass':
            if beta == 0:
                coeff = 1.
            else:
                coeff = np.sqrt(np.exp(gammaln_fn(r) + gammaln_fn(k + 1) -
                                       gammaln_fn(k + r)))
        # Laguerre weighting on the first N//2 + 1 bins only; the remaining
        # (negative-frequency) bins of `L` stay zero -> analytic wavelet
        L[:N//2 + 1] = laguerre(2 * w[:N//2 + 1]**gamma, k, c).reshape(-1, 1)
        psif[:, :, k] = coeff * psizero * L
    return psif
def morseafun(gamma, beta, k=1, norm='bandpass'):
    """GMW amplitude or a-function (evaluated). Used internally by other funcs.

    # Arguments
        gamma, beta: float, float
            Wavelet parameters. See `help(_gmw.morsewave)`.
        k: int >= 1
            Order of the wavelet; see `help(_gmw.morsewave)`.
        norm: str['energy', 'bandpass']
            Wavelet normalization. See `help(_gmw.morsewave)`.

    # Returns
        A: float
            GMW amplitude (freq-domain peak value).

    # Raises
        ValueError: if `norm` is neither 'bandpass' nor 'energy'.
    ______________________________________________________________________
    Lilly, J. M. (2021), jLab: A data analysis package for Matlab, v1.6.9,
    http://www.jmlilly.net/jmlsoft.html
    https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morseafun.m
    """
    if norm == 'bandpass':
        # normalize so the frequency-domain peak value equals 2
        if beta == 0:
            return 2.
        wc = morsefreq(gamma, beta)  # modal (peak) radian frequency
        return 2. / np.exp(beta * np.log(wc) - wc**gamma)
    elif norm == 'energy':
        # unit-energy normalization; log-gamma form for numerical stability
        r = (2*beta + 1) / gamma
        return np.sqrt(2*pi * gamma * (2**r) *
                       np.exp(gammaln_fn(k) - gammaln_fn(k + r - 1)))
    raise ValueError("unsupported `norm`: %s;" % norm
                     + "must be one of: 'bandpass', 'energy'.")
def laguerre(x, k, c):
    """Generalized Laguerre polynomial L_k^(c), evaluated at 1D `x`.

    Used in the computation of the generalized Morse wavelets; follows the
    expression given by Olhede and Walden (2002), "Generalized Morse
    Wavelets", Section III D.
    """
    x = np.atleast_1d(np.asarray(x).squeeze())
    assert x.ndim == 1

    # sum_{m=0}^{k} (-1)^m * binom(k + c, k - m) * x^m / m!,
    # with the binomial evaluated via log-gamma for numerical stability
    terms = []
    for m in range(k + 1):
        binom = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1) -
                       gammaln_fn(k - m + 1))
        terms.append((-1)**m * binom * x**m / gamma_fn(m + 1))
    return np.sum(terms, axis=0)
def morsefreq(gamma, beta, n_out=1):
    """Frequency measures for GMWs (with F. Rekibi).

    `n_out` controls how many parameters are computed and returned, in the
    following order: `wm, we, wi, cwi`, where:

        wm: modal / peak frequency
        we: energy frequency
        wi: instantaneous frequency at time-domain wavelet's center
        cwi: curvature of instantaneous frequency at time-domain
             wavelet's center

    All frequency quantities are *radian*, opposed to linear cyclic (i.e. `w`
    in `w = 2*pi*f`).

    For BETA=0, the "wavelet" becomes an analytic lowpass filter, and `wm`
    is not defined in the usual way. Instead, `wm` is defined as the point
    at which the filter has decayed to one-half of its peak power.

    # References
        [1] Higher-Order Properties of Analytic Wavelets.
        J. M. Lilly, S. C. Olhede. 2009.
        https://sci-hub.st/10.1109/TSP.2008.2007607

        [2] (c) Lilly, J. M. (2021), jLab: A data analysis package for Matlab,
        v1.6.9, http://www.jmlilly.net/jmlsoft.html
        https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morsefreq.m
    """
    if beta == 0:
        # Half-power point of the analytic lowpass filter, per the docstring
        # and jLab's `morsefreq.m`. The modal-frequency formula below would
        # return 0 here, which also breaks downstream `fact = f / fo`.
        wm = np.log(2)**(1 / gamma)
    else:
        wm = (beta / gamma)**(1 / gamma)

    if n_out > 1:
        we = (1 / 2**(1 / gamma)) * (gamma_fn((2*beta + 2) / gamma) /
                                     gamma_fn((2*beta + 1) / gamma))
    if n_out > 2:
        wi = (gamma_fn((beta + 2) / gamma) /
              gamma_fn((beta + 1) / gamma))
    if n_out > 3:
        # curvature from second & third energy cumulants; see `_morsemom`
        k2 = _morsemom(2, gamma, beta, n_out=3)[-1]
        k3 = _morsemom(3, gamma, beta, n_out=3)[-1]
        cwi = -(k3 / k2**1.5)

    if n_out == 1:
        return wm
    elif n_out == 2:
        return wm, we
    elif n_out == 3:
        return wm, we, wi
    return wm, we, wi, cwi
def _morsemom(p, gamma, beta, n_out=4):
    """Frequency-domain `p`-th order moments of the first order GMW.
    Used internally by other funcs.

    `n_out` controls how many parameters are computed and returned, in the
    following order: `Mp, Np, Kp, Lp`, where:
        Mp: p-th order moment
        Np: p-th order energy moment
        Kp: p-th order cumulant
        Lp: p-th order energy cumulant

    The p-th order moment and energy moment are defined as
        Mp = 1/(2 pi) int omegamma^p psi(omegamma) d omegamma
        Np = 1/(2 pi) int omegamma^p |psi(omegamma)|.^2 d omegamma
    respectively, where omegamma is the radian frequency. These are evaluated
    using the 'bandpass' normalization, which has `max(abs(psih(omegamma)))=2`.

    # References
        [1] Higher-Order Properties of Analytic Wavelets.
        J. M. Lilly, S. C. Olhede. 2009.
        https://sci-hub.st/10.1109/TSP.2008.2007607

        [2] (c) Lilly, J. M. (2021), jLab: A data analysis package for Matlab,
        v1.6.9, http://www.jmlilly.net/jmlsoft.html
        https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morsemom.m
    """
    def morsemom1(p, gamma, beta):
        # p-th moment of the k=1 GMW: peak amplitude times normalized moment.
        # `p` may be a scalar or an int array (vectorizes over orders).
        return morseafun(gamma, beta, k=1) * morsef(gamma, beta + p)

    def morsef(gamma, beta):
        # normalized first frequency-domain moment "f_{beta, gamma}" of the
        # first-order GMW
        return (1 / (2*pi * gamma)) * gamma_fn((beta + 1) / gamma)

    Mp = morsemom1(p, gamma, beta)
    if n_out > 1:
        # energy moments: |psi|^2 of a GMW is a GMW with doubled beta
        Np = (2 / 2**((1 + p) / gamma)) * morsemom1(p, gamma, 2*beta)
    if n_out > 2:
        # cumulants require all moments of order 0..p
        prange = np.arange(p + 1)
        moments = morsemom1(prange, gamma, beta)
        cumulants = _moments_to_cumulants(moments)
        Kp = cumulants[p]
    if n_out > 3:
        moments = (2 / 2**((1 + prange) / gamma)
                   ) * morsemom1(prange, gamma, 2 * beta)
        cumulants = _moments_to_cumulants(moments)
        Lp = cumulants[p]

    if n_out == 1:
        return Mp
    elif n_out == 2:
        return Mp, Np
    elif n_out == 3:
        return Mp, Np, Kp
    return Mp, Np, Kp, Lp
def _moments_to_cumulants(moments):
"""Convert moments to cumulants. Used internally by other funcs.
Converts the first N moments `moments =[M0,M1,...M{N-1}]`
into the first N cumulants `cumulants=[K0,K1,...K{N-1}]`.
Note for a probability density function, M0=1 and K0=0.
______________________________________________________________________
Lilly, J. M. (2021), jLab: A data analysis package for Matlab, v1.6.9,
http://www.jmlilly.net/jmlsoft.html
https://github.com/jonathanlilly/jLab/blob/master/jWavelet/moms
"""
moments = np.atleast_1d(np.asarray(moments).squeeze())
assert moments.ndim == 1
cumulants = np.zeros(len(moments))
cumulants[0] = np.log(moments[0])
for n in range(1, len(moments)):
coeff = 0
for k in range(1, n):
coeff += nCk(n - 1, k - 1
) * cumulants[k] * (moments[n - k] / moments[0])
cumulants[n] = (moments[n] / moments[0]) - coeff
return cumulants
def _check_args(gamma=None, beta=None, norm=None, order=None, scale=None,
allow_zerobeta=True):
"""Only checks those that are passed in."""
if gamma is not None and gamma <= 0:
raise ValueError(f"`gamma` must be positive (got {gamma})")
if beta is not None:
if beta < 0:
kind = "non-negative" if allow_zerobeta else "positive"
raise ValueError(f"`beta` must be {kind} (got {beta})")
elif beta == 0 and not allow_zerobeta:
raise ValueError(f"`beta` cannot be zero (got {beta}); "
"use `_gmw.morsewave`, which supports it")
if norm is not None and norm not in ('bandpass', 'energy'):
raise ValueError(f"`norm` must be 'energy' or 'bandpass' (got '{norm}')")
if order is not None:
if (not isinstance(order, (int, float)) or
(isinstance(order, float) and not order.is_integer())):
raise TypeError("`order` must be integer (got %s)" % str(order))
elif order < 0:
raise ValueError("`order` must be >=0 (got %s)" % order)
if scale is not None and scale <= 0:
raise ValueError(f"`scale` must be positive (got {scale})")
| 29,146
| 36.657623
| 83
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/ridge_extraction.py
|
# -*- coding: utf-8 -*-
"""Authors: David Bondesson, John Muradeli
Ridge extraction from time-frequency representations (STFT, CWT, synchrosqueezed).
"""
import numpy as np
from numba import jit, prange
from .utils import EPS32, EPS64
def extract_ridges(Tf, scales, penalty=2., n_ridges=1, bw=15, transform='cwt',
                   get_params=False, parallel=True):
    """Tracks time-frequency ridges by performing forward-backward ridge tracking
    algorithm, based on ref [1] (a version of Eq. III.4).

    Also see: https://www.mathworks.com/help/signal/ref/tfridge.html

    # Arguments:
        Tf: np.ndarray
            Complex time-frequency representation.

        scales:
            Frequency scales to calculate distance penalty term.

        penalty: float
            Value to penalize frequency jumps; multiplies the square of change
            in frequency. Trialworthy values: 0.5, 2, 5, 20, 40. Higher reduces
            odds of a ridge derailing to noise, but makes harder to track fast
            frequency changes.

        n_ridges: int
            Number of ridges to be calculated.

        bw: int
            Decides how many bins will be subtracted around max energy frequency
            bins when extracting multiple ridges (2 is standard for ssq'd).
            See "bw selection".

        transform: str['cwt', 'stft']
            Treats `scales` logarithmically if 'cwt', else linearly.
            `ssq_cwt` & `ssq_stft` are still 'cwt' & 'stft'.

        get_params: bool (default False)
            Whether to also compute and return `ridge_f` & `ridge_e`.

        parallel: bool (default True)
            Whether to use parallelized JIT code; runs faster on some input sizes.

    # Returns
        ridge_idxs: np.ndarray [n_timeshifts x n_ridges]
            Indices for maximum frequency ridge(s).
        ridge_f: np.ndarray [n_timeshifts x n_ridges]
            Quantities corresponding to extracted ridges:
                - STFT: frequencies
                - CWT: scales
        ridge_e: np.ndarray [n_timeshifts x n_ridges]
            Energies corresponding to extracted ridges.

    **bw selection**

    When a component is extracted, a region around it (a number of bins above
    and below the ridge) is zeroed and no longer affects next ridge's extraction.

        - higher: more bins subtracted, lesser chance of selecting the same
          component as the ridge.
        - lower: less bins subtracted, lesser chance of dropping an unrelated
          component before the component is considered.
        - In general, set higher if more `scales` (or greater `nv`), or lower
          frequency resolution:
            - cwt: `wavelets.freq_resolution(wavelet, N, nondim=False)`
            - stft: `utils.window_resolution(window)`
            - `N = utils.p2up(len(x))[0]`

    # References
        1. On the extraction of instantaneous frequencies from ridges in
        time-frequency representations of signals.
        D. Iatsenko, P. V. E. McClintock, A. Stefanovska.
        https://arxiv.org/pdf/1310.7276.pdf
    """
    def generate_penalty_matrix(scales, penalty):
        """Penalty matrix describes all potential penalties of jumping from
        current frequency (first axis) to one or several new frequencies (second
        axis)

        `scales`: frequency scale vector from time-freq transform
        `penalty`: user-set penalty for freqency jumps (standard = 1.0)
        """
        # subtract.outer(A, B) = [[A[0] - B[0], A[0] - B[1], ...],
        #                         [A[1] - B[0], A[1] - B[1], ...],]
        dist_matrix = penalty * np.subtract.outer(scales, scales)**2
        return dist_matrix.squeeze()

    def fw_bw_ridge_tracking(energy_to_track, penalty_matrix, eps):
        """Calculates acummulated penalty in forward (t=0...end) followed by
        backward (t=end...0) direction

        `energy`: squared abs time-frequency transform
        `penalty_matrix`: pre calculated penalty for all potential jumps between
        two frequencies

        Returns: `ridge_idxs_fw_bw`: estimated forward backward frequency
        ridge indices
        """
        (penalized_energy_fw, ridge_idxs_fw
         ) = _accumulated_penalty_energy_fw(energy_to_track, penalty_matrix,
                                            parallel)
        # backward calculation of frequency ridge (min log negative energy)
        ridge_idxs_fw_bw = _accumulated_penalty_energy_bw(
            energy_to_track, penalty_matrix, penalized_energy_fw,
            ridge_idxs_fw, eps, parallel)
        return ridge_idxs_fw_bw

    # `np.cfloat` (alias of complex128) was removed in NumPy 2.0;
    # use the canonical `np.complex128` name
    eps = EPS64 if Tf.dtype == np.complex128 else EPS32
    dtype = np.float64 if Tf.dtype == np.complex128 else np.float32
    scales, eps, penalty = [np.asarray(x, dtype=dtype)
                            for x in (scales, eps, penalty)]
    scales_orig = scales.copy()
    # CWT scales are geometrically spaced -> penalize jumps in log-scale
    scales = (np.log(scales) if transform == 'cwt' else
              scales).squeeze()

    energy = np.abs(Tf)**2
    n_timeshifts = Tf.shape[1]
    ridge_idxs = np.zeros((n_timeshifts, n_ridges), dtype=int)
    if get_params:
        ridge_f = np.zeros((n_timeshifts, n_ridges), dtype=dtype)
        ridge_e = np.zeros((n_timeshifts, n_ridges), dtype=dtype)

    penalty_matrix = generate_penalty_matrix(scales, penalty)

    for i in range(n_ridges):
        # normalized negative log-energy: minimizing it == tracking max energy
        energy_max = energy.max(axis=0)
        energy_neg_log_norm = -np.log(energy / energy_max + eps)

        ridge_idxs[:, i] = fw_bw_ridge_tracking(energy_neg_log_norm,
                                                penalty_matrix, eps)
        if get_params:
            ridge_f[:, i] = scales_orig[ridge_idxs[:, i]]
            ridge_e[:, i] = energy[ridge_idxs[:, i], range(n_timeshifts)]

        # zero a band of `bw` bins around the extracted ridge so the next
        # ridge can't re-select it; clamp the slice start at 0 -- a negative
        # start would wrap around and leave the band un-zeroed
        for time_idx in range(n_timeshifts):
            ridx = ridge_idxs[time_idx, i]
            energy[max(int(ridx - bw), 0):int(ridx + bw), time_idx] = 0

    return ((ridge_idxs, ridge_f, ridge_e) if get_params else
            ridge_idxs)
def _accumulated_penalty_energy_fw(energy_to_track, penalty_matrix, parallel):
    """Calculates acummulated penalty in forward direction (t=0...end).

    `energy_to_track`: squared abs time-frequency transform
    `penalty_matrix`: pre-calculated penalty for all potential jumps between
    two frequencies

    # Returns:
        `penalized_energy`: new energy with added forward penalty
        `ridge_idxs`: calculated initial ridge with only forward penalty
    """
    penalized_energy = energy_to_track.copy()
    fn = (__accumulated_penalty_energy_fwp if parallel else
          __accumulated_penalty_energy_fw)
    fn(penalized_energy, penalty_matrix)
    # Per-timeshift frequency index of minimum accumulated energy.
    # (The previous `np.unravel_index(argmin(axis=0), shape)[1]` computed
    # `idx % n_timeshifts` -- a no-op only when n_freqs <= n_timeshifts,
    # and wrong otherwise.)
    ridge_idxs = np.argmin(penalized_energy, axis=0)
    return penalized_energy, ridge_idxs
@jit(nopython=True, cache=True)
def __accumulated_penalty_energy_fw(penalized_energy, penalty_matrix):
    """In-place forward pass of the dynamic-programming ridge tracker.

    Sweeping timeshifts left to right, adds to each frequency bin the minimum
    of (previous column's accumulated energy + jump penalty into this bin),
    so `penalized_energy[f, t]` becomes the cheapest total path cost ending
    at `(f, t)`.
    """
    for idx_time in range(1, penalized_energy.shape[1]):
        for idx_freq in range(0, penalized_energy.shape[0]):
            penalized_energy[idx_freq, idx_time
                             ] += np.amin(penalized_energy[:, idx_time - 1] +
                                          penalty_matrix[idx_freq, :])
@jit(nopython=True, cache=True, parallel=True)
def __accumulated_penalty_energy_fwp(penalized_energy, penalty_matrix):
    """Parallel variant of `__accumulated_penalty_energy_fw` (same in-place
    forward pass). `prange` over frequency is safe: each iteration writes only
    its own row of the current column and reads only the previous column.
    """
    for idx_time in range(1, penalized_energy.shape[1]):
        for idx_freq in prange(0, penalized_energy.shape[0]):
            penalized_energy[idx_freq, idx_time
                             ] += np.amin(penalized_energy[:, idx_time - 1] +
                                          penalty_matrix[idx_freq, :])
def _accumulated_penalty_energy_bw(energy_to_track, penalty_matrix,
                                   penalized_energy_fw, ridge_idxs_fw,
                                   eps, parallel):
    """Calculates accumulated penalty in backward direction (t=end...0).

    `energy_to_track`: squared abs time-frequency transform
    `penalty_matrix`: pre-calculated penalty for all potential jumps between
    two frequencies
    `penalized_energy_fw`: forward-pass accumulated energies
    `ridge_idxs_fw`: calculated forward ridge (refined in-place)

    Returns the refined forward-backward ridge as an int array.
    """
    if parallel:
        backward_pass = __accumulated_penalty_energy_bwp
    else:
        backward_pass = __accumulated_penalty_energy_bw
    backward_pass(energy_to_track, penalty_matrix, penalized_energy_fw,
                  ridge_idxs_fw, eps)
    return np.asarray(ridge_idxs_fw).astype(int)
@jit(nopython=True, cache=True)
def __accumulated_penalty_energy_bw(e, penalty_matrix, pen_e, ridge_idxs_fw, eps):
    """In-place backward pass: refine `ridge_idxs_fw` from t=end-1 down to 0.

    At each timeshift, picks the frequency bin whose forward accumulated
    energy plus jump penalty reproduces (within `eps`) the next timeshift's
    path cost -- i.e. backtracks the minimizing predecessor of the forward pass.
    """
    for idx_time in range(e.shape[1] - 2, -1, -1):
        # path cost at the already-fixed ridge point of the next timeshift,
        # minus its own (un-accumulated) energy
        val = (pen_e[ridge_idxs_fw[idx_time + 1], idx_time + 1] -
               e[ridge_idxs_fw[idx_time + 1], idx_time + 1])
        for idx_freq in range(e.shape[0]):
            new_penalty = penalty_matrix[ridge_idxs_fw[idx_time + 1], idx_freq]
            if abs(val - (pen_e[idx_freq, idx_time] + new_penalty)) < eps:
                ridge_idxs_fw[idx_time] = idx_freq
@jit(nopython=True, cache=True, parallel=True)
def __accumulated_penalty_energy_bwp(e, penalty_matrix, pen_e, ridge_idxs_fw,
                                     eps):
    """Parallel variant of `__accumulated_penalty_energy_bw`; `prange` is
    applied only over frequency within each timeshift.
    """
    # adding `prange` to `tidx` makes whole computation much faster (x3-4),
    # but breaks it on *some* inputs (unpredictably)
    for tidx in range(e.shape[1] - 1):
        # `prange` only supports a step size of 1, so we use a trick
        # actually can't `prange` `tidx`, not thread-safe
        idx_time = (e.shape[1] - 2) - tidx
        val = (pen_e[ridge_idxs_fw[idx_time + 1], idx_time + 1] -
               e[ridge_idxs_fw[idx_time + 1], idx_time + 1])
        for idx_freq in prange(e.shape[0]):
            new_penalty = penalty_matrix[ridge_idxs_fw[idx_time + 1], idx_freq]
            if abs(val - (pen_e[idx_freq, idx_time] + new_penalty)) < eps:
                ridge_idxs_fw[idx_time] = idx_freq
| 10,013
| 41.978541
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/_ssq_cwt.py
|
# -*- coding: utf-8 -*-
import numpy as np
from .utils import EPS32, EPS64, pi, p2up, adm_ssq, process_scales
from .utils import trigdiff, _process_fs_and_t
from .utils import backend as S
from .algos import replace_under_abs, phase_cwt_cpu, phase_cwt_gpu
from .ssqueezing import ssqueeze, _check_ssqueezing_args
from .wavelets import Wavelet
from ._cwt import cwt
def ssq_cwt(x, wavelet='gmw', scales='log-piecewise', nv=None, fs=None, t=None,
            ssq_freqs=None, padtype='reflect', squeezing='sum', maprange='peak',
            difftype='trig', difforder=None, gamma=None, vectorized=True,
            preserve_transform=None, astensor=True, order=0, nan_checks=None,
            patience=0, flipud=True, cache_wavelet=None,
            get_w=False, get_dWx=False):
    """Synchrosqueezed Continuous Wavelet Transform.
    Implements the algorithm described in Sec. III of [1].

    Uses `wavelet.dtype` precision.

    # Arguments:
        x: np.ndarray / torch.Tensor
            Input vector(s), 1D or 2D. See `help(cwt)`.

        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet sampled in Fourier frequency domain. See `help(cwt)`.

        scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
            CWT scales. See `help(cwt)`.

        nv: int / None
            Number of voices (wavelets per octave). Suggested >= 16.

        fs, t: float, np.ndarray
            See `help(_cwt.cwt)`.

        ssq_freqs: str['log', 'linear'] / np.ndarray / None
            Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
            mapping is only approximate and wavelet-dependent.
            If None, will infer from and set to same distribution as `scales`.

        padtype: str / None
            Pad scheme to apply on input. See `help(utils.padsignal)`.
            `None` -> no padding.

        squeezing: str['sum', 'lebesgue'] / function
            See `help(ssqueezing.ssqueeze)`.

        maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
            Kind of frequency mapping used, determining the range of frequencies
            spanned (fm to fM, min to max).

            - 'maximal': fm=1/dT, fM=1/(2*dt), always. Data's fundamental
              and Nyquist frequencies, determined from `fs` (or `t`).
              Other mappings can never span outside this range.
            - ('peak', 'energy'): sets fm and fM based on center frequency
              associated with `wavelet` at maximum and minimum scale,
              respectively. See `help(wavelets.center_frequency)`.
            - 'peak': the frequency-domain trimmed bell will have its peak
              at Nyquist, meaning all other frequencies are beneath, so each
              scale is still correctly resolved but with downscaled energies.
              With sufficiently-spanned `scales`, coincides with 'maximal'.
            - 'energy': however, the bell's spectral energy is centered
              elsewhere, as right-half of bell is partly or entirely trimmed
              (left-half can be trimmed too). Use for energy-centric mapping,
              which for sufficiently-spanned `scales` will always have lesser
              fM (but ~same fM).
            - tuple: sets `ssq_freqrange` directly.

        difftype: str['trig', 'phase', 'numeric']
            Method by which to differentiate Wx (default='trig') to obtain
            instantaneous frequencies:
                    w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )

            - 'trig': use `dWx`, obtained via trigonometric (frequency-domain
              interpolant) differentiation (see `cwt`, `phase_cwt`).
            - 'phase': differentiate by taking forward finite-difference of
              unwrapped angle of `Wx` (see `phase_cwt`).
            - 'numeric': first-, second-, or fourth-order (set by `difforder`)
              numeric differentiation (see `phase_cwt_num`).

        difforder: int[1, 2, 4]
            Order of differentiation for difftype='numeric' (default=4).

        gamma: float / None
            CWT phase threshold. Sets `w=inf` for small values of `Wx` where
            phase computation is unstable and inaccurate (like in DFT):
                w[abs(Wx) < beta] = inf
            This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
            contributions from points with indeterminate phase.
            Default = 10 * (machine epsilon) = 10 * np.finfo(np.float64).eps
            (or float32)

            It is recommended to standardize the input, or at least not
            pass a small-valued input, to avoid false filtering by `gamma`,
            especially if input obeys a power scaling law
            (e.g. `~1/f` with EEG/MEG, and similar with audio).
            # TODO warn user if `x.max()` is small?

        vectorized: bool (default True)
            Whether to vectorize CWT, i.e. compute quantities for all scales at
            once, which is faster but uses more memory.

        preserve_transform: bool (default None) / None
            Whether to return `Wx` as directly output from `cwt` (it might be
            altered by `ssqueeze` or `phase_transform`). Uses more memory
            per storing extra copy of `Wx`.
            - Defaults to True if `'SSQ_GPU' == '0'`, else False.

        astensor: bool (default True)
            If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
            or move them back to CPU & convert to Numpy arrays.

        order: int (default 0) / tuple[int]
            `order > 0` computes ssq of `cwt` taken with higher-order GMWs.
            If tuple, computes ssq of average of `cwt`s taken at each specified
            order. See `help(_cwt.cwt_higher_order)`.

        nan_checks: bool / None
            Checks whether input has `nan` or `inf` values, and zeros them.
            `False` saves compute. Doesn't support torch inputs.
            Defaults to `True` for NumPy inputs, else `False`.

        patience: int / tuple[int, int]
            pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.

        flipud: bool (default True)
            See `help(ssqueeze)`.

        cache_wavelet: bool (default None) / None
            See `help(cwt)`.

        get_w, get_dWx: bool (default False)
            `get_w`:
                True: will compute phase transform separately, assign it to
                array `w` and return it.
                False: will compute synchrosqueezing directly from `Wx` and
                `dWx` without assigning to intermediate array, which is faster
                (by 20-30%) and takes less memory.
            `get_dWx`:
                True: will return dWx
                False: discards dWx after computing `w` or synchrosqueezing.
            `get_dWx=True` with `get_w=True` uses most memory.
            These options do not affect `Tx`.

    # Returns:
        Tx: np.ndarray [nf x n]
            Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
            (nf = len(ssq_freqs); n = len(x))
            `nf = na` by default, where `na = len(scales)`.
        Wx: np.ndarray [na x n]
            Continuous Wavelet Transform of `x`, L1-normed (see `cwt`).
        ssq_freqs: np.ndarray [nf]
            Frequencies associated with rows of `Tx`.
        scales: np.ndarray [na]
            Scales associated with rows of `Wx`.
        w: np.ndarray [na x n]  (if `get_w=True`)
            Phase transform for each element of `Wx`.
        dWx: [na x n] np.ndarray  (if `get_dWx=True`)
            See `help(_cwt.cwt)`.

    # References:
        1. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications.
        G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010

        2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
        I. Daubechies, S. Maes.
        https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf

        3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
        Decomposition. I. Daubechies, J. Lu, H.T. Wu.
        https://arxiv.org/pdf/0912.2437.pdf

        4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533

        5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        synsq_cwt_fw.m
    """
    def _process_args(x, scales, fs, t, nv, difftype, difforder, squeezing,
                      maprange, wavelet, get_w):
        # validate args and resolve defaults (`difforder`, `nv`, `fs`/`t`)
        if x.ndim == 2 and get_w:
            raise NotImplementedError("`get_w=True` unsupported with batched "
                                      "input.")
        difforder = _check_ssqueezing_args(squeezing, maprange, wavelet,
                                           difftype, difforder, get_w,
                                           transform='cwt')
        # default `nv` only if `scales` wasn't user-supplied as an array
        if nv is None and not isinstance(scales, np.ndarray):
            nv = 32

        N = x.shape[-1]
        dt, fs, t = _process_fs_and_t(fs, t, N)
        return N, dt, fs, difforder, nv

    def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
        # compute phase transform `w` per `difftype`; may trim `Wx` (numeric)
        if difftype == 'trig':
            # calculate instantaneous frequency directly from the
            # frequency-domain derivative
            w = phase_cwt(Wx, dWx, difftype, gamma)
        elif difftype == 'phase':
            # !!! bad; yields negatives, and forcing abs(w) doesn't help
            # calculate inst. freq. from unwrapped phase of CWT
            w = phase_cwt(Wx, None, difftype, gamma)
        elif difftype == 'numeric':
            # !!! tested to be very inaccurate for small scales
            # calculate derivative numerically
            _, n1, _ = p2up(N)
            Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
            w = phase_cwt_num(Wx, dt, difforder, gamma)
        return Wx, w

    N, dt, fs, difforder, nv = _process_args(x, scales, fs, t, nv, difftype,
                                             difforder, squeezing, maprange,
                                             wavelet, get_w)

    wavelet = Wavelet._init_if_not_isinstance(wavelet, N=N)

    # CWT with higher-order GMWs
    if isinstance(order, (tuple, list, range)) or order > 0:
        # keep padding for `trigdiff`
        kw = dict(wavelet=wavelet, scales=scales, fs=fs, nv=nv,
                  l1_norm=True, derivative=False, padtype=padtype, rpadded=True,
                  vectorized=vectorized, astensor=True,
                  cache_wavelet=cache_wavelet, nan_checks=nan_checks)
        _, n1, _ = p2up(N)
        average = isinstance(order, (tuple, list, range))

        Wx, scales = cwt(x, order=order, average=average, **kw)
        # derivative computed separately here (cwt was run without it),
        # then padding is stripped back to the original length N
        dWx = trigdiff(Wx, fs, rpadded=True, N=N, n1=n1)
        Wx = Wx[:, n1:n1 + N]
        if S.is_tensor(Wx):
            Wx = Wx.contiguous()

    scales, cwt_scaletype, *_ = process_scales(scales, N, wavelet, nv=nv,
                                               get_params=True)

    # regular CWT
    if order == 0:
        # l1_norm=True to spare a multiplication; for SSQ_CWT L1 & L2 are exactly
        # same anyway since we're inverting CWT over time-frequency plane
        rpadded = (difftype == 'numeric')
        Wx, scales, dWx = cwt(x, wavelet, scales=scales, fs=fs, nv=nv,
                              l1_norm=True, derivative=True, padtype=padtype,
                              rpadded=rpadded, vectorized=vectorized,
                              astensor=True, patience=patience,
                              cache_wavelet=cache_wavelet, nan_checks=nan_checks)

    # make copy of `Wx` if specified
    if preserve_transform is None:
        preserve_transform = not S.is_tensor(Wx)
    if preserve_transform:
        _Wx = (Wx.copy() if not S.is_tensor(Wx) else
               Wx.detach().clone())
    else:
        _Wx = Wx

    # gamma
    if gamma is None:
        gamma = 10 * (EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)

    # compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
    if get_w:
        _Wx, w = _phase_transform(_Wx, dWx, N, dt, gamma, difftype, difforder)
        _dWx = None  # don't use in `ssqueeze`
        if not get_dWx:
            dWx = None
    else:
        w = None
        _dWx = dWx

    # default to same scheme used by `scales`
    if ssq_freqs is None:
        ssq_freqs = cwt_scaletype

    # affects `maprange` computation if non-tuple
    was_padded = bool(padtype is not None)
    # synchrosqueeze
    Tx, ssq_freqs = ssqueeze(_Wx, w, ssq_freqs, scales, fs=fs,
                             squeezing=squeezing, maprange=maprange,
                             wavelet=wavelet, gamma=gamma, was_padded=was_padded,
                             flipud=flipud, dWx=_dWx, transform='cwt')

    # postprocessing & return
    if difftype == 'numeric':
        # drop the 4-sample guard band added for numeric differentiation
        Wx = Wx[:, 4:-4]
        Tx = Tx[:, 4:-4]
        w = w[:, 4:-4] if w is not None else None

    if not astensor and S.is_tensor(Tx):
        Tx, Wx, w, dWx, scales, ssq_freqs = [
            g.cpu().numpy() if S.is_tensor(g) else g
            for g in (Tx, Wx, w, dWx, scales, ssq_freqs)]

    scales = scales.squeeze()
    if get_w and get_dWx:
        return Tx, Wx, ssq_freqs, scales, w, dWx
    elif get_w:
        return Tx, Wx, ssq_freqs, scales, w
    elif get_dWx:
        return Tx, Wx, ssq_freqs, scales, dWx
    else:
        return Tx, Wx, ssq_freqs, scales
def issq_cwt(Tx, wavelet='gmw', cc=None, cw=None):
    """Inverse synchrosqueezing transform of `Tx` with associated frequencies
    in `fs` and curve bands in time-frequency plane specified by `Cs` and
    `freqband`. This implements Eq. 15 of [1].

    # Arguments:
        Tx: np.ndarray
            Synchrosqueezed CWT of `x` (see `ssq_cwt`).
            (rows=~frequencies, cols=timeshifts)

        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet that was used to compute Tx, sampled in Fourier
            frequency domain.
                - str: name of builtin wavelet. `ssqueezepy.wavs()`
                - tuple[str, dict]: name of builtin wavelet and its configs.
                  E.g. `('morlet', {'mu': 5})`.
                - `wavelets.Wavelet` instance. Can use for custom wavelet.

        cc, cw: np.ndarray / None
            Curve centerpoints, and curve (vertical) widths (bandwidths),
            together defining the portion of Tx to invert over to extract
            K "components" per Modulation Model:
                x_k(t) = A_k(t) cos(phi_k(t)) + res;  k=0,...,K-1
            where K=len(c)==len(cw), and `res` is residual error (inversion
            over portion leftover/uncovered by cc, cw).
            None = full inversion.

    # Returns:
        x: np.ndarray [K x Tx.shape[1]]
            Components of reconstructed signal, and residual error.
            If cb & cw are None, x.shape == (Tx.shape[1],). See `cb, cw`.

    # Example:
        Tx, *_ = ssq_cwt(x, 'gmw')  # synchrosqueezed CWT
        x      = issq_cwt(Tx, 'gmw')  # reconstruction

    # References:
        1. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications.
        G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010

        2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
        I. Daubechies, S. Maes.
        https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf

        3. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
        https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf

        4. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        synsq_cwt_iw.m
    """
    cc, cw, full_inverse = _process_component_inversion_args(cc, cw)

    if full_inverse:
        # Integration over all frequencies recovers original signal
        x = Tx.real.sum(axis=0)
    else:
        x = _invert_components(Tx, cc, cw)

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    Css = adm_ssq(wavelet)  # admissibility coefficient
    # *2 per analytic wavelet & taking real part; Theorem 4.5 [2]
    x *= (2 / Css)
    return x
def _invert_components(Tx, cc, cw):
# Invert Tx around curve masks in the time-frequency plane to recover
# individual components; last one is the remaining signal
x = np.zeros((cc.shape[1] + 1, cc.shape[0]))
TxRemainder = Tx.copy()
for n in range(cc.shape[1]):
TxMask = np.zeros(Tx.shape, dtype='complex128')
upper_cc = np.clip(cc[:, n] + cw[:, n], 0, len(Tx))
lower_cc = np.clip(cc[:, n] - cw[:, n], 0, len(Tx))
# cc==-1 denotes no curve at that time,
# removing such points from inversion
upper_cc[np.where(cc[:, n] == -1)] = 0
lower_cc[np.where(cc[:, n] == -1)] = 1
for m in range(Tx.shape[1]):
idxs = slice(lower_cc[m], upper_cc[m] + 1)
TxMask[idxs, m] = Tx[idxs, m]
TxRemainder[idxs, m] = 0
x[n] = TxMask.real.sum(axis=0).T
x[n + 1] = TxRemainder.real.sum(axis=0).T
return x
def _process_component_inversion_args(cc, cw):
if (cc is None) and (cw is None):
full_inverse = True
else:
full_inverse = False
if cc.ndim == 1:
cc = cc.reshape(-1, 1)
if cw.ndim == 1:
cw = cw.reshape(-1, 1)
cc = cc.astype('int32')
cw = cw.astype('int32')
return cc, cw, full_inverse
def phase_cwt(Wx, dWx, difftype='trig', gamma=None, parallel=None):
    """Calculate the phase transform at each (scale, time) pair:
        w[a, b] = Im((1/2pi) * d/db (Wx[a,b]) / Wx[a,b])
    See above Eq 20.3 in [1], or Eq 13 in [2].

    # Arguments:
        Wx: np.ndarray / tensor
            CWT of `x` (see `help(cwt)`).
        dWx: np.ndarray / tensor
            Time-derivative of `Wx`, computed via frequency-domain
            differentiation (derivative of trigonometric interpolation; [4]).
        difftype: str['trig', 'phase']
            - 'trig': uses `dWx` as described in Sec IIIB of [2].
            - 'phase': forward finite-difference of the unwrapped angle of
              `Wx`. No GPU or multi-threaded CPU support.
        gamma: float / None
            Stability threshold; |Wx| < gamma maps to inf. Defaults to the
            square root of the float epsilon of `Wx`'s precision.
        parallel: bool (default `ssqueezepy.IS_PARALLEL()`)
            Whether to use multiple CPU threads (ignored if input is tensor).

    # Returns:
        w: np.ndarray / tensor
            Phase transform for each element of `Wx`. w.shape == Wx.shape.

    # References:
        1. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
           I. Daubechies, S. Maes.
        2. The Synchrosqueezing algorithm for time-varying spectral analysis.
           G. Thakur, E. Brevdo, N.-S. Fuckar, and H.-T. Wu.
        3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
           Decomposition. I. Daubechies, J. Lu, H.T. Wu.
        4. The Exponential Accuracy of Fourier and Chebyshev Differencing
           Methods. E. Tadmor.
        5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
    """
    # validate inputs (former nested `_process_input`, inlined)
    S.warn_if_tensor_and_par(Wx, parallel)
    gpu = S.is_tensor(Wx)
    if difftype != 'trig':
        if gpu:
            raise ValueError("`difftype != 'trig'` unsupported with tensor "
                             "inputs.")
        elif parallel:
            raise ValueError("`difftype != 'trig'` unsupported with "
                             "`parallel`.")
    if gamma is None:
        gamma = np.sqrt(EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)

    if difftype == 'trig':
        w = (phase_cwt_gpu(Wx, dWx, gamma) if gpu else
             phase_cwt_cpu(Wx, dWx, gamma, parallel))
    elif difftype == 'phase':
        # TODO gives bad results; shouldn't we divide by Wx?
        angles = np.unwrap(np.angle(Wx)).T
        w = np.vstack([np.diff(angles, axis=0),
                       angles[-1] - angles[0]]).T / (2*pi)
        np.abs(w, out=w)
        replace_under_abs(w, ref=Wx, value=gamma, replacement=np.inf)
    else:
        raise ValueError(f"unsupported `difftype` '{difftype}'; must be one of "
                         "'trig', 'phase'.")
    return w
def phase_cwt_num(Wx, dt, difforder=4, gamma=None):
    """Calculate the phase transform at each (scale, time) pair:
        w[a, b] = Im((1/2pi) * d/db (Wx[a,b]) / Wx[a,b])
    Uses numeric differentiation (1st, 2nd, or 4th order). See above Eq 20.3
    in [1], or Eq 13 in [2].

    # Arguments:
        Wx: np.ndarray
            CWT of `x` (see `cwt`).
        dt: float
            Sampling period (e.g. t[1] - t[0]).
        difforder: int[1, 2, 4]
            Order of differentiation (default=4).
        gamma: float / None
            See `help(ssqueezepy.ssq_cwt)`. Defaults to 10x the float epsilon
            of `Wx`'s precision.

    # Returns:
        w: np.ndarray
            Phase transform via demodulated FM-estimates. w.shape == Wx.shape.

    # References:
        1. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
           I. Daubechies, S. Maes.
           https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
        2. The Synchrosqueezing algorithm for time-varying spectral analysis:
           robustness properties and new paleoclimate applications.
           G. Thakur, E. Brevdo, N.-S. Fuckar, and H.-T. Wu.
           https://arxiv.org/abs/1105.0010
        3. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
           https://github.com/ebrevdo/synchrosqueezing/blob/master/
           synchrosqueezing/phase_cwt_num.m
    """
    # unreliable; bad results on high freq pure tones
    def _differentiate(Wx, dt):
        if difforder in (2, 4):
            # wrap-around padding so higher-order stencils stay in-bounds
            Wxr = np.hstack([Wx[:, -2:], Wx, Wx[:, :2]])
        if difforder == 1:
            w = np.hstack([Wx[:, 1:] - Wx[:, :-1],
                           Wx[:, :1] - Wx[:, -1:]])
            w /= dt
        elif difforder == 2:
            # calculate 2nd-order forward difference
            w = -Wxr[:, 4:] + 4 * Wxr[:, 3:-1] - 3 * Wxr[:, 2:-2]
            w /= (2 * dt)
        elif difforder == 4:
            # calculate 4th-order central difference
            w = -Wxr[:, 4:]
            w += Wxr[:, 3:-1] * 8
            w -= Wxr[:, 1:-3] * 8
            w += Wxr[:, 0:-4]
            w /= (12 * dt)
        return w

    if difforder not in (1, 2, 4):
        raise ValueError("`difforder` must be one of: 1, 2, 4 "
                         "(got %s)" % difforder)
    w = _differentiate(Wx, dt)

    # calculate inst. freq for each scale
    # 2*pi norm per discretized inverse FT rather than inverse DFT
    w = np.real(-1j * w / Wx) / (2*pi)

    # epsilon from Daubechies, H-T Wu, et al.
    # gamma from Brevdo, H-T Wu, et al.
    # Explicit None-check (not `gamma or ...`) so a user-passed `gamma=0` is
    # honored; `np.complex128` replaces `np.cfloat`, an alias removed in
    # NumPy 2.0.
    if gamma is None:
        gamma = 10 * (EPS64 if Wx.dtype == np.complex128 else EPS32)
    w[np.abs(Wx) < gamma] = np.inf

    # see `phase_cwt`, though negatives may no longer be in minority
    w = np.abs(w)
    return w
| 24,265
| 40.059222
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/wavelets.py
|
# -*- coding: utf-8 -*-
import numpy as np
import gc
from numba import jit
from types import FunctionType
from scipy import integrate
from .algos import find_maximum
from .configs import gdefaults, USE_GPU, IS_PARALLEL
from .utils import backend as S
from .utils.fft_utils import ifft, fftshift, ifftshift
from .utils.backend import torch, Q, atleast_1d
class Wavelet():
    """Central wavelet class. `__call__` computes Fourier frequency-domain
    wavelet, `psih`, `.psifn` computes time-domain wavelet, `psi`.
    `Wavelet.SUPPORTED` for names of built-in wavelets passable to `__init__()`;
    `Wavelet.VISUALS` for names of visualizations passable to `viz()`.
    `viz()` to run visuals, `info()` to print relevant wavelet info.
    # Arguments:
        wavelet: str / tuple[str, dict] /FunctionType
            Name of supported wavelet (must be one of `Wavelet.SUPPORTED`)
            or custom function. Or tuple, name of wavelet and its configs,
            e.g. `('morlet', {'mu': 5})`.
        N: int
            Default length of wavelet.
        dtype: str / type (np.dtype) / None
            dtype at which wavelets are generated; can't change after __init__.
            Must be one of `Wavelet.DTYPES`. If None, uses value from
            `configs.ini`, global (if set) or wavelet-specific.
            'float32' is unsupported for GMW's `norm='energy'` and will be
            overridden by 'float64' (with a warning if it was passed to __init__).
    # Example:
        wavelet = Wavelet(('morlet', {'mu': 7}), N=1024)
        plt.plot(wavelet(scale=8))
    """
    SUPPORTED = {'gmw', 'morlet', 'bump', 'cmhat', 'hhhat'}
    VISUALS = {'time-frequency', 'heatmap', 'waveforms', 'filterbank',
               'harea', 'std_t', 'std_w', 'anim:time-frequency'}
    # dtypes at which frequency-domain wavelets may be generated
    DTYPES = {'float32', 'float64'}
    # TODO ensure everything is accounted
    # Attributes whose data is stored on GPU (if env flag 'SSQ_GPU' == '1')
    ON_GPU = {'xi', '_Psih', '_Psih_scale'}
    # Time-frequency attributes; each is lazily cached in `_<name>` and
    # cleared/recomputed by `reset_properties()`
    TF_PROPS = {'wc', 'wc_ct', 'scalec_ct', 'std_t', 'std_w',
                'std_t_d', 'std_w_d'}
    def __init__(self, wavelet='gmw', N=1024, dtype=None):
        # dtype may also be set later from the wavelet function's output,
        # inside `_validate_and_set_wavelet`
        self._dtype = self._process_dtype(dtype, as_str=True
                                          ) if dtype is not None else None
        self._validate_and_set_wavelet(wavelet)
        self.N = N  # also sets _xi
    #### Main methods / properties ###########################################
    def __call__(self, w=None, *, scale=None, N=None, nohalf=True, imag_th=1e-8):
        """wavelet(w) if called with positional argument, w = float or array, else
        wavelet(scale * xi), where `xi` is recomputed if `N` is not None.
        `nohalf=False` (default=True) halves the Nyquist bin for even-length
        psih to ensure proper time-domain wavelet decay and analyticity:
            https://github.com/jonathanlilly/jLab/issues/13
        If evaluated wavelet's imaginary component is less than `imag_th`*(sum of
        real), will drop it; set to None to disable.
        """
        if w is not None:
            psih = self.fn(S.asarray(w, self.dtype))
        else:
            psih = self.fn(self.xifn(scale, N))
        if not nohalf:
            psih = self._halve_nyquist(psih)
        # drop a negligible imaginary part (numerical noise for real wavelets)
        if (S.is_dtype(psih, ('complex64', 'complex128')) and
            (imag_th is not None) and
            (psih.imag.sum() / psih.real.sum() < imag_th)):
            psih = psih.real
        return psih
    @staticmethod
    def _halve_nyquist(psih):
        """https://github.com/jonathanlilly/jLab/issues/13"""
        N = len(psih) if psih.ndim == 1 else psih.shape[1]
        if N % 2 == 0:
            if psih.ndim == 1:
                psih[N//2] /= 2
            else:
                psih[:, N//2] /= 2
        return psih
    def psifn(self, w=None, *, scale=None, N=None):
        """Compute time-domain wavelet; simply `ifft(psih)` with appropriate
        extra steps.
        """
        psih = self(w, scale=scale, N=N, nohalf=False)
        if psih.ndim in (1, 2):
            pn = (-1)**S.arange(psih.shape[-1], dtype=self.dtype)
        else:
            raise ValueError("`psih` must yield to 1D or 2D (got %s)" % psih.ndim)
        # * pn = freq-domain spectral reversal to center time-domain wavelet
        psi = ifft(psih * pn, axis=-1)
        return psi
    def xifn(self, scale=None, N=None):
        """Computes `xi`, radian frequencies at which `wavelet` is sampled,
        as fraction of sampling frequency: 0 to pi & -pi to 0, scaled by
        `scale` - or more precisely:
            N=128: [0, 1, 2, ..., 64, -63, -62, ..., -1] * (2*pi / N) * scale
            N=129: [0, 1, 2, ..., 64, -64, -63, ..., -1] * (2*pi / N) * scale
        """
        if isinstance(scale, (np.ndarray, torch.Tensor)) and len(scale) > 1:
            if scale.squeeze().ndim > 1:
                raise ValueError("2D `scale` unsupported")
            elif scale.ndim == 1:
                scale = scale.reshape(-1, 1)  # add dim for proper broadcast
        elif scale is None:
            scale = 1.
        scale = S.asarray(scale, dtype=self.dtype)
        if N is None:
            # reuse the cached grid for the default length
            xi = scale * self.xi
        else:
            xi = scale * S.asarray(_xifn(scale=1., N=N,
                                         dtype=getattr(np, self.dtype)))
        return xi
    def Psih(self, scale=None, N=None, nohalf=True):
        """Return pre-computed `psih` at scale(s) `scale` of length `N` if
        same `scale` & `N` were passed previously, else compute anew.
        `dtype` will override `self.dtype` if not None.
        If both `scale` & `N` are None, will return previously computed `Psih`.
        """
        # sentinels: [-1] can't equal a valid N or scale array
        pN = getattr(self, '_Psih_N', S.array([-1]))
        ps = getattr(self, '_Psih_scale', S.array([-1]))
        N_is_None = N is None
        N = N or self.N
        if ((scale is None and N_is_None) or
            (N == pN and (len(scale) == len(ps) and S.allclose(scale, ps)))):
            return self._Psih
        # first empty existing to free memory
        if getattr(self, '_Psih', None) is not None:
            self._Psih = None
            gc.collect()
        self._Psih = self(scale=scale, N=N, nohalf=nohalf)
        self._Psih_N = N
        self._Psih_scale = scale
        return self._Psih
    @property
    def N(self):
        """Default value used when `N` is not passed to a `Wavelet` method."""
        return self._N
    @N.setter
    def N(self, value):
        """Ensure `xi` always matches `N`."""
        self._N = value
        self._xi = S.asarray(_xifn(scale=1, N=value,
                                   dtype=getattr(np, self.dtype)))
    @property
    def xi(self):
        """`xi` computed at `scale=1` and `N=self.N`. See `help(Wavelet.xifn)`."""
        return self._xi
    @property
    def dtype(self):
        """dtype at which psih and psi are generated; can't change post-init."""
        return self._dtype
    #### Properties ##########################################################
    @property
    def name(self):
        """Name of underlying freq-domain function, processed by
        `wavelets._fn_to_name`.
        """
        return _fn_to_name(self.fn)
    @property
    def config_str(self):
        """`self.config` formatted into a nice string."""
        if self.config:
            cfg = ""
            for k, v in self.config.items():
                if k in ('norm', 'centered_scale', 'dtype'):
                    # too long, no real need
                    continue
                elif k == 'order' and v == 0:
                    # no need to include base wavelet's order
                    continue
                elif isinstance(v, float) and v.is_integer():
                    v = int(v)
                cfg += "{}={}, ".format(k, v)
            cfg = cfg.rstrip(', ')
        else:
            cfg = "Default configs"
        return cfg
    @property
    def wc(self):
        """Energy center frequency at scale=scalec_ct [(radians*cycles)/samples]
        Ideally we'd compute at `scale=1`, but that's trouble for 'energy' center
        frequency; see `help(wavelets.center_frequency)`. Away from scale
        extrema, 'energy' and 'peak' are same for bell-like |wavelet(w)|^2.
        Reported as "dimensional" in `info()` since it's tied to same `scale`
        used for computing `std_t_d` & `std_t_w`
        """
        if getattr(self, '_wc', None) is None:
            self._wc = center_frequency(self, scale=self.scalec_ct, N=self.N,
                                        kind='energy')
        return self._wc
    @property
    def wc_ct(self):
        """'True' radian peak center frequency, i.e. `w` which maximizes the
        underlying continuous-time function. Can be used to find `scale`
        that centers the wavelet anywhere from 0 to pi in discrete space.
        Reported as "nondimensional" in `info()` since it's scale-decoupled.
        """
        if getattr(self, '_wc_ct', None) is None:
            self._wc_ct = center_frequency(self, kind='peak-ct', N=self.N)
        return self._wc_ct
    @property
    def scalec_ct(self):
        """'Center scale' in sense of `wc_ct`, making wavelet peak at pi/4.
        See `help(Wavelet.wc_ct)`.
        """
        if getattr(self, '_scalec_ct', None) is None:
            self._scalec_ct = (4/pi) * self.wc_ct
        return self._scalec_ct
    @property
    def std_t(self):
        """Non-dimensional time resolution"""
        if getattr(self, '_std_t', None) is None:
            # scale=10 arbitrarily chosen to yield good compute-accurary
            self._std_t = time_resolution(self, scale=self.scalec_ct, N=self.N,
                                          nondim=True)
        return self._std_t
    @property
    def std_w(self):
        """Non-dimensional frequency resolution (radian)"""
        if getattr(self, '_std_w', None) is None:
            self._std_w = freq_resolution(self, scale=self.scalec_ct, N=self.N,
                                          nondim=True)
        return self._std_w
    @property
    def std_f(self):
        """Non-dimensional frequency resolution (cyclic)"""
        return self.std_w / (2*pi)
    @property
    def harea(self):
        """Heisenberg area: std_t * std_w >= 0.5"""
        return self.std_t * self.std_w
    @property
    def std_t_d(self):
        """Dimensional time resolution [samples/(cycles*radians)]"""
        if getattr(self, '_std_t_d', None) is None:
            self._std_t_d = time_resolution(self, scale=self.scalec_ct, N=self.N,
                                            nondim=False)
        return self._std_t_d
    @property
    def std_w_d(self):
        """Dimensional frequency resolution [(cycles*radians)/samples]"""
        if getattr(self, '_std_w_d', None) is None:
            self._std_w_d = freq_resolution(self, scale=self.scalec_ct, N=self.N,
                                            nondim=False)
        return self._std_w_d
    @property
    def std_f_d(self):
        """Dimensional frequency resolution [cycles/samples]"""
        return self.std_w_d / (2*pi)
    #### Misc ################################################################
    def info(self, nondim=True, reset=False):
        """Prints time & frequency resolution quantities. Refer to pertinent
        methods' docstrings on how each quantity is computed, and to
        tests/props_test.py on various dependences (e.g. `std_t` on `N`).
        If `reset`, will recompute all quantities (can be used with e.g. new `N`).
        See `help(Wavelet.x)`, x: `std_t, std_w, wc, wc_ct, scalec_ct`.
        Detailed overview: https://dsp.stackexchange.com/q/72042/50076
        """
        if reset:
            self.reset_properties()
        if nondim:
            cfg = self.config_str
            dim_t = dim_w = "non-dimensional"
            std_t, std_w = self.std_t, self.std_w
            wc_txt = "wc_ct, (cycles*radians)"
            wc = self.wc_ct
        else:
            cfg = self.config_str + " -- scale=%.2f" % self.scalec_ct
            dim_t = "samples/(cycles*radians)"
            dim_w = "(cycles*radians)/samples"
            std_t, std_w = self.std_t_d, self.std_w_d
            wc_txt = "wc, (cycles*radians)/samples; %.2f" % self.scalec_ct
            wc = self.wc
        harea = std_t * std_w
        print(("{} wavelet\n"
               "\t{}\n"
               "\tCenter frequency: {:<10.6f} [{}]\n"
               "\tTime resolution: {:<10.6f} [std_t, {}]\n"
               "\tFreq resolution: {:<10.6f} [std_w, {}]\n"
               "\tHeisenberg area: {:.12f}"
               ).format(self.name, cfg, wc, wc_txt,
                        std_t, dim_t, std_w, dim_w, harea))
    def reset_properties(self):
        """Reset time-frequency properties (`Wavelet.TF_PROPS`), i.e.
        recompute for current `self.N`.
        """
        for name in self.TF_PROPS:
            setattr(self, f'_{name}', None)
            getattr(self, name)  # trigger recomputation
    def viz(self, name='overview', **kw):
        """`Wavelet.VISUALS` for list of supported `name`s."""
        if name == 'overview':
            for name in ('heatmap', 'harea', 'filterbank', 'time-frequency'):
                kw['N'] = kw.get('N', self.N)
                self._viz(name, **kw)
        elif name not in Wavelet.VISUALS:
            raise ValueError(f"visual '{name}' not supported; must be one of: "
                             + ', '.join(Wavelet.VISUALS))
        else:
            self._viz(name, **kw)
    def _viz(self, name, **kw):
        # dispatch table: visual name -> plotting function in `visuals`
        kw['wavelet'] = kw.get('wavelet', self)
        kw['N'] = kw.get('N', self.N)
        {
            'heatmap': visuals.wavelet_heatmap,
            'waveforms': visuals.wavelet_waveforms,
            'filterbank': visuals.wavelet_filterbank,
            'harea': visuals.sweep_harea,
            'std_t': visuals.sweep_std_t,
            'std_w': visuals.sweep_std_w,
            'time-frequency': visuals.wavelet_tf,
            'anim:time-frequency': visuals.wavelet_tf_anim,
        }[name](**kw)
    def _desc(self, N=None, scale=None, show_N=True):
        """Nicely-formatted parameter summary, used in other methods"""
        if self.config_str != "Default configs":
            ptxt = self.config_str.rstrip(', ') + ', '
        else:
            ptxt = ""
        N = N or self.N
        if scale is None:
            title = "{} wavelet | {}N={}".format(self.name, ptxt, N)
        else:
            title = "{} wavelet | {}scale={:.2f}, N={}".format(
                self.name, ptxt, scale, N)
        if not show_N:
            title = title[:title.find(f"N={N}")].rstrip(', ')
        return title
    @classmethod
    def _process_dtype(self, dtype, as_str=None):
        """Ensures `dtype` is supported, and converts per `as_str` (if True,
        numpy/torch -> str, else vice versa; if None, returns as-is).
        """
        # NOTE(review): first parameter of this classmethod is named `self`
        # but binds the class (conventionally `cls`)
        if isinstance(dtype, str):
            assert_is_one_of(dtype, 'dtype', Wavelet.DTYPES)
            if not as_str:
                return getattr(Q, dtype)
        elif not isinstance(dtype, (type, np.dtype, torch.dtype)):
            raise TypeError("`dtype` must be string or type (np./torch.dtype) "
                            "(got %s)" % dtype)
        return dtype if not as_str else str(dtype).split('.')[-1]
    #### Init ################################################################
    @classmethod
    def _init_if_not_isinstance(self, wavelet, **kw):
        """Circumvents type change from IPython's super-/auto-reload,
        but first checks with usual isinstance."""
        if isinstance_by_name(wavelet, Wavelet):
            return wavelet
        return Wavelet(wavelet, **kw)
    def _validate_and_set_wavelet(self, wavelet):
        # Accepts a function, a wavelet name, or (name, configs) tuple;
        # sets `self.fn`, `self.config`, and possibly `self._dtype`.
        def process_dtype(wavopts, user_passed_float32):
            """Handles GMW's `norm='energy'` w/ dtype='float32'."""
            if wavopts.get('norm', 'bandpass') == 'energy':
                if user_passed_float32:
                    WARN("`norm='energy'` w/ `dtype='float32'` is unsupported; "
                         "will use 'float64' instead.")
                wavopts['dtype'] = 'float64'
                self._dtype = 'float64'
            elif self.dtype is not None:
                wavopts['dtype'] = self.dtype
        def set_dtype_from_out():
            # 32 will promote to 64 if other params are 64
            out_dtype = self.fn(S.asarray([1.], dtype='float32')).dtype
            if any(tp in str(out_dtype) for tp in ('complex64', 'complex128')):
                # 'bump' wavelet case
                out_dtype = ('float32' if 'complex64' in str(out_dtype) else
                             'float64')
            self._dtype = self._process_dtype(out_dtype, as_str=True)
        if isinstance(wavelet, FunctionType):
            self.fn = wavelet
            set_dtype_from_out()
            self.config = {}
            return
        errmsg = ("`wavelet` must be one of: (1) string name of supported "
                  "wavelet; (2) tuple of (1) and dict of wavelet parameters "
                  "(e.g. {'mu': 5}); (3) custom function taking `scale * xi` "
                  "as input. (got: %s)" % str(wavelet))
        if not isinstance(wavelet, (tuple, str)):
            raise TypeError(errmsg)
        elif isinstance(wavelet, tuple):
            if not (len(wavelet) == 2 and isinstance(wavelet[1], dict)):
                raise TypeError(errmsg)
            wavelet, wavopts = wavelet
        elif isinstance(wavelet, str):
            wavopts = {}
        user_passed_float32 = any('float32' in str(t)
                                  for t in (self.dtype, wavopts.get('dtype', 0)))
        if isinstance(wavelet, str):
            wavelet = wavelet.lower()
            module = 'wavelets' if wavelet != 'gmw' else '_gmw'
            wavopts = gdefaults(f"{module}.{wavelet}", get_all=True,
                                as_dict=True, default_order=True, **wavopts)
            process_dtype(wavopts, user_passed_float32)
            assert_is_one_of(wavelet, 'wavelet', Wavelet.SUPPORTED)
            self.fn = {
                'gmw': gmw,
                'morlet': morlet,
                'bump': bump,
                'cmhat': cmhat,
                'hhhat': hhhat,
            }[wavelet](**wavopts)
        if self.dtype is None:
            set_dtype_from_out()
        self.config = wavopts
@jit(nopython=True, cache=True)
def _xifn(scale, N, dtype=np.float64):
    """Radian frequency grid in FFT order, scaled by `scale`:
        N=128: [0, 1, 2, ..., 64, -63, -62, ..., -1] * (2*pi / N) * scale
        N=129: [0, 1, 2, ..., 64, -64, -63, ..., -1] * (2*pi / N) * scale
    """
    out = np.zeros(N, dtype=dtype)
    step = scale * (2 * pi) / N
    nyq = N // 2 + 1
    # non-negative half, then negative half (FFT ordering)
    for k in range(nyq):
        out[k] = k * step
    for k in range(nyq, N):
        out[k] = (k - N) * step
    return out
def _process_params_dtype(*params, dtype, auto_gpu=True):
    """Cast wavelet parameters to a common dtype.

    When `dtype` is None, it is inferred from the first param and all params
    are cast: via the active backend (CPU/GPU) if `auto_gpu`, else via NumPy.
    Returns a list when given multiple params, else the single param.

    NOTE(review): the casting is nested under `dtype is None`, so params pass
    through unchanged when an explicit `dtype` is given -- confirm intended.
    """
    if dtype is None:
        dtype = S.asarray(params[0]).dtype
        if auto_gpu:
            dtype = Wavelet._process_dtype(dtype, as_str=True)
            params = [S.astype(S.asarray(p), dtype) for p in params]
        else:
            dtype = Wavelet._process_dtype(dtype, as_str=True)
            params = [np.asarray(p).astype(dtype) for p in params]
    return params if len(params) > 1 else params[0]
#### Wavelet functions ######################################################
def morlet(mu=None, dtype=None):
    """Morlet wavelet (frequency domain), returned as a callable of `w`.
    Higher `mu` -> greater frequency, lesser time resolution.
    Recommended range: 4 to 16. For `mu > 6` the wavelet is almost exactly
    Gaussian for most scales, providing maximum joint resolution.
    `mu=13.4` matches Generalized Morse Wavelets' `(beta, gamma) = (3, 60)`.
    For full correspondence see `help(_gmw.gmw)`.
        https://en.wikipedia.org/wiki/Morlet_wavelet#Definition
        https://www.desmos.com/calculator/0nslu0qivv
    """
    mu, dtype = gdefaults('wavelets.morlet', mu=mu, dtype=dtype)
    norm_c = (1 + np.exp(-mu**2) - 2 * np.exp(-3/4 * mu**2)) ** (-.5)
    kappa = np.exp(-.5 * mu**2)
    mu, norm_c, kappa = _process_params_dtype(mu, norm_c, kappa, dtype=dtype)
    # remaining constants go into `C`; keeps numba.jit from type-promoting
    # to float64 via Python float literals (e.g. `2.`)
    C = S.asarray([-.5, np.sqrt(2) * norm_c * pi**.25], dtype=dtype)
    if USE_GPU():
        fn = _morlet_gpu
    elif IS_PARALLEL():
        fn = _morlet_par
    else:
        fn = _morlet
    return lambda w: fn(atleast_1d(w, dtype), mu, kappa, C)
@jit(nopython=True, cache=True)
def _morlet(w, mu, ks, C):
    # C[0] = -0.5, C[1] = normalization; `ks` term corrects for nonzero mean
    main_bell = np.exp(C[0] * (w - mu)**2)
    correction = ks * np.exp(C[0] * w**2)
    return C[1] * (main_bell - correction)
@jit(nopython=True, cache=True, parallel=True)
def _morlet_par(w, mu, ks, C):
    # parallel (multi-threaded) variant of `_morlet`
    main_bell = np.exp(C[0] * (w - mu)**2)
    correction = ks * np.exp(C[0] * w**2)
    return C[1] * (main_bell - correction)
def _morlet_gpu(w, mu, ks, C):
return C[1] * (torch.exp(C[0] * (w - mu)**2) - ks * torch.exp(C[0] * w**2))
def bump(mu=None, s=None, om=None, dtype=None):
    """Bump wavelet (frequency domain), returned as a callable of `w`.
    https://www.mathworks.com/help/wavelet/gs/choose-a-wavelet.html
    """
    mu, s, om, dtype = gdefaults('wavelets.bump', mu=mu, s=s, om=om, dtype=dtype)
    # the bump wavelet is complex-valued; promote dtype accordingly
    if 'float' in dtype:
        dtype = 'complex' + str(2 * int(dtype.strip('float')))
    mu, s, om = [S.asarray(g, dtype) for g in (mu, s, om)]
    C = S.asarray([2 * pi * 1j * om, .443993816053287], dtype=dtype)
    # support threshold kept real-valued
    C0 = S.asarray(.999, dtype='float' + str(int(dtype.strip('complex'))//2))
    if USE_GPU():
        fn = _bump_gpu
    elif IS_PARALLEL():
        fn = _bump_par
    else:
        fn = _bump
    return lambda w: fn(atleast_1d(w, dtype), (atleast_1d(w, dtype) - mu) / s,
                        s, C, C0)
@jit(nopython=True, cache=True)
def _bump(w, _w, s, C, C0):
    # support mask: wavelet is zero where |_w| >= C0 (~1)
    support = np.abs(_w) < C0
    window = np.exp(-1 / (1 - (_w * support)**2))
    return np.exp(C[0] * w) / s * support * window / C[1]
@jit(nopython=True, cache=True, parallel=True)
def _bump_par(w, _w, s, C, C0):
    # parallel (multi-threaded) variant of `_bump`
    support = np.abs(_w) < C0
    window = np.exp(-1 / (1 - (_w * support)**2))
    return np.exp(C[0] * w) / s * support * window / C[1]
def _bump_gpu(w, _w, s, C, C0):
return torch.exp(C[0] * w) / s * (
torch.abs(_w) < C0) * torch.exp(
-1 / (1 - (_w * (torch.abs(_w) < C0))**2)) / C[1]
def cmhat(mu=None, s=None, dtype=None):
    """Complex Mexican Hat wavelet (frequency domain), returned as a
    callable of `w`.
    https://en.wikipedia.org/wiki/Complex_mexican_hat_wavelet
    """
    mu, s, dtype = gdefaults('wavelets.cmhat', mu=mu, s=s, dtype=dtype)
    mu, s = _process_params_dtype(mu, s, dtype=dtype)
    C = S.asarray([5/2, 2 * np.sqrt(2/3) * pi**(-1/4)], dtype=dtype)
    if USE_GPU():
        fn = _cmhat_gpu
    elif IS_PARALLEL():
        fn = _cmhat_par
    else:
        fn = _cmhat
    return lambda w: fn(atleast_1d(w, dtype) - mu, s, C)
@jit(nopython=True, cache=True)
def _cmhat(_w, s, C):
    # analytic: zero for negative (shifted) frequencies
    analytic = (_w >= 0)
    bell = np.exp(-s**2 * _w**2 / 2)
    return C[1] * (s**C[0] * _w**2 * bell * analytic)
@jit(nopython=True, cache=True, parallel=True)
def _cmhat_par(_w, s, C):
    # parallel (multi-threaded) variant of `_cmhat`
    analytic = (_w >= 0)
    bell = np.exp(-s**2 * _w**2 / 2)
    return C[1] * (s**C[0] * _w**2 * bell * analytic)
def _cmhat_gpu(_w, s, C):
return C[1] * (s**C[0] * _w**2 * torch.exp(-s**2 * _w**2 / 2) * (_w >= 0))
def hhhat(mu=None, dtype=None):
    """Hilbert analytic function of Hermitian Hat (frequency domain),
    returned as a callable of `w`."""
    mu, dtype = gdefaults('wavelets.hhhat', mu=mu, dtype=dtype)
    mu = _process_params_dtype(mu, dtype=dtype)
    C = S.asarray([-1/2, 2 / np.sqrt(5) * pi**(-1/4)], dtype=dtype)
    if USE_GPU():
        fn = _hhhat_gpu
    elif IS_PARALLEL():
        fn = _hhhat_par
    else:
        fn = _hhhat
    return lambda w: fn(atleast_1d(w, dtype) - mu, C)
@jit(nopython=True, cache=True)
def _hhhat(_w, C):
    # (1 + sign) keeps only the analytic (non-negative shifted-freq) part
    core = _w * (1 + _w) * np.exp(C[0] * _w**2)
    return C[1] * core * (1 + np.sign(_w))
@jit(nopython=True, cache=True, parallel=True)
def _hhhat_par(_w, C):
    # parallel (multi-threaded) variant of `_hhhat`
    core = _w * (1 + _w) * np.exp(C[0] * _w**2)
    return C[1] * core * (1 + np.sign(_w))
def _hhhat_gpu(_w, C):
return C[1] * (_w * (1 + _w) * torch.exp(C[0] * _w**2)) * (1 + torch.sign(_w))
#### Wavelet properties ######################################################
def center_frequency(wavelet, scale=None, N=1024, kind='energy', force_int=None,
                     viz=False):
    """Center frequency (radian) of `wavelet`, either 'energy', 'peak',
    or 'peak-ct'.
    Detailed overviews:
        (1) https://dsp.stackexchange.com/a/76371/50076
        (2) https://dsp.stackexchange.com/q/72042/50076
    **Note**: implementations of `center_frequency`, `time_resolution`, and
    `freq_resolution` are discretized approximations of underlying
    continuous-time parameters. This is a flawed approach (see (1)).
        - Caution is advised for scales near minimum and maximim (obtained via
          `cwt_scalebounds(..., preset='maximal')`), where inaccuracies may be
          significant.
        - For intermediate scales and sufficiently large N (>=1024), the methods
          are reliable. May improve in the future

    # Arguments
        wavelet: wavelets.Wavelet
        scale: float / None
            Scale at which to compute `wc`; ignored if `kind='peak-ct'`.
        N: int
            Length of wavelet.
        kind: str['energy', 'peak', 'peak-ct']
            - 'energy': weighted mean of wavelet energy, or energy expectation;
              Eq 4.52 of [1]:
                  wc_1     = int w |wavelet(w)|^2 dw 0..inf
                  wc_scale = int (scale*w) |wavelet(scale*w)|^2 dw 0..inf
                           = wc_1 / scale
            - 'peak': value of `w` at which `wavelet` at `scale` peaks
              (is maximum) in discrete time, i.e. constrained 0 to pi.
            - 'peak-ct': value of `w` at which `wavelet` peaks (without `scale`,
              i.e. `scale=1`), i.e. peak location of the continuous-time
              function.
            - 'energy' == 'peak' for wavelets exactly even-symmetric about mode
              (peak location)
        force_int: bool / None
            Relevant only if `kind='energy'`, then defaulting to True. Set to
            False to compute via formula - i.e. first integrate at a
            "well-behaved" scale, then rescale. For intermediate scales, this
            won't yield much difference. For extremes, it matches the
            continuous-time results closer - but this isn't recommended, as it
            overlooks limitations imposed by discretization.
        viz: bool (default False)
            Whether to visualize obtained center frequency.

    # Returns
        wc: float
            Center frequency [(cycles*radians)/samples]; divide by 2*pi and
            multiply by fs for Hz.

    # References
        1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
           https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
    """
    def _viz(wc, params):
        w, psih, apsih2 = params
        _w = w[N//2-1:]; _psih = psih[N//2-1:]; _apsih2 = apsih2[N//2-1:]
        wc = wc if (kind != 'peak-ct') else pi/4
        vline = (wc, dict(color='tab:red', linestyle='--'))
        plot(_w, _psih, show=1, vlines=vline,
             title="psih(w)+ (frequency-domain wavelet, pos half)")
        plot(_w, _w * _apsih2, show=1,
             title="w^2 |psih(w)+|^2 (used to compute wc)")
        print("wc={}".format(wc))

    def _params(wavelet, scale, N):
        # sample `wavelet` over the (shifted) frequency grid at `scale`
        w = S.asarray(aifftshift(_xifn(1, N)))
        psih = asnumpy(wavelet(S.asarray(scale) * w))
        apsih2 = np.abs(psih)**2
        w = asnumpy(w)
        return w, psih, apsih2

    def _energy_wc(wavelet, scale, N, force_int):
        # energy expectation; with `use_formula`, integrate at a well-behaved
        # scale then rescale analytically
        use_formula = not force_int
        if use_formula:
            scale_orig = scale
            wc_ct = _peak_ct_wc(wavelet, N)[0]
            scale = (4/pi) * wc_ct
        w, psih, apsih2 = _params(wavelet, scale, N)
        wc = (integrate.trapz(apsih2 * w) /
              integrate.trapz(apsih2))
        if use_formula:
            wc *= (scale / scale_orig)
        return float(wc), (w, psih, apsih2)

    def _peak_wc(wavelet, scale, N):
        w, psih, apsih2 = _params(wavelet, scale, N)
        wc = w[np.argmax(apsih2)]
        return float(wc), (w, psih, apsih2)

    def _peak_ct_wc(wavelet, N):
        wc, _ = find_maximum(wavelet.fn)
        # need `scale` such that `wavelet` peaks at `scale * xi.max()/4`
        # thus: `wc = scale * (pi/2)` --> `scale = (4/pi)*wc`
        scale = S.asarray((4/pi) * wc)
        w, psih, apsih2 = _params(wavelet, scale, N)
        return float(wc), (w, psih, apsih2)

    if force_int and 'peak' in kind:
        NOTE("`force_int` ignored with 'peak' in `kind`")
    assert_is_one_of(kind, 'kind', ('energy', 'peak', 'peak-ct'))
    if kind == 'peak-ct' and scale is not None:
        NOTE("`scale` ignored with `peak = 'peak-ct'`")
    if scale is None and kind != 'peak-ct':
        # see _peak_ct_wc
        wc, _ = find_maximum(wavelet.fn)
        scale = (4/pi) * wc

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    if kind == 'energy':
        # default to True only when unspecified; the previous
        # `force_int = force_int or True` made `force_int=False` impossible
        force_int = True if force_int is None else force_int
        wc, params = _energy_wc(wavelet, scale, N, force_int)
    elif kind == 'peak':
        wc, params = _peak_wc(wavelet, scale, N)
    elif kind == 'peak-ct':
        wc, params = _peak_ct_wc(wavelet, N)
    if viz:
        _viz(wc, params)
    return wc
def freq_resolution(wavelet, scale=10, N=1024, nondim=True, force_int=True,
                    viz=False):
    """Compute wavelet frequency width (std_w) for a given scale and N; larger N
    -> less discretization error, but same N as in application works best
    (larger will be "too accurate" and misrepresent true discretized values).
    `nondim` will divide by peak center frequency and return unitless quantity.
    Eq 22 in [1], Sec 4.3.2 in [2].
    Detailed overview: https://dsp.stackexchange.com/q/72042/50076
    See tests/props_test.py for further info.

    # References
        1. Higher-Order Properties of Analytic Wavelets.
           J. M. Lilly, S. C. Olhede.
           https://sci-hub.st/10.1109/TSP.2008.2007607
        2. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
           https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
    """
    def _viz():
        # positive-frequency halves only
        _w = w[N//2-1:]; _psih = psih[N//2-1:]; _apsih2 = apsih2[N//2-1:]
        plot(_w, _psih, show=1,
             title="psih(w)+ (frequency-domain wavelet, pos half)")
        plot(_w, (_w-wce)**2 * _apsih2, show=1,
             title="(w-wc)^2 |psih(w)+|^2 (used to compute var_w)")
        print("std_w={}".format(std_w))
        if use_formula:
            NOTE(f"integrated at scale={scale} then used formula; "
                 "see help(freq_resolution) and try force_int=True")

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    # formula criterion not optimal; thresholds will vary by wavelet config
    use_formula = ((scale < 4 or scale > N / 5) and not force_int)
    if use_formula:
        # integrate at a well-behaved scale, rescale result afterward
        scale_orig = scale
        scale = (4/pi) * wavelet.wc_ct

    w = aifftshift(_xifn(1, N))
    psih = asnumpy(wavelet(scale * w))
    # energy center frequency; second moment is taken about it
    wce = center_frequency(wavelet, scale, force_int=force_int, kind='energy')
    apsih2 = np.abs(psih)**2
    var_w = (integrate.trapz((w - wce)**2 * apsih2, w) /
             integrate.trapz(apsih2, w))
    std_w = np.sqrt(var_w)
    if use_formula:
        # std_w scales inversely with `scale`
        std_w *= (scale / scale_orig)
        scale = scale_orig
    if nondim:
        # divide by peak center frequency -> unitless
        wcp = center_frequency(wavelet, scale, kind='peak')
        std_w /= wcp
    if viz:
        _viz()
    return std_w
def time_resolution(wavelet, scale=10, N=1024, min_decay=1e3, max_mult=2,
                    min_mult=2, force_int=True, nondim=True, viz=False):
    """Compute wavelet time resolution (std_t) for a given `scale` and `N`.

    Larger N -> less discretization error, but same N as in application should
    suffice. `nondim` will multiply by peak center frequency and return
    unitless quantity.

    Eq 21 in [1], Sec 4.3.2 in [2].
    Detailed overview: https://dsp.stackexchange.com/q/72042/50076
    ______________________________________________________________________________
    **Interpretation**

    Measures time-span of 68% of wavelet's energy (1 stdev for Gauss-shaped
    |psi(t)|^2). Inversely-proportional with `N`, i.e. same `scale` spans half
    the fraction of sequence that's twice long. Is actually *half* the span
    per unilateral (radius) std.

        std_t ~ scale (T / N)
    ______________________________________________________________________________
    **Implementation details**

    `t` may be defined from `min_mult` up to `max_mult` times the original span
    for computing stdev since wavelet may not decay to zero within target frame.
    For any mult > 1, this is biased if we are convolving by sliding windows of
    length `N` in CWT, but we're not (see `cwt`); our scheme captures full
    wavelet characteristics, i.e. as if conv/full decayed length (but only up
    to mult=2).

    `min_decay` controls decay criterion of time-wavelet domain in integrating,
    i.e. ratio of max to endpoints of |psi(t)|^2 must exceed this. Will search
    up to `max_mult * N`-long `t`.

    For small `scale` (<~3) results are harder to interpret and defy expected
    behavior per discretization complications (call with `viz=True`). Workaround
    via computing at stable scale and calculating via formula shouldn't work as
    both-domain behaviors deviate from continuous, complete counterparts.
    ______________________________________________________________________________
    See tests/props_test.py for further info.

    # References
        1. Higher-Order Properties of Analytic Wavelets.
           J. M. Lilly, S. C. Olhede.
           https://sci-hub.st/10.1109/TSP.2008.2007607
        2. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
           https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
    """
    def _viz():
        # positive-frequency half of the sampled frequency-domain wavelet
        _w = aifftshift(xi)[Nt//2-1:]
        _psih = aifftshift(psih)[Nt//2-1:]
        plot(_w, _psih, show=1,
             title="psih(w)+ (frequency-domain wavelet, pos half)")
        plot(t, t**2 * apsi2, title="t^2 |psi(t)|^2 (used to compute var_t)",
             show=1)
        _viz_cwt_scalebounds(wavelet, N, max_scale=scale, std_t=std_t, Nt=Nt)
        print("std_t={}\nlen(t), len(t)/N, t_min, t_max = {}, {}, {}, {}".format(
            std_t, len(t), len(t)/N, t.min(), t.max()))
        if use_formula:
            NOTE(f"integrated at scale={scale} then used formula; "
                 "see help(time_resolution) and try force_int=True")

    def _make_integration_t(wavelet, scale, N, min_decay, max_mult, min_mult):
        """Ensure `psi` decays sufficiently at integration bounds"""
        # grow the integration span until |psi(t)|^2 decays enough at the edges
        for mult in np.arange(min_mult, max_mult + 1):
            Nt = int(mult * N)
            apsi2 = np.abs(asnumpy(wavelet.psifn(scale=scale, N=Nt)))**2
            # ensure sufficient decay at endpoints (assumes ~symmetric decay)
            if apsi2.max() / apsi2[:max(10, Nt//100)].mean() > min_decay:
                break
        else:
            raise Exception(("Couldn't find decay timespan satisfying "
                             "`(min_decay, max_mult) = ({}, {})` for `scale={}`; "
                             "decrease former or increase latter or check "
                             "`wavelet`".format(min_decay, max_mult, scale)))
        # len(t) == mult*N (independent of T)
        # `t` doesn't have zero-mean but that's correct for psi's peak & symmetry
        T = N
        t = np.arange(-mult * T/2, mult * T/2, step=T/N)
        return t

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    # formula criterion not optimal; thresholds will vary by wavelet config
    use_formula = ((scale < 4 or scale > N / 5) and not force_int)
    if use_formula:
        # integrate at a well-behaved reference scale, then rescale the result
        scale_orig = scale
        scale = (4/pi) * wavelet.wc_ct

    t = _make_integration_t(wavelet, scale, N, min_decay, max_mult, min_mult)
    Nt = len(t)

    # sample freq-domain wavelet; (-1)^k centers psi in time (DFT shift property)
    xi = _xifn(1, Nt)
    psih = asnumpy(wavelet(scale * xi, nohalf=False))
    psi = asnumpy(ifft(psih * (-1)**np.arange(Nt)))

    # var_t = second moment of |psi(t)|^2 about t=0
    apsi2 = np.abs(psi)**2
    var_t = (integrate.trapz(t**2 * apsi2, t) /
             integrate.trapz(apsi2, t))
    std_t = np.sqrt(var_t)

    if use_formula:
        # std_t varies proportionally with scale; map back to requested scale
        std_t *= (scale_orig / scale)
        scale = scale_orig
    if nondim:
        # 'energy' yields values closer to continuous-time counterparts,
        # but we seek accuracy relative to discretized values
        wc = center_frequency(wavelet, scale, N=N, kind='peak')
        std_t *= wc
    if viz:
        _viz()
    return std_t
#### Misc ####################################################################
def afftshift(xh):
    """Analytic-wavelet fftshift.

    Needed since analytic wavelets keep Nyquist bin at N//2 positive bin
    whereas FFT convention is to file it under negative (see `_xi`).
    Moves right N//2 + 1 bins to left.
    """
    n = len(xh)
    if n % 2:
        # odd length: standard shift already does the right thing
        return fftshift(xh)
    out = np.zeros(n, dtype=xh.dtype)
    return _afftshift_even(xh, out)
@jit(nopython=True, cache=True)
def _afftshift_even(xh, xhs):
    """Even-length analytic fftshift kernel: writes shifted `xh` into `xhs`."""
    N = len(xh)
    half = N // 2
    # bins 0..half come from the right half, the rest from the left
    for i in range(N):
        src = i + half - 1 if i <= half else i - half - 1
        xhs[i] = xh[src]
    return xhs
def aifftshift(xh):
    """Inverse of `afftshift`; moves left N//2 + 1 bins to right."""
    n = len(xh)
    if n % 2:
        # odd length: standard inverse shift suffices
        return ifftshift(xh)
    out = np.zeros(n, dtype=xh.dtype)
    return _aifftshift_even(xh, out)
@jit(nopython=True, cache=True)
def _aifftshift_even(xh, xhs):
    """Even-length inverse analytic fftshift kernel: writes into `xhs`."""
    N = len(xh)
    half = N // 2
    # scatter: first half+1 bins go right, remaining bins go left
    for i in range(N):
        dst = i + half - 1 if i <= half else i - half - 1
        xhs[dst] = xh[i]
    return xhs
def _fn_to_name(fn):
"""`_` to ` `, removes `<lambda>` & `.`, handles `SPECIALS`."""
SPECIALS = {'Gmw ': 'GMW '}
name = fn.__qualname__.replace('_', ' ').replace('<locals>', '').replace(
'<lambda>', '').replace('.', '').title()
for k, v in SPECIALS.items():
name = name.replace(k, v)
return name
def isinstance_by_name(obj, ref):
    """Name-based `isinstance`: compares fully-qualified class names.

    IPython reload can make isinstance(Obj(), Obj) fail; this survives it.
    Won't work if Obj has __str__ overridden.
    """
    def _full_name(cls):
        qual = getattr(cls, '__qualname__', getattr(cls, '__name__', ''))
        mod = getattr(cls, '__module__', '')
        return (mod + '.' + qual).lstrip('.')
    return _full_name(type(obj)) == _full_name(ref)
##############################################################################
from ._gmw import gmw
from . import visuals
from .visuals import plot, _viz_cwt_scalebounds
from .utils.common import WARN, NOTE, pi, assert_is_one_of
from .utils.backend import asnumpy
| 38,840
| 38.154234
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/_stft.py
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.signal as sig
from .utils import WARN, padsignal, buffer, unbuffer, window_norm
from .utils import _process_fs_and_t
from .utils.fft_utils import fft, ifft, rfft, irfft, fftshift, ifftshift
from .utils.backend import torch, is_tensor
from .algos import zero_denormals
from .wavelets import _xifn, _process_params_dtype
from .configs import gdefaults, USE_GPU
def stft(x, window=None, n_fft=None, win_len=None, hop_len=1, fs=None, t=None,
         padtype='reflect', modulated=True, derivative=False, dtype=None):
    """Short-Time Fourier Transform.

    `modulated=True` computes "modified" variant from [1] which is advantageous
    to reconstruction & synchrosqueezing (see "Modulation" below).

    # Arguments:
        x: np.ndarray
            Input vector(s), 1D or 2D. See `help(cwt)`.

        window: str / np.ndarray / None
            STFT windowing kernel. If string, will fetch per
            `scipy.signal.get_window(window, win_len, fftbins=True)`.
            Defaults to `scipy.signal.windows.dpss(win_len, win_len//8)`;
            the DPSS window provides the best time-frequency resolution.

            Always padded to `n_fft`, so for accurate filter characteristics
            (side lobe decay, etc), best to pass in pre-designed `window`
            with `win_len == n_fft`.

        n_fft: int >= 0 / None
            FFT length, or `(STFT column length) // 2 + 1`.
            If `win_len < n_fft`, will pad `window`. Every STFT column is
            `fft(window * x_slice)`.
            Defaults to `len(x)//hop_len`, up to 512.

        win_len: int >= 0 / None
            Length of `window` to use. Used to generate a window if `window`
            is string, and ignored if it's np.ndarray.
            Defaults to `len(window)` if `window` is np.ndarray, else `n_fft`.

        hop_len: int > 0
            STFT stride, or number of samples to skip/hop over between
            subsequent windowings. Relates to 'overlap' as
            `overlap = n_fft - hop_len`.
            Must be 1 for invertible synchrosqueezed STFT.

        fs: float / None
            Sampling frequency of `x`. Defaults to 1, which makes ssq
            frequencies range from 0 to 0.5*fs, i.e. as fraction of reference
            sampling rate up to Nyquist limit. Used to compute `dSx` and
            `ssq_freqs`.

        t: np.ndarray / None
            Vector of times at which samples are taken
            (eg np.linspace(0, 1, n)). Must be uniformly-spaced.
            Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
            Overrides `fs` if not None.

        padtype: str
            Pad scheme to apply on input. See `help(utils.padsignal)`.

        modulated: bool (default True)
            Whether to use "modified" variant as in [1], which centers DFT
            cisoids at the window for each shift `u`. `False` will not invert
            once synchrosqueezed.
            Recommended to use `True`; see "Modulation" below.

        derivative: bool (default False)
            Whether to compute and return `dSx`. Uses `fs`.

        dtype: str['float32', 'float64'] / None
            Compute precision; use 'float32` for speed & memory at expense of
            accuracy (negligible for most purposes).
            If None, uses value from `configs.ini`.

            To be safe with `'float32'`, time-localized `window`, and large
            `hop_len`, use

                from ssqueezepy._stft import _check_NOLA
                _check_NOLA(window, hop_len, 'float32',
                            imprecision_strict=True)

    **Modulation**

    `True` will center DFT cisoids at the window for each shift `u`:

        Sm[u, k] = sum_{0}^{N-1} f[n] * g[n - u] * exp(-j*2pi*k*(n - u)/N)

    as opposed to usual STFT:

        S[u, k] = sum_{0}^{N-1} f[n] * g[n - u] * exp(-j*2pi*k*n/N)

    Most implementations (including `scipy`, `librosa`) compute *neither*,
    but rather center the window for each slice, thus shifting DFT bases
    relative to n=0 (t=0). These create spectra that, viewed as signals, are
    of high frequency, making inversion and synchrosqueezing very unstable.
    Details & visuals: https://dsp.stackexchange.com/a/72590/50076

    # Returns:
        Sx: [(n_fft//2 + 1) x n_hops] np.ndarray
            STFT of `x`. Positive frequencies only (+dc), via `rfft`.
            (n_hops = (len(x) - 1)//hop_len + 1)
            (rows=scales, cols=timeshifts)
        dWx: [(n_fft//2 + 1) x n_hops] np.ndarray
            Returned only if `derivative=True`.
            Time-derivative of the STFT of `x`, computed via STFT done with
            time-differentiated `window`, as in [1]. This differs from CWT's,
            where its (and Sx's) DFTs are taken along columns rather than rows.
            d/dt(window) obtained via freq-domain differentiation (help(cwt)).

    # References:
        1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533
    """
    def _stft(xp, window, diff_window, n_fft, hop_len, fs, modulated,
              derivative):
        # frame the (padded) signal into overlapping columns
        Sx = buffer(xp, n_fft, n_fft - hop_len, modulated)
        if derivative:
            dSx = buffer(xp, n_fft, n_fft - hop_len, modulated)
        if modulated:
            # align the window with the frames for the "modified" STFT
            window = ifftshift(window, astensor=True)
            if derivative:
                # `* fs` converts per-sample derivative to per-second
                diff_window = ifftshift(diff_window, astensor=True) * fs

        # broadcast the window over frames (and batch dim if `xp` is 2D)
        reshape = (-1, 1) if xp.ndim == 1 else (1, -1, 1)
        Sx *= window.reshape(*reshape)
        if derivative:
            dSx *= (diff_window.reshape(*reshape))

        # keep only positive frequencies (Hermitian symmetry assuming real `x`)
        axis = 0 if xp.ndim == 1 else 1
        Sx = rfft(Sx, axis=axis, astensor=True)
        if derivative:
            dSx = rfft(dSx, axis=axis, astensor=True)
        return (Sx, dSx) if derivative else (Sx, None)

    # process args
    assert x.ndim in (1, 2)
    N = x.shape[-1]
    _, fs, _ = _process_fs_and_t(fs, t, N)
    n_fft = n_fft or min(N//hop_len, 512)

    # process `window`, make `diff_window`, check NOLA, enforce `dtype`
    if win_len is None:
        win_len = (len(window) if isinstance(window, np.ndarray) else
                   n_fft)
    dtype = gdefaults('_stft.stft', dtype=dtype)
    window, diff_window = get_window(window, win_len, n_fft, derivative=True,
                                     dtype=dtype)
    _check_NOLA(window, hop_len, dtype)
    x = _process_params_dtype(x, dtype=dtype, auto_gpu=False)

    # pad `x` to length `padlength`
    padlength = N + n_fft - 1
    xp = padsignal(x, padtype, padlength=padlength)

    # arrays -> tensors if using GPU
    if USE_GPU():
        xp, window, diff_window = [torch.as_tensor(g, device='cuda') for g in
                                   (xp, window, diff_window)]

    # take STFT
    Sx, dSx = _stft(xp, window, diff_window, n_fft, hop_len, fs, modulated,
                    derivative)

    # ensure indexing works as expected downstream (cupy)
    Sx = Sx.contiguous() if is_tensor(Sx) else Sx
    dSx = dSx.contiguous() if is_tensor(dSx) else dSx
    return (Sx, dSx) if derivative else Sx
def istft(Sx, window=None, n_fft=None, win_len=None, hop_len=1, N=None,
          modulated=True, win_exp=1):
    """Inverse Short-Time Fourier transform. Computed with least-squares
    estimate for `win_exp`=1 per Griffin-Lim [1], recommended for STFT with
    modifications, else simple inversion with `win_exp`=0:

        x[n] = sum(y_t[n] * w^a[n - tH]) / sum(w^{a+1}[n - tH]),
        y_t = ifft(Sx), H = hop_len, a = win_exp, t = hop index,
        n = sample index

    Warns if `window` NOLA constraint isn't met (see [2]), invalidating
    inversion. Nice visuals and explanations on istft:
    https://www.mathworks.com/help/signal/ref/istft.html

    # Arguments:
        Sx: np.ndarray
            STFT of 1D `x`.

        window, n_fft, win_len, hop_len, modulated
            Should be same as used in forward STFT. See `help(stft)`.

        N: int > 0 / None
            `len(x)` of original `x`, used in inversion padding and windowing.
            If None, assumes longest possible `x` given `hop_len`,
            `Sx.shape[1]`.

        win_exp: int >= 0
            Window power used in inversion (see [1], [2], or equation above).

    # Returns:
        x: np.ndarray, 1D
            Signal as reconstructed from `Sx`.

    # References:
        1. Signal Estimation from Modified Short-Time Fourier Transform.
        D. W. Griffin, J. S. Lim.
        https://citeseerx.ist.psu.edu/viewdoc/
        download?doi=10.1.1.306.7858&rep=rep1&type=pdf

        2. Invertibility of overlap-add processing. B. Sharpe.
        https://gauss256.github.io/blog/cola.html
    """
    ### process args #####################################
    n_fft = n_fft or (Sx.shape[0] - 1) * 2
    win_len = win_len or n_fft
    N = N or hop_len * Sx.shape[1]  # assume largest possible N if not given

    # infer real dtype from complex `Sx` dtype
    dtype = 'float32' if str(Sx.dtype) == 'complex64' else 'float64'
    window = get_window(window, win_len, n_fft=n_fft, dtype=dtype)
    _check_NOLA(window, hop_len, dtype=dtype)

    # each column back to time domain
    xbuf = irfft(Sx, n=n_fft, axis=0).real
    if modulated:
        # undo the per-frame alignment applied in forward `stft`
        xbuf = fftshift(xbuf, axes=0)

    # overlap-add the columns
    x = unbuffer(xbuf, window, hop_len, n_fft, N, win_exp)

    # window norm, control for float precision
    wn = window_norm(window, hop_len, n_fft, N, win_exp)
    th = np.finfo(x.dtype).tiny
    if wn.min() < th:
        # avoid dividing by ~zero where windows fail to cover
        approx_nonzero_idxs = wn > th
        x[approx_nonzero_idxs] /= wn[approx_nonzero_idxs]
    else:
        x /= wn

    # unpad (forward `stft` padded to N + n_fft - 1)
    x = x[n_fft//2 : -((n_fft - 1)//2)]
    return x
def get_window(window, win_len, n_fft=None, derivative=False, dtype=None):
    """See `window` in `help(stft)`. Will return window of length `n_fft`,
    regardless of `win_len` (will pad if needed).

    If `derivative=True`, also returns the window's time-derivative, computed
    via frequency-domain differentiation.
    """
    if n_fft is None:
        pl, pr = 0, 0
    else:
        if win_len > n_fft:
            raise ValueError("Can't have `win_len > n_fft` ({} > {})".format(
                win_len, n_fft))
        # left/right pad amounts that center the window within `n_fft`
        pl = (n_fft - win_len) // 2
        pr = (n_fft - win_len - pl)

    if window is not None:
        if isinstance(window, str):
            # fftbins=True -> 'periodic' window -> narrower main side-lobe and
            # closer to zero-phase in left=right padded case
            # for windows edging at 0
            window = sig.get_window(window, win_len, fftbins=True)
        elif isinstance(window, np.ndarray):
            # length mismatch is warned, not raised; padding below still uses
            # pl/pr computed from `win_len`
            if len(window) != win_len:
                WARN("len(window) != win_len (%s != %s)" % (len(window),
                                                            win_len))
        else:
            raise ValueError("`window` must be string or np.ndarray "
                             "(got %s)" % window)
    else:
        # sym=False <-> fftbins=True (see above)
        window = sig.windows.dpss(win_len, max(4, win_len//8), sym=False)

    if len(window) < (win_len + pl + pr):
        window = np.pad(window, [pl, pr])

    if derivative:
        wf = fft(window)
        Nw = len(window)
        xi = _xifn(1, Nw)
        if Nw % 2 == 0:
            # zero the Nyquist bin for even length (presumably to keep the
            # derivative real & well-defined -- TODO confirm)
            xi[Nw // 2] = 0
        # frequency-domain differentiation; see `dWx` return docs in
        # `help(cwt)`
        diff_window = ifft(wf * 1j * xi).real

    # cast `dtype`, zero denormals (extremely small numbers that slow down CPU)
    window = _process_params_dtype(window, dtype=dtype, auto_gpu=False)
    zero_denormals(window)
    if derivative:
        diff_window = _process_params_dtype(diff_window, dtype=dtype,
                                            auto_gpu=False)
        zero_denormals(diff_window)
    return (window, diff_window) if derivative else window
def _check_NOLA(window, hop_len, dtype=None, imprecision_strict=False):
"""https://gauss256.github.io/blog/cola.html"""
# basic NOLA
if hop_len > len(window):
WARN("`hop_len > len(window)`; STFT not invertible")
elif not sig.check_NOLA(window, len(window), len(window) - hop_len):
WARN("`window` fails Non-zero Overlap Add (NOLA) criterion; "
"STFT not invertible")
# handle `dtype`; note this is just a guess, what matters is `Sx.dtype`
if dtype is None:
dtype = str(window.dtype)
# check for right boundary effect: as ssqueezepy's number of output frames
# is critically sampled (not more than needed), it creates an issue with
# float32 and time-localized windows, which struggle to invert the last frame
tol = 0.15 if imprecision_strict else 1e-3
if dtype == 'float32' and not sig.check_NOLA(
window, len(window), len(window) - hop_len, tol=tol):
# 1e-3 can still have imprecision detectable by eye, but only upon few
# samples, so avoid paranoia. Use 1e-2 to be safe, and 0.15 for ~exact
WARN("Imprecision expected at right-most hop of signal, in inversion. "
"Lower `hop_len`, choose wider `window`, or use `dtype='float64'`.")
| 13,033
| 39.858934
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/toolkit.py
|
# -*- coding: utf-8 -*-
import numpy as np
from .visuals import imshow, plot
#### Synchrosqueezing ########################################################
def lin_band(Tx, slope, offset, bw=.025, **kw):
    """Visually estimate a linear band to invert over in time-frequency(/scale)
    plane.
    """
    n_rows, n_cols = Tx.shape
    t_frac = np.linspace(0, 1, n_cols)

    # band center follows a line in (time, row) coordinates; width is constant
    center = (slope * (t_frac + offset) * n_rows).astype('int32')
    halfwidth = (bw * n_rows * np.ones(n_cols)).astype('int32')

    imshow(Tx, abs=1, aspect='auto', show=0, **kw)
    plot(center + halfwidth, color='r')
    plot(center - halfwidth, color='r', show=1)
    return center, halfwidth
#### Signals #################################################################
def _t(min, max, N, endpoint=False):
return np.linspace(min, max, N, endpoint=endpoint)
def cos_f(freqs, N=128, phi=0, endpoint=False):
    """Adjacent different frequency cosines: one unit-interval segment of `N`
    samples per entry of `freqs`, concatenated."""
    segments = []
    for i, f in enumerate(freqs):
        tg = _t(i, i + 1, N, endpoint) + phi
        segments.append(np.cos(2 * np.pi * f * tg))
    return np.concatenate(segments)
def sin_f(freqs, N=128, phi=0, endpoint=False):
    """Adjacent different frequency sines: one unit-interval segment of `N`
    samples per entry of `freqs`, concatenated."""
    segments = []
    for i, f in enumerate(freqs):
        tg = _t(i, i + 1, N, endpoint) + phi
        segments.append(np.sin(2 * np.pi * f * tg))
    return np.concatenate(segments)
#### Misc ####################################################################
def mad_rms(x, xrec):
    """Reconstruction error metric; scale-invariant, robust to outliers
    and partly sparsity. https://stats.stackexchange.com/q/495242/239063"""
    mad = np.abs(x - xrec).mean()
    rms = np.sqrt(np.mean(x**2))
    return mad / rms
def where_amax(x):
    """Return N-dimensional indices of where `abs(x) == max(abs(x))`."""
    mag = np.abs(x)
    return np.where(mag == mag.max())
| 1,755
| 36.361702
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/visuals.py
|
# -*- coding: utf-8 -*-
"""Convenience visual methods"""
import numpy as np
from pathlib import Path
from .algos import find_closest, find_maximum
from .configs import gdefaults
from . import plt
#### Visualizations ##########################################################
def wavelet_tf(wavelet, N=2048, scale=None, notext=False, width=1.1, height=1):
    """Visualize `wavelet` joint time-frequency resolution. Plots
    frequency-domain wavelet (psih) along y-axis, and time-domain wavelet
    (psi) along x-axis.

    Orthogonal units (e.g. y-axis for psi) are meaningless; function values
    aren't to scale, but *widths* are, so time-frequency uncertainties are
    accurately captured.

    `wavelet` is instance of `wavelets.Wavelet` or its valid `wavelet`
    argument. `scale=None` auto-selects one via `pick_scale`.

    See also: https://www.desmos.com/calculator/0nslu0qivv
    """
    def pick_scale(wavelet, N):
        """Pick scale such that both time- & freq-domain wavelets look nice."""
        st_min, st_max = 65 * (N / 2048), 75 * (N / 2048)
        max_iters = 100
        scale = wavelet.scalec_ct
        # generous `min_decay` since we don't care about initial bad cases
        kw = dict(wavelet=wavelet, N=N, min_decay=1, nondim=False)

        # multiplicative search until std_t lands within [st_min, st_max]
        std_t = time_resolution(scale=scale, **kw)
        i = 0
        while not (st_min < std_t < st_max):
            if std_t > st_max:
                scale /= 1.1
            else:
                scale *= 1.1
            std_t = time_resolution(scale=scale, **kw)
            if i > max_iters:
                raise ValueError(f"couldn't autofind `scale` after {max_iters} "
                                 "iterations, aborting")
            i += 1
        return scale

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    if scale is None:
        scale = pick_scale(wavelet, N)

    #### Compute psi & psihf #################################################
    psi = asnumpy(wavelet.psifn(scale=scale, N=N))
    apsi = np.abs(psi)
    t = np.arange(-N/2, N/2, step=1)

    w = _xifn(1, N)[:N//2 + 1]
    psih = asnumpy(wavelet(scale * w))

    #### Compute stdevs & respective indices #################################
    wc = center_frequency(wavelet, scale, N)
    std_w = freq_resolution(wavelet, scale, N, nondim=0)
    std_t = time_resolution(wavelet, scale, N, nondim=0, min_decay=1)
    # y-axis is drawn with radians decreasing downward, hence the flip
    _wc = np.pi - wc

    # indices of (center - std) and (center + std) on each axis
    wlix = np.argmin(np.abs(w - (_wc - std_w)))
    wrix = np.argmin(np.abs(w - (_wc + std_w)))
    wl, wr = w[wlix], w[wrix]

    tlix = np.argmin(np.abs(t - (0 - std_t)))
    trix = np.argmin(np.abs(t - (0 + std_t)))
    tl, tr = t[tlix], t[trix]

    ## Rescale psi so its y-coords span 1/5 of psih's x-coords, & vice-versa
    frac = 5
    psig = psi * (w.max() / apsi.max()) / frac
    apsig = apsi * (w.max() / apsi.max()) / frac
    psihg = psih * (t.max() / psih.max()) / frac
    # additionally shift psih to psi's left
    psihg += t.min()

    ## Find intersections
    w_xminu, w_xmax = psihg[::-1][wlix], tr
    w_xmind = psihg[::-1][wrix]  # psih not necessarily symmetric
    w_ymin, w_ymax = wl, wr
    t_xmin, t_xmax = tl, tr
    t_yminl, t_ymax = apsig[tlix], wr
    t_yminr = apsig[trix]  # same for psi

    #### Plot ################################################################
    plot(t, psig, complex=1, h=1.5)
    plot(t, apsig, linestyle='--', color='k')
    plot(psihg[::-1], w, color='purple')

    # bounds lines
    lkw = dict(color='k', linewidth=1)
    plot([t_xmin, t_xmin], [t_yminl, t_ymax], **lkw)
    plot([t_xmax, t_xmax], [t_yminr, t_ymax], **lkw)
    plot([w_xminu, w_xmax], [w_ymin, w_ymin], **lkw)
    plot([w_xmind, w_xmax], [w_ymax, w_ymax], **lkw)
    plt.xlim(t.min()*1.02, t.max()*1.02)

    # radians 0 to pi from top to bottom(=psi's mean)
    ylabels = np.round(np.linspace(np.pi, 0, 7), 1)
    plt.yticks(np.linspace(0, np.pi, len(ylabels)), ylabels)

    if notext:
        plt.gcf().set_size_inches(12*width, 12*height)
        plt.show()
        return

    #### Title, annotations, labels, styling #################################
    ## Annotation: info summary
    txt = (" wc = {:<6.5f} rad-c/s\n"
           " std_t = {:<6.4f} s/c-rad\n"
           " std_w = {:<6.5f} rad-c/s\n"
           "area/4 = {:.12f}\n"
           " = std_t * std_w\n\n"
           "(rad-c/s=\n radians*cycles/samples)"
           ).format(wc, std_t, std_w, std_t * std_w)
    _annotate(txt, xy=(.7, .76), fontsize=16)

    ## Title: wavelet name & parameters
    title = wavelet._desc(N=N, scale=scale)
    plt.title(title, loc='left', weight='bold', fontsize=16)

    ## Styling
    plt.xlabel("samples", weight='bold', fontsize=15)
    plt.ylabel("radians", weight='bold', fontsize=15)
    plt.gcf().set_size_inches(12*width, 12*height)
    plt.show()
def wavelet_tf_anim(wavelet, N=2048, scales=None, width=1.1, height=1,
                    savepath='wavanim.gif', testing=False):
    """This method computes same as `wavelet_tf` but for all scales at once,
    and animates 'intelligently'. See help(wavelet_tf).

    `scales=None` will default to 'log:minimal' with (.9*min_scale,
    0.25*max_scale). These are selected to show the wavelet a little outside
    of "well-behaved" range (without slashing max_scale, it's a lot outside
    such range). May not work for every wavelet or all of their configs.

    NOTE: switches the global matplotlib backend to 'Agg' as a side effect.
    """
    def _make_anim_scales(scales, wavelet, N):
        if scales is None:
            scales = 'log:minimal'
            mn, mx = cwt_scalebounds(wavelet, N=N, preset='maximal',
                                     use_padded_N=False)
            scales = make_scales(N, 0.90*mn, 0.25*mx, scaletype='log')
        else:
            scales = process_scales(scales, N, wavelet, use_padded_N=False)

        # compute early and late scales more densely as they capture more
        # interesting behavior, so animation will slow down smoothly near ends
        scales = scales.squeeze()
        na = len(scales)
        s0 = (25/253)*na  # empirically-determined good value
        srepl = max(int(s0), 1)  # scales to keep from each end
        srepr = max(int(s0), 1)
        smull = 4  # extension factor
        smulr = 3

        sright = np.linspace(scales[-srepr], scales[-1], srepr * smulr)
        sleft = np.linspace(scales[0], scales[srepl], srepl * smull)
        # repeat endpoints so the loop pauses smoothly at the extremes
        sright = np.hstack([sright, sright[-1].repeat(smulr*2)])  # smooth loop
        sleft = np.hstack([sleft[0].repeat(smull*2), sleft])

        scales = np.hstack([sleft, scales[srepl:-srepr], sright])
        scales = scales.reshape(-1, 1)
        return scales

    from matplotlib.animation import FuncAnimation
    import matplotlib
    # global side effect: backend stays 'Agg' after this call
    matplotlib.use("Agg")
    NOTE("Switched matplotlib to 'Agg' backend for animating")

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = _make_anim_scales(scales, wavelet, N)

    #### Compute Psi & Psih ##################################################
    Psi = asnumpy(wavelet.psifn(scale=scales, N=N))
    aPsi = np.abs(Psi)
    t = np.arange(-N/2, N/2, step=1)

    w = _xifn(1, N)[:N//2 + 1]
    Psih = asnumpy(wavelet(scales * w))

    #### Compute stdevs & respective indices #################################
    Wc = np.zeros(len(scales))
    std_W = Wc.copy()
    std_T = Wc.copy()
    for i, scale in enumerate(scales):
        Wc[i] = center_frequency(wavelet, float(scale), N, kind='energy')
        std_W[i] = freq_resolution( wavelet, float(scale), N, nondim=0)
        std_T[i] = time_resolution( wavelet, float(scale), N, nondim=0,
                                    min_decay=1)
    # y-axis drawn with radians decreasing downward, hence the flip
    _Wc = np.pi - Wc

    # per-scale indices of (center +/- std) on each axis
    Wlix = find_closest((_Wc - std_W).reshape(-1, 1), w).squeeze()
    Wrix = find_closest((_Wc + std_W).reshape(-1, 1), w).squeeze()
    Wl, Wr = w[Wlix], w[Wrix]

    Tlix = find_closest(0 - std_T.reshape(-1, 1), t).squeeze()
    Trix = find_closest(0 + std_T.reshape(-1, 1), t).squeeze()
    Tl, Tr = t[Tlix], t[Trix]

    ## Rescale Psi so its y-coords span 1/5 of Psih's x-coords, & vice-versa
    frac = 5
    Psig = Psi * (w.max() / aPsi.max(axis=-1)).reshape(-1, 1) / frac
    aPsig = aPsi * (w.max() / aPsi.max(axis=-1)).reshape(-1, 1) / frac
    Psihg = Psih * (t.max() / Psih.max(axis=-1)).reshape(-1, 1) / frac
    # additionally shift Psih to Psi's left
    Psihg += t.min()

    ## Find intersections ####################################################
    sidx = np.arange(len(scales))
    W_xminu, W_xmax = Psihg[:, ::-1][sidx, Wlix], Tr
    W_xmind = Psihg[:, ::-1][sidx, Wrix]  # Psih not necessarily symmetric
    W_ymin, W_ymax = Wl, Wr
    T_xmin, T_xmax = Tl, Tr
    T_yminl, T_ymax = aPsig[sidx, Tlix], Wr
    T_yminr = aPsig[sidx, Trix]  # same for Psi

    ## Set up plot objects ###################################################
    fig, ax = plt.subplots()
    ax.set_xlim([t.min()*1.02, t.max()*1.02])
    ax.set_ylim([-aPsig.max()*1.05, np.pi*1.02])
    ylabels = np.round(np.linspace(np.pi, 0, 7), 1)
    plt.yticks(np.linspace(0, np.pi, len(ylabels)), ylabels)
    fig.set_size_inches(12*width, 12*height)

    ## Title: wavelet name & parameters
    title = wavelet._desc(N=N)
    ax.set_title(title, loc='left', weight='bold', fontsize=16)

    # artists updated per frame: psi (real/imag/abs), psih, four bound lines
    line1, = ax.plot([], [], color='tab:blue')
    line2, = ax.plot([], [], color='tab:orange')
    line3, = ax.plot([], [], color='k', linestyle='--')
    line4, = ax.plot([], [], color='purple')

    lkw = dict(color='k', linewidth=1)
    line5, = ax.plot([], [], **lkw)
    line6, = ax.plot([], [], **lkw)
    line7, = ax.plot([], [], **lkw)
    line8, = ax.plot([], [], **lkw)

    tkw = dict(horizontalalignment='center', verticalalignment='center',
               transform=ax.transAxes, fontsize=15, weight='bold')
    txt = ax.text(.9, .95, "scale=%.2f" % scales[0], **tkw)
    fig.tight_layout()

    #### Animate #############################################################
    def unique_savepath(savepath):
        """Ensure doesn't overwrite existing"""
        sp = Path(savepath)
        savename = sp.stem
        if sp.is_file():
            # append smallest integer suffix exceeding existing ones
            paths = [str(p.stem) for p in Path(savepath).parent.iterdir()
                     if savename in p.stem]
            maxnum = 0
            for p in paths:
                num = p.replace(savename, '')
                if num != '' and int(num) > maxnum:
                    maxnum = int(num)
            sp = Path(sp.parent, savename + str(maxnum + 1) + sp.suffix)
        sp = str(sp)
        return sp

    def animate(i):
        # update all per-frame artists for scale index `i`
        line1.set_data(t, Psig[i].real)
        line2.set_data(t, Psig[i].imag)
        line3.set_data(t, aPsig[i])
        line4.set_data(Psihg[i][::-1], w)
        line5.set_data([T_xmin[i], T_xmin[i]], [T_yminl[i], T_ymax[i]])
        line6.set_data([T_xmax[i], T_xmax[i]], [T_yminr[i], T_ymax[i]])
        line7.set_data([W_xminu[i], W_xmax[i]], [W_ymin[i], W_ymin[i]])
        line8.set_data([W_xmind[i], W_xmax[i]], [W_ymax[i], W_ymax[i]])
        txt.set_text("scale=%.2f" % scales[i])
        return line1, line2, line3, line4, line5, line6, line7, line8

    sp = unique_savepath(savepath)
    print(("Successfully computed parameters, scales ranging {:.2f} to {:.2f}; "
           "animating...\nWill save to: {}").format(
               scales.min(), scales.max(), sp), flush=True)

    # play forward then backward for a seamless loop
    frames = np.hstack([range(len(scales)), range(len(scales) - 1)[::-1]])
    if testing:  # animation takes long; skip when unit-testing
        print("Passed `testing=True`, won't animate")
        return
    anim = FuncAnimation(fig, animate, frames=frames, interval=60,
                         blit=True, repeat=False)
    anim.save(sp, writer='imagemagick')
    print("Animated and saved to", sp, flush=True)
def wavelet_heatmap(wavelet, scales='log', N=2048):
    """Plot heatmaps of the wavelet across `scales`: time-domain (real part
    and abs-val) and frequency-domain (abs-val)."""
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    if not isinstance(scales, np.ndarray):
        scales = process_scales(scales, N, wavelet, use_padded_N=False)

    # time- & freq-domain wavelets for all scales
    psis = asnumpy(wavelet.psifn(scale=scales, N=N))
    w_half = _xifn(1, N)[:N//2 + 1]
    psihs = asnumpy(wavelet(scales * w_half))

    # plot
    vmax = np.abs(psis).max() * .01
    header = wavelet._desc(N=N)
    common = dict(ylabel="scales", xlabel="samples")

    imshow(psis.real, norm=(-vmax, vmax), yticks=scales,
           title=header + " | Time-domain; real part", **common)
    imshow(psis, abs=1, cmap='bone', norm=(0, vmax), yticks=scales,
           title=header + " | Time-domain; abs-val", **common)
    common['xlabel'] = "radians"
    imshow(psihs, abs=1, yticks=scales, xticks=np.linspace(0, np.pi, N//2),
           title=header + " | Freq-domain; abs-val", **common)
def sweep_std_t(wavelet, N, scales='log', get=False, **kw):
    """Plot time resolution (std_t) against log2(scales); return the values
    if `get=True`. `kw` forwards to `time_resolution` (validated)."""
    def _validate_kw(kw):
        # merge user kwargs over defaults, rejecting unknown keys
        defaults = dict(min_decay=1, max_mult=2, min_mult=2,
                        nondim=False, force_int=True)
        for k in kw:
            if k not in defaults:
                raise ValueError(f"unsupported kwarg '{k}'; must be one of: "
                                 + ', '.join(defaults))
        merged = dict(defaults)
        merged.update(kw)
        return merged

    kw = _validate_kw(kw)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = process_scales(scales, N, wavelet)

    stds = np.zeros(scales.size)
    for i, scale in enumerate(scales):
        stds[i] = time_resolution(wavelet, scale=scale, N=N, **kw)

    units = "nondim" if kw['nondim'] else "s/c-rad"
    title = "std_t [{}] vs log2(scales) | {} wavelet, {}".format(
        units, wavelet.name, wavelet.config_str)
    hlines = ([N/2, N/4], dict(color='k', linestyle='--'))
    plot(np.log2(scales), stds, title=title, hlines=hlines, show=1)
    if get:
        return stds
def sweep_std_w(wavelet, N, scales='log', get=False, **kw):
    """Plot frequency resolution (std_w) against log2(scales); return the
    values if `get=True`. `kw` forwards to `freq_resolution` (validated)."""
    def _validate_kw(kw):
        # merge user kwargs over defaults, rejecting unknown keys
        defaults = dict(nondim=False, force_int=True)
        for k in kw:
            if k not in defaults:
                raise ValueError(f"unsupported kwarg '{k}'; must be one of: "
                                 + ', '.join(defaults))
        merged = dict(defaults)
        merged.update(kw)
        return merged

    kw = _validate_kw(kw)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = process_scales(scales, N, wavelet)

    stds = np.zeros(scales.size)
    for i, scale in enumerate(scales):
        stds[i] = freq_resolution(wavelet, scale=scale, N=N, **kw)

    units = "nondim" if kw['nondim'] else "s/c-rad"
    title = "std_w [{}] vs log2(scales) | {} wavelet, {}".format(
        units, wavelet.name, wavelet.config_str)
    plot(np.log2(scales), stds, title=title, show=1)
    if get:
        return stds
def sweep_harea(wavelet, N, scales='log', get=False, kw_w=None, kw_t=None):
    """Plot Heisenberg area (std_w * std_t) against log2(scales).

    Sub-.5 and near-0 areas will occur for very high scales as a result of
    discretization limitations. Zero-areas have one non-zero frequency-domain,
    and std_t==N/2, with latter more accurately set to infinity (which we don't).
    Sub-.5 are per freq-domain assymetries degrading time-domain decay,
    and limited bin discretization integrating unreliably (yet largely
    meaningfully; the unreliable-ness appears emergent from discretization).
    """
    kw_w = kw_w or {}
    kw_t = kw_t or {}
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = process_scales(scales, N, wavelet)
    # individual sweeps; each shows its own figure
    stds_w = sweep_std_w(wavelet, N, scales, get=True, **kw_w)
    plt.show()
    stds_t = sweep_std_t(wavelet, N, scales, get=True, **kw_t)
    plt.show()
    areas = stds_w * stds_t
    hline = (.5, dict(color='tab:red', linestyle='--'))
    title = "(std_w * std_t) vs log2(scales) | {} wavelet, {}".format(
        wavelet.name, wavelet.config_str)
    plot(np.log2(scales), areas, color='k', hlines=hline, title=title)
    plt.show()
    if get:
        return areas, stds_w, stds_t
def wavelet_waveforms(wavelet, N, scale, zoom=True):
    """Visualize `wavelet` at `scale`: its freq-domain sampling, the
    freq-domain waveform, and the time-domain waveform (real, imag, abs).

    `zoom=True` trims plots around the wavelet's effective support when
    decay permits.
    """
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    ## Freq-domain sampled #######################
    # peak frequency of the (continuous) freq-domain wavelet
    w_peak, _ = find_maximum(wavelet.fn)
    w_ct = np.linspace(0, w_peak*2, max(4096, p2up(N)[0]))  # 'continuous-time'
    w_dt = np.linspace(0, np.pi, N//2) * scale  # sampling pts at `scale`
    psih_ct = asnumpy(wavelet(w_ct))
    psih_dt = asnumpy(wavelet(w_dt))
    title = ("wavelet(w) sampled by xi at scale={:.2f}, N={} | {} wavelet, {}"
             ).format(scale, N, wavelet.name, wavelet.config_str)
    plot(w_ct, psih_ct, title=title, xlabel="radians")
    scat(w_dt, psih_dt, color='tab:red')
    plt.legend(["psih at scale=1", "sampled at scale=%.2f" % scale], fontsize=13)
    plt.axvline(w_peak, color='tab:red', linestyle='--')
    plt.show()
    ## Freq-domain #######################
    # if peak not near left, don't zoom; same as `if .. (w_peak >= w_dt.max())`
    if not zoom or (np.argmax(psih_dt) > .05 * N/2):
        end = None
    else:
        # zoom: cut a few samples past where psih has decayed to ~1e-4 of peak
        peak_idx = np.argmax(psih_dt)
        end = np.where(psih_dt[peak_idx:] < 1e-4*psih_dt.max())[0][0]
        end += peak_idx + 3  # +3: give few more indices for visual
    w_dtn = w_dt * (np.pi / w_dt.max())  # norm to span true w
    plot(w_dtn[:end], psih_dt[:end], xlabel="radians",
         title="Freq-domain waveform (psih)" + ", zoomed" * (end is not None))
    scat(w_dtn[:end], psih_dt[:end], color='tab:red', show=1)
    ## Time-domain #######################
    psi = asnumpy(wavelet.psifn(scale=scale, N=N))
    apsi = np.abs(psi)
    t = np.arange(-N/2, N/2, step=1)
    # don't zoom unless there's fast decay
    peak_idx = np.argmax(apsi)
    if not zoom or (apsi.max() / apsi[peak_idx:].min() <= 1e3):
        start, end = 0, None
    else:
        # symmetric zoom about center, out to ~1e-3-of-peak decay point
        dt = np.where(apsi[peak_idx:] < 1e-3*apsi.max())[0][0]
        start, end = (N//2 - dt, N//2 + dt + 1)
    plot(t[start:end], psi[start:end], complex=1, xlabel="samples",
         title="Time-domain waveform (psi)" + ", zoomed" * (end is not None))
    plot(t[start:end], apsi[start:end], color='k', linestyle='--', show=1)
def _viz_cwt_scalebounds(wavelet, N, min_scale=None, max_scale=None,
                         std_t=None, cutoff=1, stdevs=2, Nt=None):
    """Can be used to visualize time & freq domains separately, where
    `min_scale` refers to scale at which to show the freq-domain wavelet, and
    `max_scale` the time-domain one.
    """
    def _viz_max(wavelet, N, max_scale, std_t, stdevs, Nt):
        # time-domain |psi|^2 at `max_scale`, extended to `Nt` samples,
        # with 1-stdev and `stdevs`-stdev markers
        if Nt is None:
            Nt = p2up(N)[0]
        if std_t is None:
            # permissive max_mult to not crash visual
            std_t = time_resolution(wavelet, max_scale, N, nondim=False,
                                    min_mult=2, max_mult=2, min_decay=1)
        t = np.arange(-Nt/2, Nt/2, step=1)
        t -= t.mean()
        psi = asnumpy(wavelet.psifn(scale=max_scale, N=len(t)))
        plot(t, np.abs(psi)**2, ylims=(0, None),
             title="|Time-domain wavelet|^2, extended (outside dashed)")
        plt.axvline(std_t, color='tab:red')
        plt.axvline(std_t * stdevs, color='tab:green')
        # mark target (non-extended) frame
        _ = [plt.axvline(v, color='k', linestyle='--') for v in (-N/2, N/2-1)]
        _kw = dict(fontsize=16, xycoords='axes fraction', weight='bold')
        plt.annotate("1 stdev",
                     xy=(.88, .95), color='tab:red', **_kw)
        plt.annotate("%s stdevs" % stdevs,
                     xy=(.88, .90), color='tab:green', **_kw)
        plt.show()
    def _viz_min(wavelet, N, min_scale, cutoff):
        # freq-domain wavelet at `min_scale`, with the cutoff level marked
        w = _xifn(1, N)[:N//2 + 1]  # drop negative freqs
        psih = asnumpy(wavelet(min_scale * w, nohalf=True))
        _, mx = find_maximum(wavelet)
        plot(w, psih, title=("Frequency-domain wavelet, positive half "
                             "(cutoff=%s, peak=%.3f)" % (cutoff, mx)))
        plt.axhline(mx * abs(cutoff), color='tab:red')
        plt.show()
    if min_scale is not None:
        _viz_min(wavelet, N, min_scale, cutoff)
    if max_scale is not None:
        _viz_max(wavelet, N, max_scale, std_t, stdevs, Nt)
    if not (min_scale or max_scale):
        raise ValueError("Must set at least one of `min_scale`, `max_scale`")
def wavelet_filterbank(wavelet, N=1024, scales='log', skips=0, title_append=None,
                       positives=False, show=True, get=False):
    """Plot all frequency-domain wavelets, superposed.

    `skips=1` will plot every *other* wavelet, `=2` will skip 2, etc.
    `=0` shows all.

    `title_append`: will `title += title_append` if not None. Must be string.
    Can use to display additional info.

    `positives=True` will show full wavelets as opposed to trimmed at Nyquist.

    `get=True` to return the filter bank (ignores `skip`).
    """
    def _title():
        scaletype = infer_scaletype(scales)[0]
        desc = wavelet._desc(N=N)
        desc = desc.replace(" |", " filterbank |")
        title = "{}, scaletype={}{}".format(desc, scaletype, title_append or '')
        title = _textwrap(title, wrap_len=72)
        return title
    # process `scales` & prepare freq-domain wavelets
    scales = process_scales(scales, N, wavelet)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    Psih = asnumpy(wavelet(scale=scales, N=N))
    # process `skips`: keep every (skips + 1)-th wavelet for display
    Psih_show, scales_show = [], []
    for i, psih in enumerate(Psih):
        if i % (skips + 1) == 0:
            Psih_show.append(psih)
            scales_show.append(scales[i])
    Psih_show = np.vstack(Psih_show).T
    # prepare plot params; unless `positives`, trim at Nyquist and label
    # the x-axis in radians
    if positives:
        w = None
        xlims = (-N/100, N*1.01)
    else:
        Psih_show = Psih_show[:N//2]
        w = np.linspace(0, np.pi, N//2, endpoint=True)
        xlims = (-np.pi/100, np.pi*1.01)
    # plot
    if positives:
        plt.axvline(N/2, color='tab:red')  # show Nyquist
    plot(w, Psih_show, color='tab:blue', title=_title(), xlims=xlims, show=0,
         xlabel="radians")
    # style: pad y-limits and annotate the scales' range
    _, ymax = plt.gca().get_ylim()
    plt.ylim(-ymax/100, ymax*1.03)
    txt = "(min, max)=(%.3f, %.1f)" % (np.min(scales_show), np.max(scales_show))
    _annotate(txt, xy=(.63, .95), fontsize=17)
    if show:
        plt.show()
    if get:
        return Psih
def viz_cwt_higher_order(Wx_k, scales=None, wavelet=None, **imshow_kw):
    """Visualize higher-order CWT(s).

    If `Wx_k` is a list, shows `abs(CWT)` per order plus their mean; else
    shows `Wx_k` as a single (presumably already-averaged) image. `scales`
    labels the y-axis; `wavelet` (if given) appends its description to titles.
    """
    if wavelet is not None:
        wavelet = Wavelet._init_if_not_isinstance(wavelet)
        title_append = " | " + wavelet._desc(show_N=False)
    else:
        title_append = ''
    yticks = scales.squeeze() if (scales is not None) else None
    # only inject yticks when ticks weren't explicitly disabled/overridden
    if imshow_kw.get('ticks', 1):
        imshow_kw['yticks'] = imshow_kw.get('yticks', yticks)
    if isinstance(Wx_k, list):
        for k, Wx in enumerate(Wx_k):
            title = "abs(CWT), order={}{}".format(k, title_append)
            imshow(Wx, abs=1, title=title, **imshow_kw)
        # mean of |CWT| across orders
        Wx_ka = np.mean(np.abs(np.vstack([Wx_k])), axis=0)
        order_str = ','.join(map(str, range(len(Wx_k))))
        title = "abs(CWT), orders {} avg{}".format(order_str, title_append)
        imshow(Wx_ka, abs=1, title=title, **imshow_kw)
    else:
        title = "abs(CWT), higher-order avg{}".format(title_append)
        imshow(Wx_k, abs=1, title=title, **imshow_kw)
def viz_gmw_orders(N=1024, n_orders=3, scale=5, gamma=3, beta=60,
                   norm='bandpass'):
    """Visualize the first `n_orders` GMW wavelet orders at `scale`:
    freq-domain waveforms superposed, then one time-domain figure per order.
    """
    # one Wavelet per order, sharing gamma / beta / norm
    wavelets_ = [Wavelet(('gmw', dict(gamma=gamma, beta=beta, norm=norm,
                                      order=k)))
                 for k in range(n_orders)]
    psihs = [wv(scale=scale)[:N//2 + 1] for wv in wavelets_]
    psis = [wv.psifn(scale=scale) for wv in wavelets_]
    w = np.linspace(0, np.pi, N//2 + 1, endpoint=True)
    desc = wavelets_[0]._desc(show_N=False)
    orders_str = ','.join(map(str, range(n_orders)))
    # freq-domain: superpose all orders, then flush the figure
    for psih in psihs:
        plot(w, psih, title="Freq-domain, orders=%s | %s" % (orders_str, desc))
    plot([], show=1)
    # time-domain: real/imag plus dashed abs-envelope, per order
    for k, psi in enumerate(psis):
        plot(psi, complex=1)
        plot(psi, abs=1, color='k', linestyle='--', show=1,
             title=f"Time-domain, order={k} | {desc}")
#### Visual tools ## messy code ##############################################
def imshow(data, title=None, show=1, cmap=None, norm=None, complex=None, abs=0,
           w=None, h=None, ridge=0, ticks=1, borders=1, aspect='auto', ax=None,
           fig=None, yticks=None, xticks=None, xlabel=None, ylabel=None,
           norm_scaling=1, **kw):
    """Display `data` as an image (e.g. a time-frequency representation).

    norm: color norm, tuple of (vmin, vmax)
    abs: take abs(data) before plotting
    complex: plot `data.real` & `data.imag` side by side
    ridge: mark per-column maxima of `abs(data)` with red dots
    ticks: False to not plot x & y ticks
    borders: False to not display plot borders
    w, h: rescale width & height
    norm_scaling: multiplies `norm`, even if `norm` is None (multiplies default)
    kw: passed to `plt.imshow()`
    others
    """
    # axes
    if (ax or fig) and complex:
        NOTE("`ax` and `fig` ignored if `complex`")
    if complex:
        fig, ax = plt.subplots(1, 2)
    else:
        ax = ax or plt.gca()
        fig = fig or plt.gcf()
    # norm: symmetric about 0 for signed data, [0, max] for `abs`
    if norm is None:
        mx = np.max(np.abs(data))
        vmin, vmax = ((-mx, mx) if not abs else
                      (0, mx))
    else:
        vmin, vmax = norm
    vmin *= norm_scaling
    vmax *= norm_scaling
    # colormap; 'turbo' needs matplotlib>=3.3.
    # Compare version as an int tuple: the previous `float(__version__[:3])`
    # parse broke for matplotlib >= 3.10 ("3.10"[:3] -> 3.1 < 3.3).
    import matplotlib as mpl
    mpl33 = tuple(map(int, mpl.__version__.split('.')[:2])) >= (3, 3)
    if cmap is None:
        cmap = (('turbo' if mpl33 else 'jet') if abs else
                'bwr')
    elif cmap == 'turbo':
        if not mpl33:
            from .utils import WARN
            WARN("'turbo' colormap requires matplotlib>=3.3; using 'jet' instead")
            cmap = 'jet'
    _kw = dict(vmin=vmin, vmax=vmax, cmap=cmap, aspect=aspect, **kw)
    if abs:
        ax.imshow(np.abs(data), **_kw)
    elif complex:
        ax[0].imshow(data.real, **_kw)
        ax[1].imshow(data.imag, **_kw)
        plt.subplots_adjust(left=0, right=1, bottom=0, top=1,
                            wspace=0, hspace=0)
    else:
        ax.imshow(data.real, **_kw)
    if w or h:
        fig.set_size_inches(12 * (w or 1), 12 * (h or 1))
    if ridge:
        data_mx = np.where(np.abs(data) == np.abs(data).max(axis=0))
        ax.scatter(data_mx[1], data_mx[0], color='r', s=4)
    # NOTE(review): styling below assumes a single Axes; with `complex=True`,
    # `ax` is an array, so e.g. `ticks=0` combined with `complex` would raise
    # — presumably not an intended combination; confirm
    if not ticks:
        ax.set_xticks([])
        ax.set_yticks([])
    if xticks is not None or yticks is not None:
        _ticks(xticks, yticks, ax)
    if not borders:
        for spine in ax.spines:
            ax.spines[spine].set_visible(False)
    if xlabel is not None:
        ax.set_xlabel(xlabel, weight='bold', fontsize=15)
    if ylabel is not None:
        ax.set_ylabel(ylabel, weight='bold', fontsize=15)
    _maybe_title(title, ax=ax)
    if show:
        plt.show()
def plot(x, y=None, title=None, show=0, ax_equal=False, complex=0, abs=0,
         c_annot=False, w=None, h=None, dx1=False, xlims=None, ylims=None,
         vert=False, vlines=None, hlines=None, xlabel=None, ylabel=None,
         xticks=None, yticks=None, ax=None, fig=None, ticks=True, squeeze=True,
         auto_xlims=True, **kw):
    """Line-plot `y` vs `x`; either may be None (the other is plotted against
    its sample indices).

    abs: take abs(y) before plotting
    complex: plot `y.real` & `y.imag`; `2` to also plot `abs(y)`
    c_annot: annotate "real"/"imag" labels when `complex`
    vert: swap x & y (vertical plot)
    ticks: False to not plot x & y ticks; may be a (x, y) pair
    w, h: rescale width & height
    kw: passed to `ax.plot()`
    others
    """
    ax = ax or plt.gca()
    fig = fig or plt.gcf()
    if auto_xlims is None:
        auto_xlims = bool((x is not None and len(x) != 0) or
                          (y is not None and len(y) != 0))
    if x is None and y is None:
        raise Exception("`x` and `y` cannot both be None")
    elif x is None:
        y = y if isinstance(y, list) or not squeeze else y.squeeze()
        x = np.arange(len(y))
    elif y is None:
        x = x if isinstance(x, list) or not squeeze else x.squeeze()
        y = x
        x = np.arange(len(x))
    x = x if isinstance(x, list) or not squeeze else x.squeeze()
    y = y if isinstance(y, list) or not squeeze else y.squeeze()
    if vert:
        x, y = y, x
    if complex:
        ax.plot(x, y.real, color='tab:blue', **kw)
        ax.plot(x, y.imag, color='tab:orange', **kw)
        if complex == 2:
            ax.plot(x, np.abs(y), color='k', linestyle='--', **kw)
        if c_annot:
            _kw = dict(fontsize=15, xycoords='axes fraction', weight='bold')
            ax.annotate("real", xy=(.93, .95), color='tab:blue', **_kw)
            ax.annotate("imag", xy=(.93, .90), color='tab:orange', **_kw)
    else:
        if abs:
            y = np.abs(y)
        ax.plot(x, y, **kw)
    if dx1:
        ax.set_xticks(np.arange(len(x)))
    if vlines:
        vhlines(vlines, kind='v')
    if hlines:
        vhlines(hlines, kind='h')
    ticks = ticks if isinstance(ticks, (list, tuple)) else (ticks, ticks)
    if not ticks[0]:
        ax.set_xticks([])
    if not ticks[1]:
        ax.set_yticks([])
    # relabel ticks once (this call was previously duplicated verbatim)
    if xticks is not None or yticks is not None:
        _ticks(xticks, yticks, ax)
    _maybe_title(title, ax=ax)
    _scale_plot(fig, ax, show=show, ax_equal=ax_equal, w=w, h=h,
                xlims=xlims, ylims=ylims, dx1=(len(x) if dx1 else 0),
                xlabel=xlabel, ylabel=ylabel, auto_xlims=auto_xlims)
def plots(X, Y=None, nrows=None, ncols=None, tight=True, sharex=False,
          sharey=False, skw=None, pkw=None, _scat=0, show=1, **kw):
    """Plot multiple curves in a grid of subplots (one `plot` call per axes;
    nested lists in `X[i]`/`Y[i]` overlay curves on one axes).

    Example:
        X = [[None, np.arange(xc, xc + wl)],
             [None, np.arange(xc + hop, xc + hop + wl)],
             None,
             None]
        Y = [[x, window],
             [x, window],
             xbuf[:, xbc],
             xbuf[:, xbc + 1]]
        pkw = [[{}]*2, [{}]*2, *[{'color': 'tab:green'}]*2]
        plots(X, Y, nrows=2, ncols=2, sharey='row', tight=tight, pkw=pkw)
    """
    def _process_args(X, Y, nrows, ncols, tight, skw, pkw, kw):
        # normalize X/Y to lists; infer grid shape from whichever of
        # nrows/ncols is unset
        X = X if isinstance(X, list) else [X]
        Y = Y if isinstance(Y, list) else [Y]
        skw = skw or {}
        pkw = pkw or [{}] * len(X)
        if nrows is None and ncols is None:
            nrows, ncols = len(X), 1
        elif nrows is None:
            nrows = max(len(X) // ncols, 1)
        elif ncols is None:
            ncols = max(len(X) // nrows, 1)
        default = dict(left=0, right=1, bottom=0, top=1, hspace=.1, wspace=.05)
        if tight:
            # `tight` may be True (use defaults) or a partial dict to merge
            if not isinstance(tight, dict):
                tight = default.copy()
            else:
                for name in default:
                    if name not in tight:
                        tight[name] = default[name]
            kw['w'] = kw.get('w', .8)
            kw['h'] = kw.get('h', .8)  # default 'tight' enlarges plot
        return X, Y, nrows, ncols, tight, skw, pkw, kw
    X, Y, nrows, ncols, tight, skw, pkw, kw = _process_args(
        X, Y, nrows, ncols, tight, skw, pkw, kw)
    # NOTE(review): `axes.flat` assumes more than one subplot; a 1x1 grid
    # returns a bare Axes — presumably never used that way here; confirm
    _, axes = plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, **skw)
    for ax, x, y, _pkw in zip(axes.flat, X, Y, pkw):
        if isinstance(x, list):
            for _x, _y, __pkw in zip(x, y, _pkw):
                plot(_x, _y, ax=ax, **__pkw, **kw)
                if _scat:
                    scat(_x, _y, ax=ax, **__pkw, **kw)
        else:
            plot(x, y, ax=ax, **_pkw, **kw)
            if _scat:
                scat(x, y, ax=ax, **_pkw, **kw)
    if tight:
        plt.subplots_adjust(**tight)
    if show:
        plt.show()
def scat(x, y=None, title=None, show=0, ax_equal=False, s=18, w=None, h=None,
         xlims=None, ylims=None, dx1=False, vlines=None, hlines=None, ticks=1,
         complex=False, abs=False, xlabel=None, ylabel=None, ax=None, fig=None,
         auto_xlims=True, **kw):
    """Scatter-plot counterpart of `plot`.

    complex: scatter `y.real` & `y.imag`
    abs: take abs(y) before plotting
    s: marker size
    kw: passed to `ax.scatter()`
    """
    ax = ax or plt.gca()
    fig = fig or plt.gcf()
    if auto_xlims is None:
        auto_xlims = bool((x is not None and len(x) != 0) or
                          (y is not None and len(y) != 0))
    # if one of x/y is missing, plot the other against its sample indices
    if x is None and y is None:
        raise Exception("`x` and `y` cannot both be None")
    elif x is None:
        x = np.arange(len(y))
    elif y is None:
        y = x
        x = np.arange(len(x))
    if complex:
        ax.scatter(x, y.real, s=s, **kw)
        ax.scatter(x, y.imag, s=s, **kw)
    else:
        if abs:
            y = np.abs(y)
        ax.scatter(x, y, s=s, **kw)
    if not ticks:
        ax.set_xticks([])
        ax.set_yticks([])
    _maybe_title(title, ax=ax)
    if vlines:
        vhlines(vlines, kind='v')
    if hlines:
        vhlines(hlines, kind='h')
    _scale_plot(fig, ax, show=show, ax_equal=ax_equal, w=w, h=h,
                xlims=xlims, ylims=ylims, dx1=(len(x) if dx1 else 0),
                xlabel=xlabel, ylabel=ylabel, auto_xlims=auto_xlims)
def plotscat(*args, **kw):
    """Overlay `plot` and `scat` with identical arguments; `show=True` to
    display the figure afterwards."""
    do_show = kw.pop('show', False)
    plot(*args, **kw)
    scat(*args, **kw)
    if do_show:
        plt.show()
def hist(x, bins=500, title=None, show=0, stats=0, ax=None, fig=None,
         w=1, h=1, xlims=None, ylims=None, xlabel=None, ylabel=None):
    """Histogram. `stats=True` to print mean, std, min, max of `x`.

    Returns `(mean, std, min, max)` when `stats=True`, else None.
    Uses the module-level `_fmt` for number formatting (the previous inner
    `_fmt` duplicated it verbatim).
    """
    ax = ax or plt.gca()
    fig = fig or plt.gcf()
    x = np.asarray(x)
    _ = ax.hist(x.ravel(), bins=bins)  # flatten: histogram over all elements
    _maybe_title(title, ax=ax)
    _scale_plot(fig, ax, show=show, w=w, h=h, xlims=xlims, ylims=ylims,
                xlabel=xlabel, ylabel=ylabel)
    if show:
        plt.show()
    if stats:
        mu, std, mn, mx = (x.mean(), x.std(), x.min(), x.max())
        print("(mean, std, min, max) = ({}, {}, {}, {})".format(
            *_fmt(mu, std, mn, mx)))
        return mu, std, mn, mx
def vhlines(lines, kind='v'):
    """Draw vertical (`kind='v'`) or horizontal (`kind='h'`) lines on the
    current axes.

    `lines` may be a scalar position, a list/array of positions, or a tuple
    `(positions, kwargs_dict)` with kwargs forwarded to
    `plt.axvline`/`plt.axhline`.
    """
    lfn = plt.axvline if kind == 'v' else plt.axhline
    # split off per-line kwargs if provided as (positions, dict)
    if isinstance(lines, tuple):
        lines, lkw = lines
    else:
        lkw = {}
    # normalize positions to an iterable. Previously an np.ndarray fell into
    # the scalar branch and was wrapped whole into a one-element list, passing
    # the full array to axvline; now each element gets its own line, matching
    # the ndarray handling already present for the tuple form.
    if not isinstance(lines, (list, np.ndarray)):
        lines = [lines]
    for line in lines:
        lfn(line, **lkw)
def _fmt(*nums):
return [(("%.3e" % n) if (abs(n) > 1e3 or abs(n) < 1e-3) else
("%.3f" % n)) for n in nums]
def _ticks(xticks, yticks, ax):
    """Relabel up to 8 evenly-spaced axis ticks using values from
    `xticks`/`yticks` (arrays of tick *values*, indexed by position).

    `None` leaves an axis untouched; a falsy non-sized value (e.g. `False`)
    clears that axis' ticks.
    """
    def fmt(ticks):
        # choose label format: strings as-is, whole numbers without decimals,
        # else 2-decimal floats
        if all(isinstance(h, str) for h in ticks):
            return "%s"
        return ("%.d" if all(float(h).is_integer() for h in ticks) else
                "%.2f")
    if yticks is not None:
        if not hasattr(yticks, '__len__') and not yticks:
            ax.set_yticks([])
        else:
            # 8 evenly-spaced positions across the ticks array
            idxs = np.linspace(0, len(yticks) - 1, 8).astype('int32')
            yt = [fmt(yticks) % h for h in np.asarray(yticks)[idxs]]
            ax.set_yticks(idxs)
            ax.set_yticklabels(yt)
    if xticks is not None:
        if not hasattr(xticks, '__len__') and not xticks:
            ax.set_xticks([])
        else:
            idxs = np.linspace(0, len(xticks) - 1, 8).astype('int32')
            xt = [fmt(xticks) % h for h in np.asarray(xticks)[idxs]]
            ax.set_xticks(idxs)
            ax.set_xticklabels(xt)
def _maybe_title(title, ax=None):
    """Set `title` on `ax` (or via `plt.title`) unless it is None.

    `title` may be plain text or a `(text, kwargs)` tuple; kwargs are
    completed with `gdefaults('visuals._maybe_title', ...)`.
    """
    if title is None:
        return
    if isinstance(title, tuple):
        title, kw = title
    else:
        kw = {}
    defaults = gdefaults('visuals._maybe_title', get_all=True, as_dict=True)
    for name, value in defaults.items():
        kw.setdefault(name, value)
    if ax:
        ax.set_title(str(title), **kw)
    else:
        plt.title(str(title), **kw)
def _scale_plot(fig, ax, show=False, ax_equal=False, w=None, h=None,
                xlims=None, ylims=None, dx1=False, xlabel=None, ylabel=None,
                auto_xlims=True):
    """Shared post-plot styling: limits, equal-aspect range, figure size,
    labels, and optional `plt.show()`."""
    if xlims:
        ax.set_xlim(*xlims)
    elif auto_xlims:
        # shrink default x-limits slightly to trim padding
        xmin, xmax = ax.get_xlim()
        rng = xmax - xmin
        ax.set_xlim(xmin + .018 * rng, xmax - .018 * rng)
    if ax_equal:
        # NOTE(review): relies on `xmin`/`xmax` bound in the `elif` branch
        # above; presumably raises NameError if `ax_equal=True` is combined
        # with explicit `xlims` (or falsy `auto_xlims`) — confirm intended use
        yabsmax = max(np.abs([*ax.get_ylim()]))
        mx = max(yabsmax, max(np.abs([xmin, xmax])))
        ax.set_xlim(-mx, mx)
        ax.set_ylim(-mx, mx)
        fig.set_size_inches(8*(w or 1), 8*(h or 1))
    # re-apply explicit limits in case `ax_equal` overrode them
    if xlims:
        ax.set_xlim(*xlims)
    if ylims:
        ax.set_ylim(*ylims)
    if dx1:
        # unit-spaced integer x-ticks (dx1 holds len(x) when enabled)
        plt.xticks(np.arange(dx1))
    if w or h:
        fig.set_size_inches(14*(w or 1), 8*(h or 1))
    if xlabel is not None:
        plt.xlabel(xlabel, weight='bold', fontsize=15)
    if ylabel is not None:
        plt.ylabel(ylabel, weight='bold', fontsize=15)
    if show:
        plt.show()
def _annotate(txt, xy=(.85, .9), weight='bold', fontsize=16):
    """Annotate the current axes with `txt` at axes-fraction coords `xy`."""
    _kw = dict(xycoords='axes fraction', xy=xy, weight=weight, fontsize=fontsize)
    try:
        # 'Consolas' for vertical align
        plt.annotate(txt, family='Consolas', **_kw)
    except Exception:
        # in case platform lacks 'Consolas'; was a bare `except`, which would
        # also swallow KeyboardInterrupt/SystemExit
        plt.annotate(txt, **_kw)
#############################################################################
from .wavelets import Wavelet, _xifn
from .wavelets import center_frequency, freq_resolution, time_resolution
from .utils.common import NOTE, _textwrap, p2up
from .utils.cwt_utils import process_scales, cwt_scalebounds, make_scales
from .utils.cwt_utils import infer_scaletype
from .utils.backend import asnumpy
| 37,418
| 35.153623
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/experimental.py
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from .wavelets import Wavelet, center_frequency
from .utils import backend as S, cwt_scalebounds
from .utils.common import EPS32, EPS64, p2up, trigdiff
from .ssqueezing import ssqueeze
from ._ssq_cwt import phase_cwt, phase_cwt_num
from ._ssq_stft import phase_stft, _make_Sfs
__all__ = ['freq_to_scale', 'scale_to_freq', 'phase_ssqueeze', 'phase_transform']
def freq_to_scale(freqs, wavelet, N, fs=1, n_search_scales=None, kind='peak',
                  base=2):
    """Convert frequencies to scales.

    # Arguments:
        freqs: np.ndarray
            1D array of frequencies. Must range between 0 and `N/fs/2` (Nyquist).

        wavelet: wavelets.Wavelet / str / tupe[str, dict]
            Wavelet.

        N: int
            `len(x)` of interest.

        fs: int
            Sampling rate in Hz.

        n_search_scales: int / None
            This method approximates the conversion of scales. Higher = better
            accuracy, but takes longer. Defaults to `10 * len(freqs)`.

        kind: str
            Mapping to use, one of: 'peak', 'energy', 'peak-ct'.
            See `help(ssqueezepy.center_frequency)`.

        base: int
            Base of exponent of `freqs`. Defaults to 2.
            `freqs` can be any distributed in any way, including mix of log
            and linear, so the base only helps improve the search if it matches
            that of `freqs`. If `freqs` is purely exponential, then
            `base = np.diff(np.log(freqs))[0] * 2.718281828`.

    # Returns:
        scales: np.ndarray
            1D arrays of scales.
    """
    def logb(x, base=2):
        return np.log(x) / np.log(base)
    def log(x):
        # log in the user-supplied `base`
        return logb(x, base)
    freqs = freqs / fs  # convert to unitless, [0., 0.5)
    assert np.all(freqs >= 0), "frequencies must be positive"
    assert freqs.max() <= 0.5, "max frequency must be 0.5"
    assert freqs.max() == freqs[-1], "max frequency must be last sample"
    assert freqs.min() == freqs[0], "min frequency must be first sample"
    M = len(freqs)
    if n_search_scales is None:
        n_search_scales = 10 * M
    # candidate scales spanning the wavelet's full valid range
    smin, smax = cwt_scalebounds(wavelet, N, preset='maximal', use_padded_N=False)
    search_scales = np.logspace(log(smin), log(smax), n_search_scales, base=base)
    # center frequency (radians, clipped to [0, pi]) of each candidate scale
    w_from_scales = []
    for scale in search_scales:
        w = center_frequency(wavelet, scale, N, kind=kind)
        w_from_scales.append(min(max(w, 0), np.pi))
    f_from_scales = np.array(w_from_scales) / (2*np.pi)
    # pick closest match
    fmin, fmax = freqs.min(), freqs.max()
    smax = search_scales[np.argmin(np.abs(f_from_scales - fmin))]
    smin = search_scales[np.argmin(np.abs(f_from_scales - fmax))]
    # make scales between found min and max
    scales = np.logspace(log(smax), log(smin), M, base=base)
    return scales
def scale_to_freq(scales, wavelet, N, fs=1, padtype='reflect'):
    """Convert scales to frequencies.

    # Arguments:
        scales: np.ndarray / float
            1D array of scales (a lone float is wrapped into an array).

        wavelet: wavelets.Wavelet / str / tupe[str, dict]
            Wavelet.

        N: int
            `len(x)` of interest.

        fs: int
            Sampling rate in Hz.

        padtype: str / None
            `padtype` used in the transform. Used to determine the length
            of wavelets used in the transform: `None` uses half the length
            relative to `not None`.
            The exact value doesn't matter, only whether it's `None` or not.

    # Returns:
        freqs: np.ndarray
            1D arrays of frequencies.
    """
    # process args
    if isinstance(scales, float):
        scales = np.array([scales])
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    # evaluate wavelet at `scales`; pad length to next power of 2 if the
    # transform itself padded
    Npad = int(2**np.ceil(np.log2(N))) if padtype is not None else N
    psis = wavelet(scale=scales, N=Npad)
    if hasattr(psis, 'cpu'):
        # torch tensor -> numpy
        psis = psis.cpu().numpy()
    # find peak indices
    idxs = np.argmax(psis, axis=-1)
    # check
    # https://github.com/OverLordGoldDragon/ssqueezepy/issues/41
    if np.any(idxs > Npad//2) or 0 in idxs:
        warnings.warn("found potentially ill-behaved wavelets (peak indices at "
                      "negative freqs or at dc); will round idxs to 1 or N/2")
        n_psis = len(psis)
        for i, ix in enumerate(idxs):
            if ix > Npad//2 or ix == 0:
                if i > n_psis // 2:  # low freq
                    idxs[i] = 1
                else:  # high freq
                    idxs[i] = Npad//2
    # convert
    freqs = idxs / Npad  # [0, ..., .5]
    assert freqs.min() >= 0, freqs.min()
    assert freqs.max() <= 0.5, freqs.max()
    freqs *= fs  # [0, ..., fs/2]
    return freqs
def phase_ssqueeze(Wx, dWx=None, ssq_freqs=None, scales=None, Sfs=None, fs=1.,
                   t=None, squeezing='sum', maprange=None, wavelet=None,
                   gamma=None, was_padded=True, flipud=False,
                   rpadded=False, padtype=None, N=None, n1=None,
                   difftype=None, difforder=None,
                   get_w=False, get_dWx=False, transform='cwt'):
    """Take `phase_transform` then `ssqueeze`. Can be used on an arbitrary
    CWT/STFT-like time-frequency transform `Wx`.

    Experimental; prefer `ssq_cwt` & `ssq_stft`.

    # Arguments:
        Wx, dWx (see w), ssq_freqs, scales, Sfs, fs, t, squeezing, maprange,
        wavelet, gamma, was_padded, flipud:
            See `help(ssqueezing.ssqueeze)`.

        rpadded: bool (default None) / None
            Whether `Wx` (and `dWx`) is passed in padded. `True` will unpad
            `Wx`  and `dWx`  before SSQ. Also, if `dWx` is None:
                - `rpadded==False`: will pad `Wx` in computing `dWx` if
                `padtype!=None`, then unpad both before SSQ
                - `rpadded==True`: won't pad `Wx` regardless of `padtype`

        padtype: str / None
            Used if `rpadded==False`. See `help(utils.padsignal)`. Note that
            padding `Wx` isn't same as passing padded `Wx` from `cwt`, but it
            can get close.

        N, n1: int / None
            Needed if `rpadded==True` to unpad `Wx` & `dWx` as `Wx[:, n1:n1 + N]`.

        difftype, difforder: str
            Used if `dWx = None` and `transform == 'cwt'`; see `help(ssq_cwt)`.

        get_w, get_dWx: bool
            See `help(ssq_cwt)`.

    # Returns:
        Tx, Wx, ssq_freqs, scales, Sfs, w, dWx
    """
    # phase transform; also computes `dWx` if it wasn't supplied
    w, Wx, dWx, Sfs, gamma = phase_transform(
        Wx, dWx, difftype, difforder=difforder, gamma=gamma, rpadded=rpadded,
        padtype=padtype, N=N, n1=n1, get_w=get_w, fs=fs, transform=transform)
    # once `w` exists, `dWx` is only kept if explicitly requested
    if w is not None and not get_dWx:
        dWx = None
    if maprange is None:
        maprange = 'peak' if transform == 'cwt' else 'maximal'
    Tx, ssq_freqs = ssqueeze(Wx, w, ssq_freqs, scales, Sfs, fs=fs, t=t,
                             squeezing=squeezing, maprange=maprange,
                             wavelet=wavelet, gamma=gamma, was_padded=was_padded,
                             flipud=flipud, dWx=dWx, transform=transform)
    return Tx, Wx, ssq_freqs, scales, Sfs, w, dWx
def phase_transform(Wx, dWx=None, difftype='trig', difforder=4, gamma=None,
                    fs=1., Sfs=None, rpadded=False, padtype='reflect', N=None,
                    n1=None, get_w=False, transform='cwt'):
    """Unified method for CWT & STFT SSQ phase transforms.

    Returns `(w, Wx, dWx, Sfs, gamma)`; `w` is None unless `get_w=True`.
    See `help(_ssq_cwt.phase_cwt)` and `help(_ssq_stft.phase_stft)`.
    """
    def _cwt(Wx, dWx, fs, gamma, N, n1, difftype, difforder, rpadded, padtype,
             get_w):
        # infer `N` and/or `n1`
        if N is None and not rpadded:
            N = Wx.shape[-1]
        if n1 is None:
            _, n1, _ = p2up(N)
        # compute `dWx` if not supplied
        if dWx is None:
            dWx = trigdiff(Wx, fs, padtype, rpadded, N=N, n1=n1, transform='cwt')
        if get_w:
            # NOTE(review): an unrecognized `difftype` here leaves `w` unbound
            # (UnboundLocalError) — presumably validated upstream; confirm
            if difftype == 'trig':
                # calculate instantaneous frequency directly from the
                # frequency-domain derivative
                w = phase_cwt(Wx, dWx, difftype, gamma)
            elif difftype == 'phase':
                # !!! bad; yields negatives, and forcing abs(w) doesn't help
                # calculate inst. freq. from unwrapped phase of CWT
                w = phase_cwt(Wx, None, difftype, gamma)
            elif difftype == 'numeric':
                # !!! tested to be very inaccurate for small scales
                # calculate derivative numericly
                Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
                dt = 1 / fs
                w = phase_cwt_num(Wx, dt, difforder, gamma)
        else:
            w = None
        return w, Wx, dWx
    def _stft(Wx, dWx, fs, gamma, Sfs, get_w):
        if Sfs is None:
            Sfs = _make_Sfs(Wx, fs)
        if get_w:
            w = phase_stft(Wx, dWx, Sfs, gamma)
        else:
            w = None
        return w, Wx, dWx, Sfs
    # validate args
    if transform == 'stft' and dWx is None:
        raise NotImplementedError("`phase_transform` without `dWx` for "
                                  "STFT is not currently supported.")
    if rpadded and N is None:
        raise ValueError("`rpadded=True` requires `N`")
    if Wx.ndim > 2 and get_w:
        raise NotImplementedError("`get_w=True` unsupported with batched input.")
    # gamma: numerical-stability threshold scaled to `Wx` precision
    if gamma is None:
        gamma = 10 * (EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)
    # take phase transform if `get_w` else only compute `dWx` (if None)
    if transform == 'cwt':
        w, Wx, dWx = _cwt(Wx, dWx, fs, gamma, N, n1, difftype, difforder,
                          rpadded, padtype, get_w)
        Sfs = None
    elif transform == 'stft':
        w, Wx, dWx, Sfs = _stft(Wx, dWx, fs, gamma, Sfs, get_w)
    return w, Wx, dWx, Sfs, gamma
| 9,923
| 37.169231
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/__init__.py
|
# -*- coding: utf-8 -*-
"""
MIT License
===========
Some ssqueezepy source files under other terms (see NOTICE.txt).
Copyright (c) 2020 John Muradeli
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# package metadata
__version__ = '0.6.3'
__title__ = 'ssqueezepy'
__author__ = 'John Muradeli'
__license__ = __doc__  # full MIT license text lives in the module docstring
__project_url__ = 'https://github.com/OverLordGoldDragon/ssqueezepy'
# matplotlib is an optional dependency: if unavailable, `plt` becomes a stub
# that raises on any attribute access (only `ssqueezepy.visuals` needs it)
try:
    import matplotlib.pyplot as plt
except ImportError:
    class PltDummy():
        def __getattr__(self, name):
            raise ValueError("`ssqueezepy.visuals` requires "
                             "`matplotlib` installed.")
    plt = PltDummy()
from . import utils
from . import ssqueezing
from . import _cwt
from . import _stft
from . import _ssq_cwt
from . import _ssq_stft
from . import _gmw
from . import _test_signals
from . import wavelets
from . import ridge_extraction
from . import toolkit
from . import visuals
from . import algos
from . import configs
from . import experimental
from .ssqueezing import *
from ._cwt import *
from ._stft import *
from ._ssq_cwt import *
from ._ssq_stft import *
from ._gmw import *
from ._test_signals import *
from .wavelets import *
from .ridge_extraction import *
from .utils.fft_utils import *
from .configs import IS_PARALLEL, USE_GPU
def wavs():
    """Return names of supported wavelets (`wavelets.Wavelet.SUPPORTED`)."""
    supported = wavelets.Wavelet.SUPPORTED
    return supported
# names of the package's top-level submodules; presumably consumed by
# tests/introspection — TODO confirm against callers
_modules_toplevel = [
    '_cwt', '_gmw', '_ssq_cwt', '_ssq_stft', '_stft', '_test_signals', 'algos',
    'configs', 'experimental', 'ridge_extraction', 'ssqueezing', 'toolkit',
    'visuals', 'wavelets', 'utils'
]
| 2,523
| 28.011494
| 79
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/_ssq_stft.py
|
# -*- coding: utf-8 -*-
import numpy as np
from ._stft import stft, get_window, _check_NOLA
from ._ssq_cwt import _invert_components, _process_component_inversion_args
from .utils.cwt_utils import _process_fs_and_t, infer_scaletype
from .utils.common import WARN, EPS32, EPS64
from .utils import backend as S
from .utils.backend import torch
from .algos import phase_stft_cpu, phase_stft_gpu
from .ssqueezing import ssqueeze, _check_ssqueezing_args
def ssq_stft(x, window=None, n_fft=None, win_len=None, hop_len=1, fs=None, t=None,
             modulated=True, ssq_freqs=None, padtype='reflect', squeezing='sum',
             gamma=None, preserve_transform=None, dtype=None, astensor=True,
             flipud=False, get_w=False, get_dWx=False):
    """Synchrosqueezed Short-Time Fourier Transform.
    Implements the algorithm described in Sec. III of [1].

    MATLAB docs: https://www.mathworks.com/help/signal/ref/fsst.html

    # Arguments:
        x: np.ndarray
            Input vector(s), 1D or 2D. See `help(cwt)`.

        window, n_fft, win_len, hop_len, fs, t, padtype, modulated
            See `help(stft)`.

        ssq_freqs, squeezing
            See `help(ssqueezing.ssqueeze)`.
            `ssq_freqs`, if array, must be linearly distributed.

        gamma: float / None
            See `help(ssqueezepy.ssq_cwt)`.

        preserve_transform: bool (default True)
            Whether to return `Sx` as directly output from `stft` (it might be
            altered by `ssqueeze` or `phase_transform`). Uses more memory
            per storing extra copy of `Sx`.

        dtype: str['float32', 'float64'] / None
            See `help(stft)`.

        astensor: bool (default True)
            If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
            or move them back to CPU & convert to Numpy arrays.

        flipud: bool (default False)
            See `help(ssqueeze)`.

        get_w, get_dWx
            See `help(ssq_cwt)`.
            (Named `_dWx` instead of `_dSx` for consistency.)

    # Returns:
        Tx: np.ndarray
            Synchrosqueezed STFT of `x`, of same shape as `Sx`.
        Sx: np.ndarray
            STFT of `x`. See `help(stft)`.
        ssq_freqs: np.ndarray
            Frequencies associated with rows of `Tx`.
        Sfs: np.ndarray
            Frequencies associated with rows of `Sx` (by default == `ssq_freqs`).
        w: np.ndarray (if `get_w=True`)
            Phase transform of STFT of `x`. See `help(phase_stft)`.
        dSx: np.ndarray (if `get_dWx=True`)
            Time-derivative of STFT of `x`. See `help(stft)`.

    # References:
        1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533
    """
    if x.ndim == 2 and get_w:
        raise NotImplementedError("`get_w=True` unsupported with batched input.")
    _, fs, _ = _process_fs_and_t(fs, t, x.shape[-1])
    _check_ssqueezing_args(squeezing)
    # assert ssq_freqs, if array, is linear
    if (isinstance(ssq_freqs, np.ndarray) and
            infer_scaletype(ssq_freqs)[0] != 'linear'):
        raise ValueError("`ssq_freqs` must be linearly distributed "
                         "for `ssq_stft`")
    # forward STFT with time-derivative
    Sx, dSx = stft(x, window, n_fft=n_fft, win_len=win_len, hop_len=hop_len,
                   fs=fs, padtype=padtype, modulated=modulated, derivative=True,
                   dtype=dtype)
    # preserve original `Sx` or not
    if preserve_transform is None:
        preserve_transform = not S.is_tensor(Sx)
    if preserve_transform:
        _Sx = (Sx.copy() if not S.is_tensor(Sx) else
               Sx.detach().clone())
    else:
        _Sx = Sx
    # make `Sfs`
    Sfs = _make_Sfs(Sx, fs)
    # gamma: numerical-stability threshold scaled to `Sx` precision
    if gamma is None:
        gamma = 10 * (EPS64 if S.is_dtype(Sx, 'complex128') else EPS32)
    # compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
    if get_w:
        w = phase_stft(_Sx, dSx, Sfs, gamma)
        _dSx = None  # don't use in `ssqueeze`
        if not get_dWx:
            dSx = None
    else:
        w = None
        _dSx = dSx
    # synchrosqueeze
    if ssq_freqs is None:
        ssq_freqs = Sfs
    Tx, ssq_freqs = ssqueeze(_Sx, w, squeezing=squeezing, ssq_freqs=ssq_freqs,
                             Sfs=Sfs, flipud=flipud, gamma=gamma, dWx=_dSx,
                             maprange='maximal', transform='stft')
    # return; move off GPU / convert to numpy if requested
    if not astensor and S.is_tensor(Tx):
        Tx, Sx, ssq_freqs, Sfs, w, dSx = [
            g.cpu().numpy() if S.is_tensor(g) else g
            for g in (Tx, Sx, ssq_freqs, Sfs, w, dSx)]
    if get_w and get_dWx:
        return Tx, Sx, ssq_freqs, Sfs, w, dSx
    elif get_w:
        return Tx, Sx, ssq_freqs, Sfs, w
    elif get_dWx:
        return Tx, Sx, ssq_freqs, Sfs, dSx
    else:
        return Tx, Sx, ssq_freqs, Sfs
def issq_stft(Tx, window=None, cc=None, cw=None, n_fft=None, win_len=None,
              hop_len=1, modulated=True):
    """Inverse synchrosqueezed STFT.

    # Arguments:
        Tx: np.ndarray
            Synchrosqueezed STFT to invert.
        window, n_fft, win_len, hop_len, modulated
            See `help(stft)`. Must match those used in `ssq_stft`.
        cc, cw: np.ndarray
            See `help(issq_cwt)`.

    # Returns:
        x: np.ndarray
            Signal as reconstructed from `Tx`.

    # References:
        1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533

        2. Fourier synchrosqueezed transform MATLAB docs.
        https://www.mathworks.com/help/signal/ref/fsst.html
    """
    # only the modulated, hop_len==1 configuration is invertible here
    if not modulated:
        raise ValueError("inversion with `modulated == False` "
                         "is unsupported.")
    if hop_len != 1:
        raise ValueError("inversion with `hop_len != 1` is unsupported.")

    cc, cw, full_inverse = _process_component_inversion_args(cc, cw)

    # infer unset transform parameters from `Tx`'s row count
    n_fft = n_fft or (Tx.shape[0] - 1) * 2
    win_len = win_len or n_fft

    window = get_window(window, win_len, n_fft=n_fft)
    _check_NOLA(window, hop_len)
    if abs(np.argmax(window) - len(window)//2) > 1:
        WARN("`window` maximum not centered; results may be inaccurate.")

    if full_inverse:
        # Integration over all frequencies recovers original signal
        x = Tx.real.sum(axis=0)
    else:
        x = _invert_components(Tx, cc, cw)

    # undo the window's center-sample gain
    x *= (2 / window[len(window)//2])
    return x
def phase_stft(Sx, dSx, Sfs, gamma=None, parallel=None):
    """Phase transform of STFT:
        w[u, k] = Im( k - d/dt(Sx[u, k]) / Sx[u, k] / (j*2pi) )

    Defined in Sec. 3 of [1]; additionally explained in:
    https://dsp.stackexchange.com/a/72589/50076

    # Arguments:
        Sx: np.ndarray
            STFT of `x`, where `x` is 1D.
        dSx: np.ndarray
            Time-derivative of STFT of `x`.
        Sfs: np.ndarray
            Associated physical frequencies, according to `dt` used in `stft`.
            Spans 0 to fs/2, linearly.
        gamma: float / None
            See `help(ssqueezepy.ssq_cwt)`.

    # Returns:
        w: np.ndarray
            Phase transform for each element of `Sx`. w.shape == Sx.shape.

    # References:
        1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533

        2. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications.
        G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010
    """
    S.warn_if_tensor_and_par(Sx, parallel)
    if gamma is None:
        # default threshold scales with the precision of `Sx`
        eps = EPS64 if S.is_dtype(Sx, 'complex128') else EPS32
        gamma = 10 * eps
    on_gpu = S.is_tensor(Sx)
    if on_gpu:
        return phase_stft_gpu(Sx, dSx, Sfs, gamma)
    return phase_stft_cpu(Sx, dSx, Sfs, gamma, parallel)
def _make_Sfs(Sx, fs):
    # Row-frequency vector spanning [0, fs/2], matching `Sx`'s precision
    # and (for tensors) its device; rows axis is 0 for 2D, 1 for batched 3D.
    dtype = 'float32' if 'complex64' in str(Sx.dtype) else 'float64'
    n_rows = Sx.shape[0] if Sx.ndim == 2 else Sx.shape[1]
    if not S.is_tensor(Sx):
        return np.linspace(0, .5*fs, n_rows, dtype=dtype)
    return torch.linspace(0, .5*fs, n_rows, device=Sx.device,
                          dtype=getattr(torch, dtype))
| 8,660
| 34.207317
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/configs.py
|
# -*- coding: utf-8 -*-
"""
Contains `GDEFAULTS`, global defaults dictionary, set in `ssqueezepy.configs.ini`.
The .ini is parsed into a dict, then values are retrieved internally by functions
via `gdefaults()`, which sets default values if keyword arguments weren't set
to original functions (or were set to `None`).
E.g. calling `wavelets.morlet()`, the function has `mu=None` signature, so `mu`
will be drawn from `configs.ini`, unless calling like `wavelets.morlet(mu=1)`.
"""
import os
import inspect
import logging
logging.basicConfig(format='')
def WARN(msg):
    """Log `msg` as a warning with a 'WARNING:' prefix."""
    logging.warning("WARNING: %s" % msg)
# location of the global-defaults file parsed by `_get_gdefaults`
path = os.path.join(os.path.dirname(__file__), 'configs.ini')
# Optional GPU stack; `None` signals "unavailable" (checked by `USE_GPU`).
try:
    import torch
    import cupy
except Exception:
    # narrowed from bare `except:` so SystemExit/KeyboardInterrupt
    # during import aren't silently swallowed
    torch, cupy = None, None
def gdefaults(module_and_obj=None, get_all=False, as_dict=None,
              default_order=False, **kw):
    """Fetches default arguments from `ssqueezepy/configs.ini` and fills them
    in `kw` where they're None (or always if `get_all=True`). See code comments.

    # Arguments:
        module_and_obj: str / None
            Dotted lookup path (e.g. 'utils.common.obj'); if None, module and
            object names are inferred from the caller's frame.
        get_all: bool
            Also load defaults whose keys aren't in `kw` (without overwriting
            keys that are).
        as_dict: bool / None
            Return a dict rather than values; defaults to `bool(get_all)`.
        default_order: bool
            Return `kw` with keys ordered as in `configs.ini`.
        **kw:
            Keyword arguments to fill; entries that are `None` are replaced
            by configured defaults.
    """
    if as_dict is None:
        as_dict = bool(get_all)
    if module_and_obj is None:
        stack = inspect.stack(0)  # `(0)` faster than `()`
        obj = stack[1][3]
        # strip the '.py' extension as a *suffix*; `str.rstrip('.py')` strips
        # any trailing run of the characters '.', 'p', 'y' and corrupts names
        # such as 'ssqueezepy.py' -> 'ssqueeze'
        fname = stack[1][1].split(os.path.sep)[-1]
        module = fname[:-3] if fname.endswith('.py') else fname
    else:
        # may have e.g. `utils.common.obj`
        mos = module_and_obj.split('.')
        module, obj = '.'.join(mos[:-1]), mos[-1]

    # fetch latest
    GDEFAULTS = _get_gdefaults()

    # if `module` & `obj` are found in `GDEFAULTS`, proceed to write values
    # from `GDEFAULTS` onto `kw` if `kw`'s are `None`
    # if `get_all=True`, load values from `GDEFAULTS` even if they're not in
    # `kw`, but don't overwrite those that are in `kw`.
    # if `default_order=True`, will return `kw` with keys sorted as in
    # `configs.ini`, for e.g. plotting purposes
    if module not in GDEFAULTS:
        WARN(f"module {module} not found in GDEFAULTS (see configs.ini)")
    elif obj not in GDEFAULTS[module]:
        WARN(f"object {obj} not found in GDEFAULTS['{module}'] "
             "(see configs.ini)")
    else:
        DEFAULTS = GDEFAULTS[module][obj]
        for key, value in kw.items():
            if value is None:
                kw[key] = DEFAULTS.get(key, value)
        if get_all:
            for key, value in DEFAULTS.items():
                if key not in kw:
                    kw[key] = value
        if default_order:
            # first make a dict with correct order
            # then overwrite its values with `kw`'s, without changing order
            # if `kw` has keys that `ordered_kw` doesn't, they're inserted at end
            ordered_kw = {}
            for key, value in DEFAULTS.items():
                if key in kw:  # `get_all` already accounted for
                    ordered_kw[key] = value
            ordered_kw.update(**kw)
            kw = ordered_kw

    if as_dict:
        return kw
    return (kw.values() if len(kw) != 1 else
            list(kw.values())[0])
def _get_gdefaults():
    """Global defaults fetched from configs.ini."""
    def _maybe_number(s):
        """If float works, so should int."""
        if isinstance(s, (bool, type(None))):
            return s
        try:
            return float(s)
        except ValueError:
            return s

    def _maybe_special(s):
        # literal keywords in the .ini map to Python singletons
        return {
            'None': None,
            'True': True,
            'False': False,
        }.get(s, s)

    def _parse_value(value):
        value = value.strip('"').strip("'")
        return _maybe_number(_maybe_special(value))

    with open(path, 'r') as f:
        lines = f.read().split('\n')
    # everything past the END sentinel is ignored
    lines = lines[:lines.index('#### END')]
    lines = [ln.strip(' ') for ln in lines if ln != '']

    defaults = {}
    module, obj = '', ''
    for ln in lines:
        if ln.startswith('## '):
            # '## <module>' opens a module section
            module = ln[3:]
            defaults[module] = {}
        elif ln.startswith('# '):
            # '# <object>' opens an object (function) section
            obj = ln[2:]
            defaults[module][obj] = {}
        else:
            # 'key = value' entry within current section
            key, value = [s.strip(' ') for s in ln.split('=')]
            defaults[module][obj][key] = _parse_value(value)
    return defaults
def IS_PARALLEL():
    """Returns False if 'SSQ_PARALLEL' environment flag was set to '0', or
    if `parallel` in `configs.ini` is set to `0`; former overrides latter.
    """
    # environment flag takes precedence over configs.ini
    if os.environ.get('SSQ_PARALLEL', '1') == '0':
        return False
    return gdefaults('configs.IS_PARALLEL', parallel=None) != 0
def USE_GPU():
    """True iff the 'SSQ_GPU' environment flag is '1' (requires both
    PyTorch and CuPy to be importable)."""
    if os.environ.get('SSQ_GPU', '0') != '1':
        return False
    if torch is None or cupy is None:
        raise ValueError("'SSQ_GPU' requires PyTorch and CuPy installed.")
    return True
GDEFAULTS = _get_gdefaults()
| 4,853
| 31.145695
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/ssqueezing.py
|
# -*- coding: utf-8 -*-
import numpy as np
from types import FunctionType
from .algos import indexed_sum_onfly, ssqueeze_fast
from .utils import p2up, process_scales, infer_scaletype, _process_fs_and_t
from .utils import NOTE, pi, logscale_transition_idx, assert_is_one_of
from .utils.backend import Q
from .utils.common import WARN
from .utils import backend as S
from .wavelets import center_frequency
def ssqueeze(Wx, w=None, ssq_freqs=None, scales=None, Sfs=None, fs=None, t=None,
             squeezing='sum', maprange='maximal', wavelet=None, gamma=None,
             was_padded=True, flipud=False, dWx=None, transform='cwt'):
    """Synchrosqueezes the CWT or STFT of `x`.

    # Arguments:
        Wx or Sx: np.ndarray
            CWT or STFT of `x`. CWT is assumed L1-normed, and STFT with
            `modulated=True`. If 3D, will treat elements along dim0 as independent
            inputs, synchrosqueezing one-by-one (but memory-efficiently).

        w: np.ndarray / None
            Phase transform of `Wx` or `Sx`. Must be >=0.
            If None, `gamma` & `dWx` must be supplied (and `Sfs` for SSQ_STFT).

        ssq_freqs: str['log', 'log-piecewise', 'linear'] / np.ndarray / None
            Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
            mapping is only approximate and wavelet-dependent.
            If None, will infer from and set to same distribution as `scales`.
            See `help(cwt)` on `'log-piecewise'`.

        scales: str['log', 'log-piecewise', 'linear', ...] / np.ndarray
            See `help(cwt)`.

        Sfs: np.ndarray
            Needed if `transform='stft'` and `dWx=None`. See `help(ssq_stft)`.

        fs: float / None
            Sampling frequency of `x`. Defaults to 1, which makes ssq
            frequencies range from 1/dT to 0.5*fs, i.e. as fraction of reference
            sampling rate up to Nyquist limit; dT = total duration (N/fs).
            Overridden by `t`, if provided.
            Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076

        t: np.ndarray / None
            Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
            Must be uniformly-spaced.
            Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
            Overrides `fs` if not None.

        squeezing: str['sum', 'lebesgue'] / function
            - 'sum': summing `Wx` according to `w`. Standard synchrosqueezing.
            Invertible.
            - 'lebesgue': as in [3], summing `Wx=ones()/len(Wx)`. Effectively,
            raw `Wx` phase is synchrosqueezed, independent of `Wx` values. Not
            recommended with CWT or STFT with `modulated=True`. Not invertible.
            For `modulated=False`, provides a more stable and accurate
            representation.
            - 'abs': summing `abs(Wx)` according to `w`. Not invertible
            (but theoretically possible to get close with least-squares estimate,
            so much "more invertible" than 'lebesgue'). Alt to 'lebesgue',
            providing same benefits while losing much less information.

            Custom function can be used to transform `Wx` arbitrarily for
            summation, e.g. `Wx**2` via `lambda x: x**2`. Output shape
            must match `Wx.shape`.

        maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
            See `help(ssq_cwt)`. Only `'maximal'` supported with STFT.

        wavelet: wavelets.Wavelet
            Only used if maprange != 'maximal' to compute center frequencies.
            See `help(cwt)`.

        gamma: float
            See `help(ssq_cwt)`.

        was_padded: bool (default `rpadded`)
            Whether `x` was padded to next power of 2 in `cwt`, in which case
            `maprange` is computed differently.
            - Used only with `transform=='cwt'`.
            - Ignored if `maprange` is tuple.

        flipud: bool (default False)
            Whether to fill `Tx` equivalently to `flipud(Tx)` (faster & less
            memory than calling `Tx = np.flipud(Tx)` afterwards).

        dWx: np.ndarray,
            Used internally by `ssq_cwt` / `ssq_stft`; must pass when `w` is None.

        transform: str['cwt', 'stft']
            Whether `Wx` is from CWT or STFT (`Sx`).

    # Returns:
        Tx: np.ndarray [nf x n]
            Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
            (nf = len(ssq_freqs); n = len(x))
            `nf = na` by default, where `na = len(scales)`.
        ssq_freqs: np.ndarray [nf]
            Frequencies associated with rows of `Tx`.

    # References:
        1. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
        Decomposition. I. Daubechies, J. Lu, H.T. Wu.
        https://arxiv.org/pdf/0912.2437.pdf

        2. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications.
        G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010

        3. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533

        4. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        synsq_squeeze.m
    """
    def _ssqueeze(Tx, w, Wx, dWx, nv, ssq_freqs, scales, transform, ssq_scaletype,
                  cwt_scaletype, flipud, gamma, Sfs):
        # Core per-2D-slice squeezing; accumulates into pre-allocated `Tx`.
        if transform == 'cwt':
            # Eq 14 [2]; Eq 2.3 [1]
            if cwt_scaletype[:3] == 'log':
                # ln(2)/nv == diff(ln(scales))[0] == ln(2**(1/nv))
                const = np.log(2) / nv
            elif cwt_scaletype == 'linear':
                # omit /dw since it's cancelled by *dw in inversion anyway
                const = ((scales[1] - scales[0]) / scales).squeeze()
        elif transform == 'stft':
            const = (ssq_freqs[1] - ssq_freqs[0])  # 'alpha' from [3]
        ssq_logscale = ssq_scaletype.startswith('log')
        # do squeezing by finding which frequency bin each phase transform point
        # w[a, b] lands in (i.e. to which f in ssq_freqs each w[a, b] is closest)
        # equivalent to argmin(abs(w[a, b] - ssq_freqs)) for every a, b
        # Tx[k[i, j], j] += Wx[i, j] * norm -- (see below method's docstring)
        if w is None:
            # fused phase-transform + squeeze path (see `algos.ssqueeze_fast`)
            ssqueeze_fast(Wx, dWx, ssq_freqs, const, ssq_logscale, flipud,
                          gamma, out=Tx, Sfs=Sfs)
        else:
            indexed_sum_onfly(Wx, w, ssq_freqs, const, ssq_logscale, flipud,
                              out=Tx)
    def _process_args(Wx, w, fs, t, transform, squeezing, scales, maprange,
                      wavelet, dWx):
        # Validates argument combinations; returns (N, dt) of the input.
        if w is None and (dWx is None or gamma is None):
            raise ValueError("if `w` is None, `dWx` and `gamma` must not be.")
        elif w is not None and w.min() < 0:
            raise ValueError("found negatives in `w`")
        _check_ssqueezing_args(squeezing, maprange, transform=transform,
                               wavelet=wavelet)
        if scales is None and transform == 'cwt':
            raise ValueError("`scales` can't be None if `transform == 'cwt'`")
        N = Wx.shape[-1]
        dt, *_ = _process_fs_and_t(fs, t, N)
        return N, dt
    N, dt = _process_args(Wx, w, fs, t, transform, squeezing, scales,
                          maprange, wavelet, dWx)
    if transform == 'cwt':
        scales, cwt_scaletype, _, nv = process_scales(scales, N, get_params=True)
    else:
        cwt_scaletype, nv = None, None
    # handle `ssq_freqs` & `ssq_scaletype`
    if not (isinstance(ssq_freqs, np.ndarray) or S.is_tensor(ssq_freqs)):
        if isinstance(ssq_freqs, str):
            ssq_scaletype = ssq_freqs
        else:
            # default to same scheme used by `scales`
            ssq_scaletype = cwt_scaletype
        if ((maprange == 'maximal' or isinstance(maprange, tuple)) and
                ssq_scaletype == 'log-piecewise'):
            raise ValueError("can't have `ssq_scaletype = log-piecewise` or "
                             "tuple with `maprange = 'maximal'` "
                             "(got %s)" % str(maprange))
        ssq_freqs = _compute_associated_frequencies(
            scales, N, wavelet, ssq_scaletype, maprange, was_padded, dt,
            transform)
    elif transform == 'stft':
        # removes warning per issue with `infer_scaletype`
        # future TODO: shouldn't need this
        ssq_scaletype = 'linear'
    else:
        ssq_scaletype, _ = infer_scaletype(ssq_freqs)
    # transform `Wx` if needed
    if isinstance(squeezing, FunctionType):
        Wx = squeezing(Wx)
    elif squeezing == 'lebesgue':  # from reference [3]
        Wx = S.ones(Wx.shape, dtype=Wx.dtype) / len(Wx)
    elif squeezing == 'abs':
        Wx = Q.abs(Wx)
    # synchrosqueeze
    Tx = S.zeros(Wx.shape, dtype=Wx.dtype)
    args = (nv, ssq_freqs, scales, transform, ssq_scaletype,
            cwt_scaletype, flipud, gamma, Sfs)
    if Wx.ndim == 2:
        _ssqueeze(Tx, w, Wx, dWx, *args)
    elif Wx.ndim == 3:
        # batched input: squeeze each 2D slice independently
        w, dWx = [(g if g is not None else [None]*len(Tx))
                  for g in (w, dWx)]
        for _Tx, _w, _Wx, _dWx in zip(Tx, w, Wx, dWx):
            _ssqueeze(_Tx, _w, _Wx, _dWx, *args)
    # `scales` go high -> low
    if (transform == 'cwt' and not flipud) or flipud:
        # returned `ssq_freqs` must match row order of `Tx`
        if not isinstance(ssq_freqs, np.ndarray):
            import torch
            ssq_freqs = torch.flip(ssq_freqs, (0,))
        else:
            ssq_freqs = ssq_freqs[::-1]
    return Tx, ssq_freqs
#### `ssqueeze` utils ########################################################
def _ssq_freqrange(maprange, dt, N, wavelet, scales, was_padded):
if isinstance(maprange, tuple):
fm, fM = maprange
elif maprange == 'maximal':
dT = dt * N
# normalized frequencies to map discrete-domain to physical:
# f[[cycles/samples]] -> f[[cycles/second]]
# minimum measurable (fundamental) frequency of data
fm = 1 / dT
# maximum measurable (Nyquist) frequency of data
fM = 1 / (2 * dt)
elif maprange in ('peak', 'energy'):
kw = dict(wavelet=wavelet, N=N, maprange=maprange, dt=dt,
was_padded=was_padded)
fm = _get_center_frequency(**kw, scale=scales[-1])
fM = _get_center_frequency(**kw, scale=scales[0])
return fm, fM
def _compute_associated_frequencies(scales, N, wavelet, ssq_scaletype, maprange,
                                    was_padded=True, dt=1, transform='cwt'):
    """Build the `ssq_freqs` vector of `len(scales)` frequencies spanning
    [fm, fM] (from `_ssq_freqrange`), distributed per `ssq_scaletype`."""
    fm, fM = _ssq_freqrange(maprange, dt, N, wavelet, scales, was_padded)
    na = len(scales)
    # frequency divisions `w_l` to reassign to in Synchrosqueezing
    if ssq_scaletype == 'log':
        # [fm, ..., fM]
        ssq_freqs = fm * np.power(fM / fm, np.arange(na)/(na - 1))
    elif ssq_scaletype == 'log-piecewise':
        idx = logscale_transition_idx(scales)
        if idx is None:
            # no transition found -> degenerates to plain 'log'
            ssq_freqs = fm * np.power(fM / fm, np.arange(na)/(na - 1))
        else:
            f0, f2 = fm, fM
            # note that it's possible for f1 == f0 per discretization limitations,
            # in which case `sqf1` will contain the same value repeated
            f1 = _get_center_frequency(wavelet, N, maprange, dt, scales[idx],
                                       was_padded)
            # here we don't know what the pre-downsampled `len(scales)` was,
            # so we take a longer route by piecewising respective center freqs
            t1 = np.arange(0, na - idx - 1)/(na - 1)
            t2 = np.arange(na - idx - 1, na)/(na - 1)
            # simulates effect of "endpoint" since we'd need to know `f2`
            # with `endpoint=False`
            t1 = np.hstack([t1, t2[0]])
            sqf1 = _exp_fm(t1, f0, f1)[:-1]
            sqf2 = _exp_fm(t2, f1, f2)
            ssq_freqs = np.hstack([sqf1, sqf2])
            # sanity-check that the generated vector has the same transition
            ssq_idx = logscale_transition_idx(ssq_freqs)
            if ssq_idx is None:
                raise Exception("couldn't find logscale transition index of "
                                "generated `ssq_freqs`; something went wrong")
            assert (na - ssq_idx) == idx, "{} != {}".format(na - ssq_idx, idx)
    else:
        if transform == 'cwt':
            ssq_freqs = np.linspace(fm, fM, na)
        elif transform == 'stft':
            # STFT rows are linear in [0, fs/2] regardless of `fm`
            ssq_freqs = np.linspace(0, .5, na) / dt
    return ssq_freqs
def _exp_fm(t, fmin, fmax):
tmin, tmax = t.min(), t.max()
a = (fmin**tmax / fmax**tmin) ** (1/(tmax - tmin))
b = fmax**(1/tmax) * (1/a)**(1/tmax)
return a*b**t
def _get_center_frequency(wavelet, N, maprange, dt, scale, was_padded):
    """Physical center frequency (cycles/second) of `wavelet` at `scale`."""
    # account for the power-of-2 padding done upstream by `cwt`
    n = p2up(N)[0] if was_padded else N
    opts = dict(wavelet=wavelet, N=n, scale=scale, kind=maprange)
    if maprange == 'energy':
        opts['force_int'] = True
    wc = center_frequency(**opts)
    # radians/sample -> cycles/second
    fc = wc / (2*pi) / dt
    return fc
#### misc ####################################################################
def _check_ssqueezing_args(squeezing, maprange=None, wavelet=None, difftype=None,
difforder=None, get_w=None, transform='cwt'):
if transform not in ('cwt', 'stft'):
raise ValueError("`transform` must be one of: cwt, stft "
"(got %s)" % squeezing)
if not isinstance(squeezing, (str, FunctionType)):
raise TypeError("`squeezing` must be string or function "
"(got %s)" % type(squeezing))
elif isinstance(squeezing, str):
assert_is_one_of(squeezing, 'squeezing', ('sum', 'lebesgue', 'abs'))
# maprange
if maprange is not None:
if isinstance(maprange, (tuple, list)):
if not all(isinstance(m, (float, int)) for m in maprange):
raise ValueError("all elements of `maprange` must be "
"float or int")
elif isinstance(maprange, str):
assert_is_one_of(maprange, 'maprange', ('maximal', 'peak', 'energy'))
else:
raise TypeError("`maprange` must be str, tuple, or list "
"(got %s)" % type(maprange))
if isinstance(maprange, str) and maprange != 'maximal':
if transform != 'cwt':
NOTE("string `maprange` currently only functional with "
"`transform='cwt'`")
elif wavelet is None:
raise ValueError(f"maprange='{maprange}' requires `wavelet`")
# difftype
if difftype is not None:
if difftype not in ('trig', 'phase', 'numeric'):
raise ValueError("`difftype` must be one of: direct, phase, numeric"
" (got %s)" % difftype)
elif difftype != 'trig':
from .configs import USE_GPU
if USE_GPU():
raise ValueError("GPU computation only supports "
"`difftype = 'trig'`")
elif not get_w:
raise ValueError("`difftype != 'trig'` requires `get_w = True`")
# difforder
if difforder is not None:
if difftype != 'numeric':
WARN("`difforder` is ignored if `difftype != 'numeric'")
elif difforder not in (1, 2, 4):
raise ValueError("`difforder` must be one of: 1, 2, 4 "
"(got %s)" % difforder)
elif difftype == 'numeric':
difforder = 4
return difforder
| 15,556
| 41.159892
| 82
|
py
|
ssqueezepy
|
ssqueezepy-master/ssqueezepy/algos.py
|
# -*- coding: utf-8 -*-
"""CPU- & GPU-accelerated routines, and few neat algorithms.
"""
import numpy as np
from numba import jit, prange
from functools import reduce
from .utils.backend import asnumpy, cp, torch
from .utils.gpu_utils import _run_on_gpu, _get_kernel_params
from .utils import backend as S
from .configs import IS_PARALLEL
def nCk(n, k):
    """n-Choose-k (binomial coefficient).

    Uses exact integer arithmetic: `numer` is always divisible by `denom`,
    so `//` returns the exact integer result (true division `/` returned a
    float, losing precision and overflowing to inf for large `n`).
    """
    mul = lambda a, b: a * b
    r = min(k, n - k)
    numer = reduce(mul, range(n, n - r, -1), 1)
    denom = reduce(mul, range(1, r + 1), 1)
    return numer // denom
#### `indexed_sum` ###########################################################
def indexed_sum(a, k, parallel=None):
    """Sum `a` into rows of 2D array according to indices given by 2D `k`."""
    out = np.zeros(a.shape, dtype=a.dtype)
    # `parallel=None` defers to the global config; explicit False wins outright
    use_par = parallel if parallel is not None else IS_PARALLEL()
    kernel = _indexed_sum_par if use_par else _indexed_sum
    kernel(a, k, out)
    return out
@jit(nopython=True, cache=True)
def _indexed_sum(a, k, out):
    # Serial kernel: out[k[i, j], j] += a[i, j] for every (i, j).
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            out[k[i, j], j] += a[i, j]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_par(a, k, out):
    # Parallel over columns via `prange`: each thread owns one output column,
    # so the `+=` accumulations never race.
    for j in prange(a.shape[1]):
        for i in range(a.shape[0]):
            out[k[i, j], j] += a[i, j]
def _process_ssq_params(Wx, w_or_dWx, ssq_freqs, const, logscale, flipud, out,
                        gamma, parallel, complex_out=True, Sfs=None):
    """Shared argument preprocessing for `ssqueeze_fast` & `indexed_sum_onfly`.

    Normalizes `out`, `const`, and bin parameters, and prepares either GPU
    kernel arguments or CPU function kwargs depending on whether `Wx` is a
    tensor. Returns
        (out, params, args, kernel_kw, ssq_scaletype)  on GPU, or
        (Wx, w_or_dWx, out, params, ssq_scaletype)     on CPU.
    """
    S.warn_if_tensor_and_par(Wx, parallel)
    gpu = S.is_tensor(Wx)
    # parallel CPU execution is moot on GPU
    parallel = (parallel or IS_PARALLEL()) and not gpu
    # process `Wx`, `w_or_dWx`, `out`
    if out is None:
        # GPU kernels write complex output as interleaved (real, imag) floats
        out_shape = (*Wx.shape, 2) if (gpu and complex_out) else Wx.shape
        if gpu:
            out_dtype = (torch.float32 if Wx.dtype == torch.complex64 else
                         torch.float64)
            out = torch.zeros(out_shape, dtype=out_dtype, device=Wx.device)
        else:
            out = np.zeros(out_shape, dtype=Wx.dtype)
    elif complex_out and gpu:
        out = torch.view_as_real(out)
    if gpu:
        Wx = torch.view_as_real(Wx)
        if 'complex' in str(w_or_dWx.dtype):
            w_or_dWx = torch.view_as_real(w_or_dWx)
    # process `const`
    len_const = (const.numel() if isinstance(const, torch.Tensor) else
                 (const.size if isinstance(const, np.ndarray) else 1))
    if len_const != len(Wx):
        # scalar (or mismatched-length) const -> broadcast to one-per-row
        if gpu:
            const_arr = torch.full((len(Wx),), fill_value=const,
                                   device=Wx.device, dtype=Wx.dtype)
        else:
            const_arr = np.full(len(Wx), const, dtype=Wx.dtype)
    elif gpu and isinstance(const, np.ndarray):
        const_arr = torch.as_tensor(const, dtype=Wx.dtype, device=Wx.device)
    else:
        const_arr = const
    const_arr = const_arr.squeeze()
    # process other constants
    if logscale:
        _, params = _get_params_find_closest_log(ssq_freqs)
    else:
        dv = float(ssq_freqs[1] - ssq_freqs[0])
        dv = _ensure_nonzero_nonnegative('dv', dv)
        params = dict(vmin=float(ssq_freqs[0]), dv=dv)
    if gpu:
        # process kernel params
        (blockspergrid, threadsperblock, kernel_kw, str_dtype
         ) = _get_kernel_params(Wx, dim=1)
        M = kernel_kw['M']
        # `extra` injects the flipud index reversal directly into kernel source
        kernel_kw.update(dict(f='f' if kernel_kw['dtype'] == 'float' else '',
                              extra=f"k = {M} - 1 - k;" if flipud else ""))
        # collect tensors & constants
        if 'idx1' in params:
            params['idx1'] = int(params['idx1'])
        kernel_args = [Wx.data_ptr(), w_or_dWx.data_ptr(), out.data_ptr(),
                       const_arr.data_ptr(), *list(params.values())]
        if gamma is not None:
            kernel_args.insert(4, cp.asarray(gamma, dtype=str_dtype))
        if Sfs is not None:
            kernel_args.insert(2, Sfs.data_ptr())
        ssq_scaletype = (('log_piecewise' if 'idx1' in params else 'log')
                         if logscale else 'lin')
    else:
        # cpu function params
        params.update(dict(const=const_arr, flipud=flipud, omax=len(out) - 1))
        if gamma is not None:
            params['gamma'] = gamma
        if Sfs is not None:
            params['Sfs'] = Sfs
        # '_par' suffix selects the numba parallel variant
        ssq_scaletype = (('log_piecewise' if 'idx1' in params else 'log')
                         if logscale else 'lin')
        ssq_scaletype += '_par' if parallel else ''
    if gpu:
        args = (blockspergrid, threadsperblock, *kernel_args)
        return (out, params, args, kernel_kw, ssq_scaletype)
    return (Wx, w_or_dWx, out, params, ssq_scaletype)
def ssqueeze_fast(Wx, dWx, ssq_freqs, const, logscale=False, flipud=False,
                  gamma=None, out=None, Sfs=None, parallel=None):
    """`indexed_sum`, `find_closest`, and `phase_transform` within same loop,
    sparing two arrays and intermediate elementwise conditionals; see
    `help(algos.find_closest)` on how `k` is computed.
    """
    # `Sfs` presence distinguishes the STFT path from CWT
    transform = 'stft' if Sfs is not None else 'cwt'

    def fn_name(ssq_scaletype):
        # kernel naming convention: 'ssq_stft' or 'ssq_cwt_<scaletype>'
        if transform == 'stft':
            return 'ssq_stft'
        return f'ssq_cwt_{ssq_scaletype}'

    outs = _process_ssq_params(Wx, dWx, ssq_freqs, const, logscale, flipud, out,
                               gamma, parallel, complex_out=True, Sfs=Sfs)
    if S.is_tensor(Wx):
        out, params, args, kernel_kw, ssq_scaletype = outs
        _run_on_gpu(_kernel_codes[fn_name(ssq_scaletype)], *args, **kernel_kw)
        out = torch.view_as_complex(out)
    else:
        Wx, dWx, out, params, ssq_scaletype = outs
        fn = _cpu_fns[fn_name(ssq_scaletype)]
        if transform == 'cwt':
            fn(Wx, dWx, out, **params)
        else:
            # `Sfs` is positional for the stft kernel; pop so it isn't doubled
            fn(Wx, dWx, params.pop('Sfs'), out, **params)
    return out
def indexed_sum_onfly(Wx, w, ssq_freqs, const=1, logscale=False, flipud=False,
                      out=None, parallel=None):
    """`indexed_sum` and `find_closest` within same loop, sparing an array;
    see `help(algos.find_closest)` on how `k` is computed.
    """
    outs = _process_ssq_params(Wx, w, ssq_freqs, const, logscale, flipud, out,
                               gamma=None, parallel=parallel, complex_out=True)
    if not S.is_tensor(Wx):
        Wx, w, out, params, ssq_scaletype = outs
        _cpu_fns[f'indexed_sum_{ssq_scaletype}'](Wx, w, out, **params)
    else:
        out, params, args, kernel_kw, ssq_scaletype = outs
        _run_on_gpu(_kernel_codes[f'indexed_sum_{ssq_scaletype}'],
                    *args, **kernel_kw)
        # kernel wrote interleaved (real, imag); restore complex view
        out = torch.view_as_complex(out)
    return out
@jit(nopython=True, cache=True)
def _indexed_sum_log(Wx, w, out, const, vlmin, dvl, omax, flipud=False):
    # Squeeze onto log-distributed bins: compute each phase value's nearest
    # log-bin analytically, k = round((log2(w) - vlmin)/dvl) clipped to
    # [0, omax], then accumulate Wx * const into that bin.
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if np.isinf(w[i, j]):
                continue  # inf phase -> point was thresholded out; skip
            k = int(min(round(max((np.log2(w[i, j]) - vlmin) / dvl, 0)), omax))
            if flipud:
                k = omax - k
            out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_log_par(Wx, w, out, const, vlmin, dvl, omax, flipud=False):
    # Parallel twin of `_indexed_sum_log`: `prange` over columns so each
    # thread owns a single output column and the `+=` updates don't race.
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if np.isinf(w[i, j]):
                continue
            k = int(min(round(max((np.log2(w[i, j]) - vlmin) / dvl, 0)), omax))
            if flipud:
                k = omax - k
            out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _indexed_sum_log_piecewise(Wx, w, out, const, vlmin0, vlmin1, dvl0, dvl1,
                               idx1, omax, flipud=False):
    # Two log segments: values with log2 above `vlmin1` map into the upper
    # segment (offset by `idx1`, step `dvl1`); the rest into the lower
    # segment (origin `vlmin0`, step `dvl0`).
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if np.isinf(w[i, j]):
                continue
            wl = np.log2(w[i, j])
            if wl > vlmin1:
                k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
            else:
                k = int(round(max((wl - vlmin0) / dvl0, 0)))
            if flipud:
                k = omax - k
            out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_log_piecewise_par(Wx, w, out, const, vlmin0, vlmin1, dvl0, dvl1,
                                   idx1, omax, flipud=False):
    # Parallel twin of `_indexed_sum_log_piecewise` (prange over columns).
    # it's also possible to construct the if-else logic in terms of mappables
    # of `vlmin`, `dvl`, and `idx`, which generalizes to any number of transitions
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if np.isinf(w[i, j]):
                continue
            wl = np.log2(w[i, j])
            if wl > vlmin1:
                k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
            else:
                k = int(round(max((wl - vlmin0) / dvl0, 0)))
            if flipud:
                k = omax - k
            out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _indexed_sum_lin(Wx, w, out, const, vmin, dv, omax, flipud=False):
    # Squeeze onto linearly-spaced bins: k = round((w - vmin)/dv) clipped
    # to [0, omax]; accumulate Wx * const into bin k.
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if np.isinf(w[i, j]):
                continue  # thresholded-out point
            k = int(min(round(max((w[i, j] - vmin) / dv, 0)), omax))
            if flipud:
                k = omax - k
            out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_lin_par(Wx, w, out, const, vmin, dv, omax, flipud=False):
    # Parallel twin of `_indexed_sum_lin` (prange over columns; one thread
    # per output column, so accumulation is race-free).
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if np.isinf(w[i, j]):
                continue
            k = int(min(round(max((w[i, j] - vmin) / dv, 0)), omax))
            if flipud:
                k = omax - k
            out[k, j] += Wx[i, j] * const[i]
#### `find_closest` algorithms ###############################################
def find_closest(a, v, logscale=False, parallel=None, smart=None):
    """`argmin(abs(a[i, j] - v))` for all `i, j`; `a` is 2D, `v` is 1D.

    # Arguments:
        a: np.ndarray
            2D array.
        v: np.ndarray
            1D array.
        logscale: bool (default False)
            Whether "closest" is taken in linear or logarithmic space.
        parallel: bool / None
            Whether to use algorithms with `numba.jit(parallel=True)`.
        smart: bool / None
            Whether to use a very fast smart algorithm (but still the slowest
            for ssqueezing).
            Credit: Divakar -- https://stackoverflow.com/a/64526158/10133797

    **Default behavior**: if only `a` & `v` are passed, `find_closest_smart`
    is called.

    **Usage guide**: use `smart=True` (or `find_closest_brute`, not callable
    from here) when 100% accuracy is required or `v` isn't linearly or
    logarithmically distributed; `_smart` is faster single-threaded, `_brute`
    can win via parallelism. Otherwise `find_closest_lin` / `find_closest_log`
    are much faster (log-piecewise is handled): they compute the landing bin
    analytically as `(w - bin_min) / bin_step_size`, bounded to
    [0, len(v) - 1], and differ from exact only per float precision (never by
    more than one index).
    """
    assert not S.is_tensor(a), "`find_closest` doesn't support GPU execution"
    if smart is None and parallel is None:
        smart = True
    elif parallel and smart:
        WARN("find_closest: `smart` overrides `parallel`")

    if smart:
        # exact method; log-space comparison via transforming both inputs
        inputs = (np.log2(a), np.log2(v)) if logscale else (a, v)
        return find_closest_smart(*inputs)
    fn = find_closest_log if logscale else find_closest_lin
    return fn(a, v, parallel=parallel)
@jit(nopython=True, cache=True, parallel=True)
def find_closest_brute(a, v):
    """Computes exactly but exhaustively."""
    # O(a.size * len(v)): full argmin per element; exact for arbitrary `v`.
    out = np.zeros(a.shape, dtype=np.int32)
    for i in prange(a.shape[0]):
        for j in prange(a.shape[1]):
            out[i, j] = np.argmin(np.abs(a[i, j] - v))
    return out
def find_closest_smart(a, v):
    """Equivalent to argmin(abs(a[i, j] - v)) for all i, j; a is 2D, v is 1D.
    Credit: Divakar -- https://stackoverflow.com/a/64526158/10133797
    """
    order = v.argsort()
    v_sorted = v[order]
    # right-neighbor candidate index for each element of `a`
    pos = np.searchsorted(v_sorted, a)
    pos[pos == len(v)] = len(v) - 1
    left = (pos - 1).clip(min=0)
    # step back wherever the left neighbor is at least as close (ties -> left)
    take_left = np.abs(a - v_sorted[pos]) >= np.abs(v_sorted[left] - a)
    take_left[pos == 0] = 0
    pos[take_left] -= 1
    # map sorted positions back to original `v` indices
    return order[pos]
def _ensure_nonzero_nonnegative(name, x, silent=False):
    """Clamp `x` up to EPS64, warning (unless `silent`) when clamping occurs."""
    if x >= EPS64:
        return x
    if not silent:
        WARN("computed `%s` (%.2e) is below EPS64; will set to " % (name, x)
             + "EPS64. Advised to check `ssq_freqs`.")
    return EPS64
def _get_params_find_closest_log(v):
    """Precompute log-bin parameters of `v` for the analytic `find_closest_log*`
    / `_indexed_sum_log*` kernels; returns `(transition_idx, params_dict)`."""
    idx = logscale_transition_idx(v)
    vlmin = float(np.log2(v[0]))
    if idx is None:
        # single log segment
        dvl = _ensure_nonzero_nonnegative(
            'dvl', float(np.log2(v[1]) - np.log2(v[0])))
        return idx, dict(vlmin=vlmin, dvl=dvl)
    # two log segments (log-piecewise)
    vlmin0 = vlmin
    vlmin1 = float(np.log2(v[idx - 1]))
    dvl0 = float(np.log2(v[1]) - np.log2(v[0]))
    dvl1 = float(np.log2(v[idx]) - np.log2(v[idx - 1]))
    # see comment above `f1` in `ssqueezing._compute_associated_frequencies`
    dvl0 = _ensure_nonzero_nonnegative('dvl0', dvl0, silent=True)
    dvl1 = _ensure_nonzero_nonnegative('dvl1', dvl1)
    idx1 = np.asarray(idx - 1, dtype=np.int32)
    return idx, dict(vlmin0=vlmin0, vlmin1=vlmin1, dvl0=dvl0, dvl1=dvl1,
                     idx1=idx1)
def find_closest_log(a, v, parallel=True):
    """Analytic nearest-bin lookup assuming log (or log-piecewise) spaced `v`."""
    idx, params = _get_params_find_closest_log(v)
    out = np.zeros(a.shape, dtype=np.int32)
    params['omax'] = len(out) - 1
    # `idx` non-None means `v` is log-piecewise (two segments)
    if idx is not None:
        fn = (_find_closest_log_piecewise_par if parallel else
              _find_closest_log_piecewise)
    else:
        fn = _find_closest_log_par if parallel else _find_closest_log
    fn(a, out, **params)
    return out
@jit(nopython=True, cache=True)
def _find_closest_log(a, out, vlmin, dvl, omax):
    # Analytic log-bin index: round((log2(a) - vlmin)/dvl) clipped to [0, omax].
    # NOTE(review): no `isinf` guard here (unlike `_indexed_sum_log*`), so
    # inf entries propagate -- confirm callers never pass inf on this path.
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            out[i, j] = min(round(max((np.log2(a[i, j]) - vlmin) / dvl, 0)), omax)
@jit(nopython=True, cache=True, parallel=True)
def _find_closest_log_par(a, out, vlmin, dvl, omax):
    """`prange`-parallelized variant of `_find_closest_log`."""
    for i in prange(a.shape[0]):
        for j in prange(a.shape[1]):
            out[i, j] = min(round(max((np.log2(a[i, j]) - vlmin) / dvl, 0)), omax)
@jit(nopython=True, cache=True)
def _find_closest_log_piecewise(a, out, vlmin0, vlmin1, dvl0, dvl1, idx1,
                                omax):
    """Serial kernel for `v` with one log-scale transition: values above the
    second segment's origin `vlmin1` map via (vlmin1, dvl1) offset by `idx1`,
    the rest via (vlmin0, dvl0).

    NOTE(review): unlike the `_par` variant, there is no isinf guard here --
    confirm callers pass finite `a`.
    """
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            al = np.log2(a[i, j])
            if al > vlmin1:
                out[i, j] = min(round((al - vlmin1) / dvl1) + idx1, omax)
            else:
                out[i, j] = round(max((al - vlmin0) / dvl0, 0))
@jit(nopython=True, cache=True, parallel=True)
def _find_closest_log_piecewise_par(a, out, vlmin0, vlmin1, dvl0, dvl1, idx1,
                                    omax):
    """`prange`-parallelized variant of `_find_closest_log_piecewise`.
    Infinite entries of `a` are skipped (their `out` stays 0)."""
    # it's also possible to construct the if-else logic in terms of mappables
    # of `vlmin`, `dvl`, and `idx`, which generalizes to any number of transitions
    for i in prange(a.shape[0]):
        for j in prange(a.shape[1]):
            if np.isinf(a[i, j]):
                continue
            al = np.log2(a[i, j])
            if al > vlmin1:
                out[i, j] = min(round((al - vlmin1) / dvl1) + idx1, omax)
            else:
                out[i, j] = round(max((al - vlmin0) / dvl0, 0))
def find_closest_lin(a, v, parallel=True):
    """Closest-index search specialized for linearly (uniformly) spaced `v`.

    For each `a[i, j]`, computes the index into `v` arithmetically as
    `round((a - v[0]) / (v[1] - v[0]))`, clamped to valid indices.

    # Arguments
        a: 2D array of values to locate.
        v: 1D uniformly spaced array.
        parallel: bool. Use the `prange`-parallelized kernel.

    # Returns
        int32 array of `a.shape` with indices into `v`.
    """
    vmin = v[0]
    dv = v[1] - v[0]
    out = np.zeros(a.shape, dtype=np.int32)
    fn = _find_closest_lin_par if parallel else _find_closest_lin
    # clamp to valid indices into `v`; was `len(out) - 1` (a.shape[0] - 1),
    # only correct when `a.shape[0] == len(v)`
    fn(a, out, vmin, dv, omax=len(v) - 1)
    return out
@jit(nopython=True, cache=True)
def _find_closest_lin(a, out, vmin, dv, omax):
    """Serial kernel: nearest linear-grid index, clamped to [0, omax]."""
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            out[i, j] = min(round(max((a[i, j] - vmin) / dv, 0)), omax)
@jit(nopython=True, cache=True, parallel=True)
def _find_closest_lin_par(a, out, vmin, dv, omax):
    """`prange`-parallelized variant of `_find_closest_lin`."""
    for i in prange(a.shape[0]):
        for j in prange(a.shape[1]):
            out[i, j] = min(round(max((a[i, j] - vmin) / dv, 0)), omax)
#### Replacers ###############################################################
def _process_replace_fn_args(x, ref):
if ref is None:
ref = x
xndim = x.ndim # store original ndim to undo expansion later
if not (isinstance(x, np.ndarray) and isinstance(ref, np.ndarray)):
raise TypeError("inputs must be numpy arrays "
"(got %s, %s)" % (type(x), type(ref)))
while x.ndim < 3:
x = np.expand_dims(x, -1)
while ref.ndim < 3:
ref = np.expand_dims(ref, -1)
if x.ndim > 3 or ref.ndim > 3:
raise ValueError("inputs must be 1D, 2D, or 3D numpy arrays "
"(got x.ndim==%d, ref.ndim==%d)" % (x.ndim, ref.ndim))
return x, ref, xndim
def replace_at_inf_or_nan(x, ref=None, replacement=0.):
    """Set entries of `x` to `replacement` wherever `ref` is +-inf or NaN.
    `ref` defaults to `x`; accepts 1D-3D arrays, original shape is restored."""
    x, ref, orig_ndim = _process_replace_fn_args(x, ref)
    x = _replace_at_inf_or_nan(x, ref, replacement)
    for _ in range(x.ndim - orig_ndim):
        x = x.squeeze(axis=-1)
    return x
def replace_at_inf(x, ref=None, replacement=0.):
    """Set entries of `x` to `replacement` wherever `ref` is +-infinity.
    `ref` defaults to `x`; accepts 1D-3D arrays, original shape is restored."""
    x, ref, orig_ndim = _process_replace_fn_args(x, ref)
    x = _replace_at_inf(x, ref, replacement)
    for _ in range(x.ndim - orig_ndim):
        x = x.squeeze(axis=-1)
    return x
def replace_at_nan(x, ref=None, replacement=0.):
    """Set entries of `x` to `replacement` wherever `ref` is NaN.
    `ref` defaults to `x`; accepts 1D-3D arrays, original shape is restored."""
    x, ref, orig_ndim = _process_replace_fn_args(x, ref)
    x = _replace_at_nan(x, ref, replacement)
    for _ in range(x.ndim - orig_ndim):
        x = x.squeeze(axis=-1)
    return x
def replace_at_value(x, ref=None, value=0., replacement=0.):
    """Set entries of `x` to `replacement` wherever `ref == value`.
    `ref` defaults to `x`; accepts 1D-3D arrays, original shape is restored.

    Note: `value=np.nan` won't work (but np.inf will, separate from -np.inf).
    """
    x, ref, orig_ndim = _process_replace_fn_args(x, ref)
    x = _replace_at_value(x, ref, value, replacement)
    for _ in range(x.ndim - orig_ndim):
        x = x.squeeze(axis=-1)
    return x
def replace_under_abs(x, ref=None, value=0., replacement=0., parallel=None):
    """Set `x[i, j] = replacement` wherever `abs(ref[i, j]) < value`, in-place.

    Dispatches to a GPU kernel for tensors, else to the parallel or serial
    numba routine (`parallel=None` defers to `IS_PARALLEL()`).
    """
    if S.is_tensor(x):
        fn = _replace_under_abs_gpu
    elif parallel or (parallel is None and IS_PARALLEL()):
        fn = _replace_under_abs_par
    else:
        fn = _replace_under_abs
    fn(x, ref, value, replacement)
    # NOTE: mutates `x` in-place; implicitly returns None (unlike the
    # other `replace_at_*` helpers, which return the array)
@jit(nopython=True, cache=True)
def _replace_at_inf_or_nan(x, ref, replacement=0.):
    """Numba 3D kernel: x[i,j,k] = replacement where ref[i,j,k] is inf or NaN;
    mutates and returns `x`."""
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            for k in range(x.shape[2]):
                if np.isinf(ref[i, j, k]) or np.isnan(ref[i, j, k]):
                    x[i, j, k] = replacement
    return x
@jit(nopython=True, cache=True)
def _replace_at_inf(x, ref, replacement=0.):
    """Numba 3D kernel: x[i,j,k] = replacement where ref[i,j,k] is +-inf;
    mutates and returns `x`."""
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            for k in range(x.shape[2]):
                if np.isinf(ref[i, j, k]):
                    x[i, j, k] = replacement
    return x
@jit(nopython=True, cache=True)
def _replace_at_nan(x, ref, replacement=0.):
    """Numba 3D kernel: x[i,j,k] = replacement where ref[i,j,k] is NaN;
    mutates and returns `x`."""
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            for k in range(x.shape[2]):
                if np.isnan(ref[i, j, k]):
                    x[i, j, k] = replacement
    return x
@jit(nopython=True, cache=True)
def _replace_at_value(x, ref, value=0., replacement=0.):
    """Numba 3D kernel: x[i,j,k] = replacement where ref[i,j,k] == value;
    mutates and returns `x`. (Exact equality -- NaN never matches.)"""
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            for k in range(x.shape[2]):
                if ref[i, j, k] == value:
                    x[i, j, k] = replacement
    return x
@jit(nopython=True, cache=True)
def _replace_under_abs(x, ref, value=0., replacement=0.):
    """Serial 2D kernel: x[i,j] = replacement where abs(ref[i,j]) < value;
    in-place, returns None."""
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            if abs(ref[i, j]) < value:
                x[i, j] = replacement
@jit(nopython=True, cache=True, parallel=True)
def _replace_under_abs_par(x, ref, value=0., replacement=0.):
    """`prange`-parallelized variant of `_replace_under_abs`."""
    for i in prange(x.shape[0]):
        for j in prange(x.shape[1]):
            if abs(ref[i, j]) < value:
                x[i, j] = replacement
def _replace_under_abs_gpu(w, Wx, value=0., replacement=0.):
    """Not as general as CPU variants (namely `w` must be real and `Wx`
    must be complex).

    Sets `w[i, j] = replacement` wherever `|Wx[i, j]| < value`, in-place on
    GPU. `Wx` is viewed as a (..., 2) real tensor so the kernel indexes
    [0]=real, [1]=imag; `norm${f}(2, .)` is the 2-vector Euclidean norm,
    i.e. the complex magnitude.
    """
    kernel = '''
    extern "C" __global__
    void replace_under_abs(${dtype} w[${M}][${N}],
                           ${dtype} Wx[${M}][${N}][2],
                           ${dtype} *value, ${dtype} *replacement)
    {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      int j = blockIdx.y * blockDim.y + threadIdx.y;
      if (i >= ${M} || j >= ${N})
        return;

      if (norm${f}(2, Wx[i][j]) < *value)
        w[i][j] = *replacement;
    }
    '''
    (blockspergrid, threadsperblock, kernel_kw, str_dtype
     ) = _get_kernel_params(Wx, dim=2)
    # `normf` for float32, `norm` for float64
    kernel_kw['f'] = 'f' if kernel_kw['dtype'] == 'float' else ''

    Wx = torch.view_as_real(Wx)
    kernel_args = [w.data_ptr(), Wx.data_ptr(),
                   cp.asarray(value, dtype=str_dtype),
                   cp.asarray(replacement, dtype=str_dtype)]

    _run_on_gpu(kernel, blockspergrid, threadsperblock,
                *kernel_args, **kernel_kw)
def zero_denormals(x, parallel=None):
    """Denormals are very small non-zero numbers that can significantly slow CPU
    execution (e.g. FFT). See https://github.com/scipy/scipy/issues/13764

    Zeroes them in-place on the flattened view of `x`.
    """
    if parallel is None:
        parallel = IS_PARALLEL()
    # take a little bigger than smallest, seems to improve FFT speed
    threshold = 1000 * np.finfo(x.dtype).tiny
    kernel = _zero_denormals_par if parallel else _zero_denormals
    kernel(x.ravel(), threshold)
@jit(nopython=True, cache=True)
def _zero_denormals(x, tiny):
    """Serial kernel: zero entries strictly between -tiny and tiny, in-place
    on 1D `x`."""
    for i in range(x.size):
        if x[i] < tiny and x[i] > -tiny:
            x[i] = 0
@jit(nopython=True, cache=True, parallel=True)
def _zero_denormals_par(x, tiny):
    """`prange`-parallelized variant of `_zero_denormals`."""
    for i in prange(x.size):
        if x[i] < tiny and x[i] > -tiny:
            x[i] = 0
#### misc (short) ############################################################
@jit(nopython=True, cache=True)
def _min_neglect_idx(arr, th=1e-12):
    """Used in utils.integrate_analytic and ._integrate_bounded.

    Returns index of the first element of `arr` below `th`; if none is,
    returns the last index. NOTE(review): assumes `arr` is non-empty --
    `i` would be undefined for an empty input.
    """
    for i, x in enumerate(arr):
        if x < th:
            return i
    return i
#### misc (long) #############################################################
def find_maximum(fn, step_size=1e-3, steps_per_search=1e4, step_start=0,
                 step_limit=1000, min_value=-1):
    """Finds max of any function with a single maximum, and input value
    at which the maximum occurs. Inputs and outputs must be 1D.
    Must be strictly non-decreasing from step_start up to maximum of interest.
    Takes absolute value of fn's outputs.

    Searches in consecutive windows of `steps_per_search` points, each
    spanning `steps_per_search * step_size` input units, stopping once a
    window's maximum drops below the best seen so far.

    # Raises
        ValueError: maximum not found before `step_limit`.
    """
    steps_per_search = int(steps_per_search)
    largest_max = min_value
    input_value = None  # set on first window where a new max is found
    increment = int(steps_per_search * step_size)

    search_idx = 0
    while True:
        start = step_start + increment * search_idx
        end = start + increment
        input_values = np.linspace(start, end, steps_per_search, endpoint=False)
        # removed dead pre-loop `np.linspace(step_start, increment)` and the
        # preallocated `output_values` buffer -- both were overwritten here
        output_values = np.abs(asnumpy(fn(input_values)))

        output_max = output_values.max()
        if output_max > largest_max:
            largest_max = output_max
            input_value = input_values[np.argmax(output_values)]
        elif output_max < largest_max:
            # past the (single) maximum
            break
        search_idx += 1
        if input_values.max() > step_limit:
            raise ValueError(("could not find function maximum with given "
                              "(step_size, steps_per_search, step_start, "
                              "step_limit, min_value)=({}, {}, {}, {}, {})"
                              ).format(step_size, steps_per_search, step_start,
                                       step_limit, min_value))
    return input_value, largest_max
def find_first_occurrence(fn, value, step_size=1e-3, steps_per_search=1e4,
                          step_start=0, step_limit=1000):
    """Finds earliest input value for which `fn(input_value) == value`, searching
    from `step_start` to `step_limit` in `step_size` increments.
    Takes absolute value of fn's outputs.

    # Raises
        ValueError: no match found up to `step_limit`.
    """
    steps_per_search = int(steps_per_search)
    increment = int(steps_per_search * step_size)

    step_limit_exceeded = False
    search_idx = 0
    while True:
        start = step_start + increment * search_idx
        end = start + increment
        input_values = np.linspace(start, end, steps_per_search, endpoint=False)

        if input_values.max() > step_limit:
            step_limit_exceeded = True
            input_values = np.clip(input_values, None, step_limit)

        output_values = np.abs(asnumpy(fn(input_values)))
        mxdiff = np.abs(np.diff(output_values)).max()

        # more reliable than `argmin not in (0, len - 1)` for smooth `fn`
        if np.any(np.abs(output_values - value) <= mxdiff):
            idx = np.argmin(np.abs(output_values - value))
            break

        # raise inside the loop: previously a miss past `step_limit` looped
        # forever, and a *hit* in the final clipped window still raised
        if step_limit_exceeded:
            raise ValueError(("could not find input value to yield function "
                              f"output value={value} with given "
                              "(step_size, steps_per_search, step_start, "
                              "step_limit)=({}, {}, {}, {})"
                              ).format(step_size, steps_per_search,
                                       step_start, step_limit))
        search_idx += 1

    input_value = input_values[idx]
    output_value = output_values[idx]
    return input_value, output_value
def phase_cwt_cpu(Wx, dWx, gamma, parallel=None):
    """Computes only the imaginary part of `dWx / Wx` while dividing by 2*pi
    in same operation; doesn't compute division at all if `abs(Wx) < gamma`.
    Less memory & less computation than `(dWx / Wx).imag / (2*pi)`, same result.

    Entries with `abs(Wx) < gamma` are set to `inf` in the output.
    `parallel=None` defers to `IS_PARALLEL()`.
    """
    dtype = 'float32' if Wx.dtype == np.complex64 else 'float64'
    out = np.zeros(Wx.shape, dtype=dtype)
    gamma = np.asarray(gamma, dtype=dtype)
    # fix: respect explicit `parallel=False` (previously `parallel or
    # IS_PARALLEL()` overrode it); matches `zero_denormals`
    parallel = parallel if parallel is not None else IS_PARALLEL()
    fn = _phase_cwt_par if parallel else _phase_cwt
    fn(Wx, dWx, out, gamma)
    return out
@jit(nopython=True, cache=True)
def _phase_cwt(Wx, dWx, out, gamma):
    """Serial kernel for `phase_cwt_cpu`: out = |Im(dWx/Wx)| / (2*pi),
    or inf where |Wx| < gamma. 6.283185307179586 == 2*pi."""
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if abs(Wx[i, j]) < gamma:
                out[i, j] = np.inf
            else:
                # Im((A+iB)/(C+iD)) = (B*C - A*D) / (C^2 + D^2)
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                out[i, j] = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
@jit(nopython=True, cache=True, parallel=True)
def _phase_cwt_par(Wx, dWx, out, gamma):
    """`prange`-parallelized variant of `_phase_cwt`."""
    for i in prange(Wx.shape[0]):
        for j in prange(Wx.shape[1]):
            if abs(Wx[i, j]) < gamma:
                out[i, j] = np.inf
            else:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                out[i, j] = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
def phase_cwt_gpu(Wx, dWx, gamma):
    """Same as `phase_cwt_cpu`, but on GPU.

    Complex tensors are viewed as (..., 2) real via `torch.view_as_real`,
    so the kernel indexes [0]=real, [1]=imag; `norm${f}(2, .)` is the
    2-vector Euclidean norm, i.e. the complex magnitude.
    """
    kernel = '''
    extern "C" __global__
    void phase_cwt(${dtype} Wx[${M}][${N}][2],
                   ${dtype} dWx[${M}][${N}][2],
                   ${dtype} out[${M}][${N}],
                   ${dtype} *gamma) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      int j = blockIdx.y * blockDim.y + threadIdx.y;
      if (i >= ${M} || j >= ${N})
        return;

      if (norm${f}(2, Wx[i][j]) < *gamma){
        out[i][j] = 1.0/0.0;
        return;
      }
      ${dtype} A = dWx[i][j][0];
      ${dtype} B = dWx[i][j][1];
      ${dtype} C = Wx[i][j][0];
      ${dtype} D = Wx[i][j][1];
      out[i][j] = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
    }
    '''
    (blockspergrid, threadsperblock, kernel_kw, str_dtype
     ) = _get_kernel_params(Wx, dim=2)
    # `normf`/`log2f` etc. for float32, unsuffixed for float64
    kernel_kw['f'] = 'f' if kernel_kw['dtype'] == 'float' else ''
    out = torch.zeros(Wx.shape, device=Wx.device, dtype=getattr(torch, str_dtype))

    Wx = torch.view_as_real(Wx)
    dWx = torch.view_as_real(dWx)
    kernel_args = [Wx.data_ptr(), dWx.data_ptr(), out.data_ptr(),
                   cp.asarray(gamma, dtype=str_dtype)]

    _run_on_gpu(kernel, blockspergrid, threadsperblock,
                *kernel_args, **kernel_kw)
    return out
def phase_stft_cpu(Wx, dWx, Sfs, gamma, parallel=None):
    """STFT analogue of `phase_cwt_cpu`: computes
    `abs(Sfs[i] - Im(dWx/Wx)/(2*pi))` per row, inf where `abs(Wx) < gamma`.
    `parallel=None` defers to `IS_PARALLEL()`.
    """
    dtype = 'float32' if Wx.dtype == np.complex64 else 'float64'
    out = np.zeros(Wx.shape, dtype=dtype)
    gamma = np.asarray(gamma, dtype=dtype)
    # fix: respect explicit `parallel=False` (previously `parallel or
    # IS_PARALLEL()` overrode it); matches `zero_denormals`
    parallel = parallel if parallel is not None else IS_PARALLEL()
    fn = _phase_stft_par if parallel else _phase_stft
    fn(Wx, dWx, Sfs, out, gamma)
    return out
@jit(nopython=True, cache=True)
def _phase_stft(Wx, dWx, Sfs, out, gamma):
    """Serial kernel for `phase_stft_cpu`: out = |Sfs[i] - Im(dWx/Wx)/(2*pi)|,
    or inf where |Wx| < gamma."""
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if abs(Wx[i, j]) < gamma:
                out[i, j] = np.inf
            else:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                out[i, j] = abs(
                    Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
@jit(nopython=True, cache=True, parallel=True)
def _phase_stft_par(Wx, dWx, Sfs, out, gamma):
    """`prange`-parallelized variant of `_phase_stft`."""
    for i in prange(Wx.shape[0]):
        for j in prange(Wx.shape[1]):
            if abs(Wx[i, j]) < gamma:
                out[i, j] = np.inf
            else:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                out[i, j] = abs(
                    Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
def phase_stft_gpu(Wx, dWx, Sfs, gamma):
    """Same as `phase_stft_cpu`, but on GPU.

    Complex tensors are viewed as (..., 2) real via `torch.view_as_real`,
    so the kernel indexes [0]=real, [1]=imag.
    """
    kernel = '''
    extern "C" __global__
    void phase_stft(${dtype} Wx[${M}][${N}][2],
                    ${dtype} dWx[${M}][${N}][2],
                    ${dtype} Sfs[${M}],
                    ${dtype} out[${M}][${N}],
                    ${dtype} *gamma) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      int j = blockIdx.y * blockDim.y + threadIdx.y;
      if (i >= ${M} || j >= ${N})
        return;

      if (norm${f}(2, Wx[i][j]) < *gamma){
        out[i][j] = 1.0/0.0;
        return;
      }
      ${dtype} A = dWx[i][j][0];
      ${dtype} B = dWx[i][j][1];
      ${dtype} C = Wx[i][j][0];
      ${dtype} D = Wx[i][j][1];
      out[i][j] = abs(Sfs[i] - (B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
    }
    '''
    (blockspergrid, threadsperblock, kernel_kw, str_dtype
     ) = _get_kernel_params(Wx, dim=2)
    # `normf` for float32, `norm` for float64
    kernel_kw['f'] = 'f' if kernel_kw['dtype'] == 'float' else ''
    out = torch.zeros(Wx.shape, device=Wx.device, dtype=getattr(torch, str_dtype))

    Wx = torch.view_as_real(Wx)
    dWx = torch.view_as_real(dWx)
    kernel_args = [Wx.data_ptr(), dWx.data_ptr(), Sfs.data_ptr(), out.data_ptr(),
                   cp.asarray(gamma, dtype=str_dtype)]

    _run_on_gpu(kernel, blockspergrid, threadsperblock,
                *kernel_args, **kernel_kw)
    return out
@jit(nopython=True, cache=True)
def _ssq_cwt_log_piecewise(Wx, dWx, out, const, gamma, vlmin0, vlmin1,
                           dvl0, dvl1, idx1, omax, flipud=False):
    """Fused phase transform + synchrosqueezing sum, piecewise-log frequencies.

    For each (i, j) with |Wx| > gamma: computes phase frequency
    w_ij = |Im(dWx/Wx)|/(2*pi), maps it to output row `k` via the piecewise
    log2-spacing params (see `_find_closest_log_piecewise`), and accumulates
    `Wx[i, j] * const[i]` into `out[k, j]` (row-reversed if `flipud`).
    """
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                wl = np.log2(w_ij)
                if wl > vlmin1:
                    k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
                else:
                    k = int(max(round((wl - vlmin0) / dvl0), 0))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_cwt_log_piecewise_par(Wx, dWx, out, const, gamma, vlmin0, vlmin1,
                               dvl0, dvl1, idx1, omax, flipud=False):
    """`prange`-parallelized `_ssq_cwt_log_piecewise`; parallel over columns j
    so each thread accumulates into its own column of `out` (no write races)."""
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                wl = np.log2(w_ij)
                if wl > vlmin1:
                    k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
                else:
                    k = int(max(round((wl - vlmin0) / dvl0), 0))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _ssq_cwt_log(Wx, dWx, out, const, gamma, vlmin, dvl, omax, flipud=False):
    """Fused phase transform + synchrosqueezing sum, uniform-log frequencies.
    Accumulates `Wx[i, j] * const[i]` into `out[k, j]` where `k` is the
    nearest log2-grid row of the phase frequency."""
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                k = int(min(round(max((np.log2(w_ij) - vlmin) / dvl, 0)), omax))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_cwt_log_par(Wx, dWx, out, const, gamma, vlmin, dvl, omax, flipud=False):
    """`prange`-parallelized `_ssq_cwt_log`; parallel over columns j so each
    thread accumulates into its own column of `out` (no write races)."""
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                k = int(min(round(max((np.log2(w_ij) - vlmin) / dvl, 0)), omax))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _ssq_cwt_lin(Wx, dWx, out, const, gamma, vmin, dv, omax, flipud=False):
    """Fused phase transform + synchrosqueezing sum, linear frequencies.
    Accumulates `Wx[i, j] * const[i]` into `out[k, j]` where `k` is the
    nearest linear-grid row of the phase frequency."""
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_cwt_lin_par(Wx, dWx, out, const, gamma, vmin, dv, omax, flipud=False):
    """`prange`-parallelized `_ssq_cwt_lin`; parallel over columns j so each
    thread accumulates into its own column of `out` (no write races)."""
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _ssq_stft(Wx, dWx, Sfs, out, const, gamma, vmin, dv, omax, flipud=False):
    """Fused STFT phase transform + synchrosqueezing sum (linear frequencies).
    Phase frequency is offset by the bin frequency `Sfs[i]` before mapping."""
    for i in range(Wx.shape[0]):
        for j in range(Wx.shape[1]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs(
                    Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_stft_par(Wx, dWx, Sfs, out, const, gamma, vmin, dv, omax, flipud=False):
    """`prange`-parallelized `_ssq_stft`; parallel over columns j so each
    thread accumulates into its own column of `out` (no write races)."""
    for j in prange(Wx.shape[1]):
        for i in range(Wx.shape[0]):
            if abs(Wx[i, j]) > gamma:
                A, B = dWx[i, j].real, dWx[i, j].imag
                C, D = Wx[i, j].real, Wx[i, j].imag
                w_ij = abs(
                    Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
                k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
                if flipud:
                    k = omax - k
                out[k, j] += Wx[i, j] * const[i]
#### CPU funcs & GPU kernel codes ############################################
# Dispatch table: routine name -> numba-compiled CPU implementation.
# `*_par` entries are the `parallel=True` (prange) variants; keys are looked
# up by the public ssqueeze/indexed-sum entry points.
_cpu_fns = {
    'ssq_cwt_log_piecewise': _ssq_cwt_log_piecewise,
    'ssq_cwt_log_piecewise_par': _ssq_cwt_log_piecewise_par,
    'ssq_cwt_log': _ssq_cwt_log,
    'ssq_cwt_log_par': _ssq_cwt_log_par,
    'ssq_cwt_lin': _ssq_cwt_lin,
    'ssq_cwt_lin_par': _ssq_cwt_lin_par,
    'ssq_stft': _ssq_stft,
    'ssq_stft_par': _ssq_stft_par,
    'indexed_sum_log_piecewise': _indexed_sum_log_piecewise,
    'indexed_sum_log_piecewise_par': _indexed_sum_log_piecewise_par,
    'indexed_sum_log': _indexed_sum_log,
    'indexed_sum_log_par': _indexed_sum_log_par,
    'indexed_sum_lin': _indexed_sum_lin,
    'indexed_sum_lin_par': _indexed_sum_lin_par,
}
_kernel_codes = dict(
ssq_cwt_log_piecewise='''
extern "C" __global__
void ssq_cwt_log_piecewise(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vlmin0, double vlmin1,
double dvl0, double dvl1,
int idx1) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
double wl;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
wl = log2${f}(w_ij);
if (wl > vlmin1){
k = (int)round((wl - vlmin1) / dvl1) + idx1;
if (k >= ${M})
k = ${M} - 1;
} else {
k = (int)round((wl - vlmin0) / dvl0);
if (k < 0)
k = 0;
}
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
ssq_cwt_log='''
extern "C" __global__
void ssq_cwt_log(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vlmin, double dvl) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
k = (int)round(((double)log2${f}(w_ij) - vlmin) / dvl);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
ssq_cwt_lin='''
extern "C" __global__
void ssq_cwt_lin(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vmin, double dv) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
k = (int)round(((double)w_ij - vmin) / dv);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
ssq_stft='''
extern "C" __global__
void ssq_stft(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} Sfs[${M}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vmin, double dv) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs(Sfs[i] - (B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
k = (int)round(((double)w_ij - vmin) / dv);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
indexed_sum_log_piecewise='''
extern "C" __global__
void indexed_sum_log_piecewise(${dtype} Wx[${M}][${N}][2],
${dtype} w[${M}][${N}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
double vlmin0, double vlmin1,
double dvl0, double dvl1,
int idx1)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
double wl;
for (int i=0; i < ${M}; ++i){
if (!isinf(w[i][j])){
wl = (double)log2${f}(w[i][j]);
if (wl > vlmin1){
k = (int)round((wl - vlmin1) / dvl1) + idx1;
if (k >= ${M})
k = ${M} - 1;
} else {
k = (int)round((wl - vlmin0) / dvl0);
if (k < 0)
k = 0;
}
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
indexed_sum_log='''
extern "C" __global__
void indexed_sum_log(${dtype} Wx[${M}][${N}][2],
${dtype} w[${M}][${N}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
double vlmin, double dvl)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
for (int i=0; i < ${M}; ++i){
if (!isinf(w[i][j])){
k = (int)round(((double)log2${f}(w[i][j]) - vlmin) / dvl);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
indexed_sum_lin='''
extern "C" __global__
void indexed_sum_lin(${dtype} Wx[${M}][${N}][2],
${dtype} w[${M}][${N}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
double vmin, double dv)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
for (int i=0; i < ${M}; ++i){
if (!isinf(w[i][j])){
k = (int)round(((double)(w[i][j]) - vmin) / dv);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
phase_cwt='''
extern "C" __global__
void phase_cwt(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}],
${dtype} *gamma) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= ${M} || j >= ${N})
return;
if (norm${f}(2, Wx[i][j]) < *gamma){
out[i][j] = 1.0/0.0;
return;
}
${dtype} A = dWx[i][j][0];
${dtype} B = dWx[i][j][1];
${dtype} C = Wx[i][j][0];
${dtype} D = Wx[i][j][1];
out[i][j] = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
}
''',
)
###############################################################################
from .utils.common import WARN, EPS64
from .utils.cwt_utils import logscale_transition_idx
| 46,480
| 34.78214
| 82
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.