| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable) |
|---|---|---|
13,867 | from features import SignalGenerator, dilated_factor
from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
# Cache of Hann windows keyed by "<win_size>_<dtype>_<device>" so repeated
# calls reuse the same window tensor instead of reallocating it.
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    """Magnitude spectrogram of a waveform batch ``y`` via STFT.

    Prints a warning when samples fall outside [-1, 1], reflect-pads the
    signal so frames align with ``hop_size`` hops, and returns
    sqrt(re^2 + im^2 + 1e-6) of the complex STFT.
    ``sampling_rate`` is unused; kept for signature compatibility.
    """
    y_min, y_max = torch.min(y), torch.max(y)
    if y_min < -1.:
        print('min value is ', y_min)
    if y_max > 1.:
        print('max value is ', y_max)

    cache_key = f"{win_size}_{y.dtype}_{y.device}"
    if cache_key not in hann_window:
        hann_window[cache_key] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    pad_amount = int((n_fft - hop_size) / 2)
    y = torch.nn.functional.pad(y.unsqueeze(1), (pad_amount, pad_amount), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[cache_key],
        center=center,
        pad_mode='reflect',
        normalized=False,
        onesided=True,
        return_complex=True,
    )
    spec = torch.view_as_real(spec)
    # Small epsilon keeps the sqrt differentiable at zero magnitude.
    return torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
13,868 | from features import SignalGenerator, dilated_factor
from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
class HParams():
    """Mapping-style hyperparameter container.

    Keyword arguments become attributes; nested dicts are converted into
    nested HParams recursively, so values are reachable both as ``h.key``
    and ``h["key"]``.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            if type(value) is dict:
                value = HParams(**value)
            self[name] = value

    def keys(self):
        return vars(self).keys()

    def items(self):
        return vars(self).items()

    def values(self):
        return vars(self).values()

    def __len__(self):
        return len(vars(self))

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __contains__(self, key):
        return key in vars(self)

    def __repr__(self):
        return repr(vars(self))
def get_hparams_from_file(config_path):
    """Parse the UTF-8 JSON config at *config_path* into an HParams tree."""
    with open(config_path, "r", encoding="utf-8") as config_file:
        config = json.loads(config_file.read())
    return HParams(**config)
13,869 | from features import SignalGenerator, dilated_factor
from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
def load_checkpoint(checkpoint_path, model, optimizer=None):
    """Restore model (and optionally optimizer) state from a checkpoint file.

    The checkpoint stores the weights split across several sub-dicts
    ('pe', 'flow', 'text_enc', 'dec', 'emb_g'); they are merged into one
    state dict. Keys missing from the checkpoint keep the model's current
    weights instead of failing.

    Returns (model, optimizer, learning_rate, iteration).
    """
    assert os.path.isfile(checkpoint_path), f"No such file or directory: {checkpoint_path}"
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    saved_state_dict = {
        **checkpoint_dict['pe'],
        **checkpoint_dict['flow'],
        **checkpoint_dict['text_enc'],
        **checkpoint_dict['dec'],
        **checkpoint_dict['emb_g'],
    }
    # DataParallel/DDP wraps the real module in `.module`.
    target = model.module if hasattr(model, 'module') else model
    state_dict = target.state_dict()
    # Bug fix: the original used a bare `except:` around the lookup, which
    # also swallowed KeyboardInterrupt etc. Only a missing key should fall
    # back to the model's current tensor — `.get` expresses exactly that.
    new_state_dict = {k: saved_state_dict.get(k, v) for k, v in state_dict.items()}
    target.load_state_dict(new_state_dict)
    return model, optimizer, learning_rate, iteration
13,870 | import os
from OpenSSL import crypto
def create_self_signed_cert(certfile, keyfile, certargs, cert_dir="."):
    """Generate a 2048-bit RSA self-signed certificate/key pair.

    Writes PEM files <cert_dir>/<certfile> and <cert_dir>/<keyfile>; does
    nothing when both already exist. ``certargs`` must provide the keys
    "Country", "State", "City", "Organization" and "Org. Unit".

    Fixes vs. the original: the PEM files are written via `with` blocks
    (the originals leaked the handles), and the certificate is signed with
    SHA-256 — SHA-1 certificate signatures are rejected by modern TLS
    stacks.
    """
    cert_path = os.path.join(cert_dir, certfile)
    key_path = os.path.join(cert_dir, keyfile)
    if os.path.exists(cert_path) and os.path.exists(key_path):
        return

    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)

    cert = crypto.X509()
    subject = cert.get_subject()
    subject.C = certargs["Country"]
    subject.ST = certargs["State"]
    subject.L = certargs["City"]
    subject.O = certargs["Organization"]
    subject.OU = certargs["Org. Unit"]
    subject.CN = 'Example'
    cert.set_serial_number(1000)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(315360000)  # ~10 years in seconds
    cert.set_issuer(subject)             # self-signed: issuer == subject
    cert.set_pubkey(key)
    cert.sign(key, 'sha256')

    with open(cert_path, "wb") as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(key_path, "wb") as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
13,871 | from typing import TypeAlias, Union
from const import MAX_SLOT_NUM, MODEL_DIR_STATIC, DiffusionSVCInferenceType, EnumInferenceTypes, EmbedderType, StaticSlot, VoiceChangerType
from dataclasses import dataclass, asdict, field
import os
import json
ModelSlots: TypeAlias = Union[
ModelSlot,
RVCModelSlot,
MMVCv13ModelSlot,
MMVCv15ModelSlot,
SoVitsSvc40ModelSlot,
DDSPSVCModelSlot,
DiffusionSVCModelSlot,
BeatriceModelSlot,
LLVCModelSlot,
EasyVCModelSlot,
]
def loadSlotInfo(model_dir: str, slotIndex: int | StaticSlot) -> ModelSlots:
MODEL_DIR_STATIC = os.path.join(sys._MEIPASS, "model_dir_static") if hasattr(sys, "_MEIPASS") else "model_dir_static"
MAX_SLOT_NUM = 500
def loadAllSlotInfo(model_dir: str):
    """Load slot info for every numbered slot under *model_dir*, then append
    the static built-in "Beatrice-JVS" slot loaded from MODEL_DIR_STATIC.

    Returns a list of ModelSlots ordered by slot index, static slot last.
    """
    slotInfos: list[ModelSlots] = []
    for slotIndex in range(MAX_SLOT_NUM):
        slotInfo = loadSlotInfo(model_dir, slotIndex)
        # Slot index is injected dynamically (not taken from the persisted file).
        slotInfo.slotIndex = slotIndex
        slotInfos.append(slotInfo)
    # Static model shipped with the application.
    slotInfo = loadSlotInfo(MODEL_DIR_STATIC, "Beatrice-JVS")
    slotInfos.append(slotInfo)
    return slotInfos
13,872 | from typing import TypeAlias, Union
from const import MAX_SLOT_NUM, MODEL_DIR_STATIC, DiffusionSVCInferenceType, EnumInferenceTypes, EmbedderType, StaticSlot, VoiceChangerType
from dataclasses import dataclass, asdict, field
import os
import json
ModelSlots: TypeAlias = Union[
ModelSlot,
RVCModelSlot,
MMVCv13ModelSlot,
MMVCv15ModelSlot,
SoVitsSvc40ModelSlot,
DDSPSVCModelSlot,
DiffusionSVCModelSlot,
BeatriceModelSlot,
LLVCModelSlot,
EasyVCModelSlot,
]
def saveSlotInfo(model_dir: str, slotIndex: int, slotInfo: "ModelSlots"):
    """Persist *slotInfo* as <model_dir>/<slotIndex>/params.json.

    The slot index is injected dynamically at load time, so it is
    neutralized to -1 both on the in-memory object (as before) and — bug
    fix — in the persisted JSON: the original snapshot was taken before the
    reset, so the stale index was written to disk despite the comment's
    stated intent.
    Assumes the slot directory already exists.
    """
    slotDir = os.path.join(model_dir, str(slotIndex))
    print("SlotInfo:::", slotInfo)
    slotInfoDict = asdict(slotInfo)
    # Slot index is injected dynamically; never persist a meaningful one.
    slotInfo.slotIndex = -1
    slotInfoDict["slotIndex"] = -1
    # `with` closes the handle promptly (json.dump(..., open(...)) leaked it).
    with open(os.path.join(slotDir, "params.json"), "w") as f:
        json.dump(slotInfoDict, f, indent=4)
13,873 | import sys
from distutils.util import strtobool
from datetime import datetime
import socket
import platform
import os
import argparse
from Exceptions import WeightDownladException
from downloader.SampleDownloader import downloadInitialSamples
from downloader.WeightDownloader import downloadWeight
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
import uvicorn
from mods.ssl import create_self_signed_cert
from voice_changer.VoiceChangerManager import VoiceChangerManager
from sio.MMVC_SocketIOApp import MMVC_SocketIOApp
from restapi.MMVC_Rest import MMVC_Rest
from const import (
NATIVE_CLIENT_FILE_MAC,
NATIVE_CLIENT_FILE_WIN,
SSL_KEY_DIR,
)
import subprocess
import multiprocessing as mp
from mods.log_control import VoiceChangaerLogger
def _strtobool(val: str) -> int:
    """Drop-in replacement for distutils.util.strtobool.

    distutils was removed from the standard library in Python 3.12
    (PEP 632); this keeps the exact same contract: returns 1/0 and raises
    ValueError on unrecognized input.
    """
    val = val.lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if val in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def setupArgParser():
    """Build the CLI argument parser for the voice-changer server.

    Covers server options (port, HTTPS/TLS) and the paths of all pretrained
    models. Returns the configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser()
    # Server options
    parser.add_argument("--logLevel", type=str, default="error", help="Log level info|critical|error. (default: error)")
    parser.add_argument("-p", type=int, default=18888, help="port")
    parser.add_argument("--https", type=_strtobool, default=False, help="use https")
    parser.add_argument("--test_connect", type=str, default="8.8.8.8", help="test connect to detect ip in https mode. default 8.8.8.8")
    parser.add_argument("--httpsKey", type=str, default="ssl.key", help="path for the key of https")
    parser.add_argument("--httpsCert", type=str, default="ssl.cert", help="path for the cert of https")
    parser.add_argument("--httpsSelfSigned", type=_strtobool, default=True, help="generate self-signed certificate")
    parser.add_argument("--model_dir", type=str, default="model_dir", help="path to model files")
    parser.add_argument("--sample_mode", type=str, default="production", help="rvc_sample_mode")
    # Pretrained model paths
    parser.add_argument("--content_vec_500", type=str, default="pretrain/checkpoint_best_legacy_500.pt", help="path to content_vec_500 model(pytorch)")
    parser.add_argument("--content_vec_500_onnx", type=str, default="pretrain/content_vec_500.onnx", help="path to content_vec_500 model(onnx)")
    parser.add_argument("--content_vec_500_onnx_on", type=_strtobool, default=True, help="use or not onnx for content_vec_500")
    parser.add_argument("--hubert_base", type=str, default="pretrain/hubert_base.pt", help="path to hubert_base model(pytorch)")
    parser.add_argument("--hubert_base_jp", type=str, default="pretrain/rinna_hubert_base_jp.pt", help="path to hubert_base_jp model(pytorch)")
    parser.add_argument("--hubert_soft", type=str, default="pretrain/hubert/hubert-soft-0d54a1f4.pt", help="path to hubert_soft model(pytorch)")
    # Bug fix: help text was copy-pasted from --hubert_soft.
    parser.add_argument("--whisper_tiny", type=str, default="pretrain/whisper_tiny.pt", help="path to whisper_tiny model(pytorch)")
    parser.add_argument("--nsf_hifigan", type=str, default="pretrain/nsf_hifigan/model", help="path to nsf_hifigan model(pytorch)")
    parser.add_argument("--crepe_onnx_full", type=str, default="pretrain/crepe_onnx_full.onnx", help="path to crepe_onnx_full")
    parser.add_argument("--crepe_onnx_tiny", type=str, default="pretrain/crepe_onnx_tiny.onnx", help="path to crepe_onnx_tiny")
    parser.add_argument("--rmvpe", type=str, default="pretrain/rmvpe.pt", help="path to rmvpe")
    parser.add_argument("--rmvpe_onnx", type=str, default="pretrain/rmvpe.onnx", help="path to rmvpe onnx")
    return parser
13,874 | import sys
from distutils.util import strtobool
from datetime import datetime
import socket
import platform
import os
import argparse
from Exceptions import WeightDownladException
from downloader.SampleDownloader import downloadInitialSamples
from downloader.WeightDownloader import downloadWeight
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
import uvicorn
from mods.ssl import create_self_signed_cert
from voice_changer.VoiceChangerManager import VoiceChangerManager
from sio.MMVC_SocketIOApp import MMVC_SocketIOApp
from restapi.MMVC_Rest import MMVC_Rest
from const import (
NATIVE_CLIENT_FILE_MAC,
NATIVE_CLIENT_FILE_WIN,
SSL_KEY_DIR,
)
import subprocess
import multiprocessing as mp
from mods.log_control import VoiceChangaerLogger
def printMessage(message, level=0):
    """Log *message* through the module-level `logger`, indenting by
    nesting *level*; on non-Windows terminals the text is additionally
    wrapped in ANSI color codes (Windows consoles may not render them).

    NOTE(review): relies on a module-level `logger` defined elsewhere in
    this file — confirm before reuse.
    """
    pf = platform.system()
    if pf == "Windows":
        # Plain indentation only — no ANSI escapes.
        if level == 0:
            message = f"{message}"
        elif level == 1:
            message = f"    {message}"
        elif level == 2:
            message = f"        {message}"
        else:
            message = f"            {message}"
    else:
        # ANSI-colored variants: 17/34/32/47 select the color per level.
        if level == 0:
            message = f"\033[17m{message}\033[0m"
        elif level == 1:
            message = f"\033[34m    {message}\033[0m"
        elif level == 2:
            message = f"\033[32m        {message}\033[0m"
        else:
            message = f"\033[47m            {message}\033[0m"
    logger.info(message)
13,875 | import sys
from distutils.util import strtobool
from datetime import datetime
import socket
import platform
import os
import argparse
from Exceptions import WeightDownladException
from downloader.SampleDownloader import downloadInitialSamples
from downloader.WeightDownloader import downloadWeight
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
import uvicorn
from mods.ssl import create_self_signed_cert
from voice_changer.VoiceChangerManager import VoiceChangerManager
from sio.MMVC_SocketIOApp import MMVC_SocketIOApp
from restapi.MMVC_Rest import MMVC_Rest
from const import (
NATIVE_CLIENT_FILE_MAC,
NATIVE_CLIENT_FILE_WIN,
SSL_KEY_DIR,
)
import subprocess
import multiprocessing as mp
from mods.log_control import VoiceChangaerLogger
def localServer(logLevel: str = "critical"):
    """Run the SocketIO ASGI app in-process with uvicorn on 0.0.0.0.

    The app is referenced as "<this module name>:app_socketio". Auto-reload
    is enabled only outside a PyInstaller bundle (sys._MEIPASS is set when
    frozen).

    NOTE(review): relies on module-level `PORT` and `logger` being defined
    elsewhere in this file — confirm before reuse.
    """
    try:
        uvicorn.run(
            f"{os.path.basename(__file__)[:-3]}:app_socketio",
            host="0.0.0.0",
            port=int(PORT),
            reload=False if hasattr(sys, "_MEIPASS") else True,
            log_level=logLevel,
        )
    except Exception as e:
        logger.error(f"[Voice Changer] Web Server Launch Exception, {e}")
13,876 | from typing import Any, Union, cast
from const import TMP_DIR
import torch
import os
import numpy as np
from dataclasses import dataclass, asdict, field
import resampy
import onnxruntime
from mods.log_control import VoiceChangaerLogger
from voice_changer.IORecorder import IORecorder
from voice_changer.utils.Timer import Timer2
from voice_changer.utils.VoiceChangerIF import VoiceChangerIF
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from Exceptions import (
DeviceCannotSupportHalfPrecisionException,
DeviceChangingException,
HalfPrecisionChangingException,
NoModeLoadedException,
NotEnoughDataExtimateF0,
ONNXInputArgumentException,
PipelineNotInitializedException,
VoiceChangerIsNotSelectedException,
)
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
logger = VoiceChangaerLogger.get_instance().getLogger()
# Verbose tracing of the conversion pipeline; disabled by default.
PRINT_CONVERT_PROCESSING: bool = False


def print_convert_processing(mess: str):
    """Forward *mess* to the logger only when conversion tracing is enabled."""
    if PRINT_CONVERT_PROCESSING:
        logger.info(mess)
13,877 | from typing import Any, Union, cast
from const import TMP_DIR
import torch
import os
import numpy as np
from dataclasses import dataclass, asdict, field
import resampy
import onnxruntime
from mods.log_control import VoiceChangaerLogger
from voice_changer.IORecorder import IORecorder
from voice_changer.utils.Timer import Timer2
from voice_changer.utils.VoiceChangerIF import VoiceChangerIF
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from Exceptions import (
DeviceCannotSupportHalfPrecisionException,
DeviceChangingException,
HalfPrecisionChangingException,
NoModeLoadedException,
NotEnoughDataExtimateF0,
ONNXInputArgumentException,
PipelineNotInitializedException,
VoiceChangerIsNotSelectedException,
)
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
AudioInOut: TypeAlias = np.ndarray[Any, np.dtype[np.int16]]
def pad_array(arr: AudioInOut, target_length: int):
current_length = arr.shape[0]
if current_length >= target_length:
return arr
else:
pad_width = target_length - current_length
pad_left = pad_width // 2
pad_right = pad_width - pad_left
# padded_arr = np.pad(
# arr, (pad_left, pad_right), "constant", constant_values=(0, 0)
# )
padded_arr = np.pad(arr, (pad_left, pad_right), "edge")
return padded_arr | null |
13,878 | import math
from collections import OrderedDict
from typing import Optional
from torch import Tensor
import torch
import torch.nn as nn
import torch.nn.functional as F
from voice_changer.LLVC.model.cached_convnet import CachedConvNet
def mod_pad(x, chunk_size, pad):
    """Zero-pad *x* on the right so its last dim is a multiple of
    *chunk_size*, then apply the extra (left, right) *pad*.

    Returns (padded_tensor, samples_added_for_chunk_alignment).
    """
    remainder = x.shape[-1] % chunk_size
    mod = chunk_size - remainder if remainder else 0
    if mod:
        x = F.pad(x, (0, mod))
    return F.pad(x, pad), mod
13,879 | import torch
import os
import sys
import json
import logging
# Cache of Hann windows keyed by "<win_size>_<dtype>_<device>".
hann_window = {}
def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    """Magnitude spectrogram of waveform batch ``y`` via STFT.

    Warns on stdout when samples fall outside [-1, 1]; reflect-pads the
    signal so frames align with ``hop_size`` hops, and returns
    sqrt(re^2 + im^2 + 1e-6) of the complex STFT.
    ``sampling_rate`` is unused; kept for signature compatibility.
    """
    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if wnsize_dtype_device not in hann_window:
        # Build the window once per (size, dtype, device) combination.
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
            dtype=y.dtype, device=y.device
        )

    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[wnsize_dtype_device],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=True,
    )
    spec = torch.view_as_real(spec)
    # Small epsilon keeps the sqrt differentiable at zero magnitude.
    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec
13,880 | import torch
import os
import sys
import json
import logging
# Module-level logger alias: this file logs through the stdlib logging module.
logger = logging


def load_checkpoint(checkpoint_path, model, optimizer=None):
    """Restore model (and optionally optimizer) state from a checkpoint.

    Model weights are read from checkpoint_dict["model"]; keys missing from
    the checkpoint are logged and keep the model's current weights.

    Returns (model, optimizer, learning_rate, iteration).
    """
    assert os.path.isfile(
        checkpoint_path
    ), f"No such file or directory: {checkpoint_path}"
    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
    iteration = checkpoint_dict["iteration"]
    learning_rate = checkpoint_dict["learning_rate"]
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint_dict["optimizer"])
    saved_state_dict = checkpoint_dict["model"]
    # DataParallel/DDP wraps the real module in `.module`.
    target = model.module if hasattr(model, "module") else model
    state_dict = target.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
        except KeyError:
            # Bug fix: was a bare `except:`; only a missing key should fall
            # back to the model's current tensor.
            logger.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    target.load_state_dict(new_state_dict)
    logger.info(
        "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration)
    )
    return model, optimizer, learning_rate, iteration
13,881 | import torch
import os
import sys
import json
import logging
class HParams:
    """Mapping-style hyperparameter container.

    Keyword arguments become attributes; nested dicts are converted into
    nested HParams recursively, so values are reachable both as ``h.key``
    and ``h["key"]``.
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if type(v) == dict:
                # Recurse so nested configs get the same attribute access.
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
def get_hparams_from_file(config_path):
    """Parse a JSON config file into an HParams tree.

    Consistency/bug fix: opens with encoding="utf-8" like the other copy of
    this helper in this file, instead of the locale-dependent default that
    breaks on non-ASCII configs under some Windows locales.
    """
    with open(config_path, "r", encoding="utf-8") as f:
        data = f.read()
    config = json.loads(data)
    hparams = HParams(**config)
    return hparams
13,882 | import torch
def init_weights(m, mean=0.0, std=0.01):
    """In-place normal(mean, std) init for the weights of Conv* modules.

    Modules whose class name does not contain "Conv" are left untouched.
    """
    if "Conv" in m.__class__.__name__:
        m.weight.data.normal_(mean, std)
13,883 | import torch
def get_padding(kernel_size, dilation=1):
    """'Same' padding for a dilated convolution: dilation * (kernel_size - 1) // 2."""
    return dilation * (kernel_size - 1) // 2
13,884 | import torch
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """WaveNet gated activation: tanh/sigmoid halves of (a + b) multiplied.

    *n_channels* is a 1-element tensor/sequence holding the channel split
    point; channels [:split] feed tanh, [split:] feed sigmoid.
    """
    split = n_channels[0]
    summed = input_a + input_b
    return torch.tanh(summed[:, :split, :]) * torch.sigmoid(summed[:, split:, :])
13,885 | import torch
def sequence_mask(length, max_length=None):
    """Boolean mask of shape (len(length), max_length): row i is True for
    the first length[i] positions. max_length defaults to length.max()."""
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions.unsqueeze(0) < length.unsqueeze(1)
13,886 | import sounddevice as sd
from dataclasses import dataclass, field
import numpy as np
from const import ServerAudioDeviceType
from mods.log_control import VoiceChangaerLogger
def dummy_callback(data: np.ndarray, frames, times, status):
    """No-op sounddevice stream callback, used when probing device support."""
    pass
ServerAudioDeviceType: TypeAlias = Literal["audioinput", "audiooutput"]
def checkSamplingRate(deviceId: int, desiredSamplingRate: int, type: ServerAudioDeviceType):
    """Probe whether a device can open a float32 stream at the given rate.

    "input" selects sd.InputStream, anything else sd.OutputStream. Any
    failure is printed and reported as False.
    """
    stream_cls = sd.InputStream if type == "input" else sd.OutputStream
    try:
        # Opening (and immediately closing) the stream is the probe itself.
        with stream_cls(
            device=deviceId,
            callback=dummy_callback,
            dtype="float32",
            samplerate=desiredSamplingRate,
        ):
            pass
        return True
    except Exception as e:  # NOQA
        print("[checkSamplingRate]", e)
        return False
13,887 | import sounddevice as sd
from dataclasses import dataclass, field
import numpy as np
from const import ServerAudioDeviceType
from mods.log_control import VoiceChangaerLogger
logger = VoiceChangaerLogger.get_instance().getLogger()
@dataclass
class ServerAudioDevice:
    """Description of one host audio device as reported by sounddevice.

    Bug fix: the class used dataclasses.field() but was missing the
    @dataclass decorator, so keyword construction (as list_audio_device in
    this file does) failed and available_samplerates was a bare field()
    object shared at class level.
    """
    kind: "ServerAudioDeviceType" = "audioinput"  # "audioinput" | "audiooutput"
    index: int = 0
    name: str = ""
    hostAPI: str = ""
    maxInputChannels: int = 0
    maxOutputChannels: int = 0
    default_samplerate: int = 0
    # Filled in later by sample-rate probing; per-instance list.
    available_samplerates: list[int] = field(default_factory=lambda: [])
def list_audio_device():
    """Enumerate host audio devices via sounddevice.

    Returns (input_devices, output_devices) as ServerAudioDevice lists,
    with each device's host API resolved to its name. Query failures are
    logged and re-raised.
    """
    try:
        audioDeviceList = sd.query_devices()
    except Exception as e:
        logger.error("[Voice Changer] ex:query_devices")
        logger.exception(e)
        raise e
    hostapis = sd.query_hostapis()

    def to_descriptor(dev, kind):
        # Map one raw sounddevice record onto the ServerAudioDevice dataclass.
        return ServerAudioDevice(
            kind=kind,
            index=dev["index"],
            name=dev["name"],
            hostAPI=hostapis[dev["hostapi"]]["name"],
            maxInputChannels=dev["max_input_channels"],
            maxOutputChannels=dev["max_output_channels"],
            default_samplerate=dev["default_samplerate"],
        )

    serverAudioInputDevices: list[ServerAudioDevice] = [
        to_descriptor(d, "audioinput") for d in audioDeviceList if d["max_input_channels"] > 0
    ]
    serverAudioOutputDevices: list[ServerAudioDevice] = [
        to_descriptor(d, "audiooutput") for d in audioDeviceList if d["max_output_channels"] > 0
    ]
    return serverAudioInputDevices, serverAudioOutputDevices
13,888 | import os
import json
import torch
from onnxsim import simplify
import onnx
from const import TMP_DIR, EnumInferenceTypes
from data.ModelSlot import RVCModelSlot
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.onnxExporter.SynthesizerTrnMs256NSFsid_ONNX import (
SynthesizerTrnMs256NSFsid_ONNX,
)
from voice_changer.RVC.onnxExporter.SynthesizerTrnMs256NSFsid_nono_ONNX import (
SynthesizerTrnMs256NSFsid_nono_ONNX,
)
from voice_changer.RVC.onnxExporter.SynthesizerTrnMs768NSFsid_ONNX import (
SynthesizerTrnMs768NSFsid_ONNX,
)
from voice_changer.RVC.onnxExporter.SynthesizerTrnMs768NSFsid_nono_ONNX import (
SynthesizerTrnMs768NSFsid_nono_ONNX,
)
from voice_changer.RVC.onnxExporter.SynthesizerTrnMsNSFsidNono_webui_ONNX import (
SynthesizerTrnMsNSFsidNono_webui_ONNX,
)
from voice_changer.RVC.onnxExporter.SynthesizerTrnMsNSFsid_webui_ONNX import (
SynthesizerTrnMsNSFsid_webui_ONNX,
)
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
def _export2onnx(input_model, output_model, output_model_simple, is_half, metadata):
TMP_DIR = os.path.join(tmpdir.name, "tmp_dir") if hasattr(sys, "_MEIPASS") else "tmp_dir"
class RVCModelSlot(ModelSlot):
class DeviceManager(object):
def get_instance(cls):
def __init__(self):
def getDevice(self, id: int):
def getOnnxExecutionProvider(self, gpu: int):
def setForceTensor(self, forceTensor: bool):
def halfPrecisionAvailable(self, id: int):
def getDeviceMemory(self, id: int):
class VoiceChangerParamsManager:
def __init__(self):
def get_instance(cls):
def setParams(self, params: VoiceChangerParams):
def export2onnx(gpu: int, modelSlot: RVCModelSlot):
    """Export the RVC model in *modelSlot* to ONNX under TMP_DIR.

    Produces "<model>.onnx" and a simplified "<model>_simple.onnx";
    exports in fp16 when the target GPU reports memory, otherwise fp32.
    Returns the simplified ONNX file name.
    """
    vcparams = VoiceChangerParamsManager.get_instance().params
    modelFile = os.path.join(
        vcparams.model_dir,
        str(modelSlot.slotIndex),
        os.path.basename(modelSlot.modelFile),
    )
    base_name = os.path.splitext(os.path.basename(modelFile))[0]
    output_file = base_name + ".onnx"
    output_file_simple = base_name + "_simple.onnx"
    output_path = os.path.join(TMP_DIR, output_file)
    output_path_simple = os.path.join(TMP_DIR, output_file_simple)

    # Metadata embedded into the exported ONNX graph.
    metadata = {
        "application": "VC_CLIENT",
        "version": "2.1",
        "modelType": modelSlot.modelType,
        "samplingRate": modelSlot.samplingRate,
        "f0": modelSlot.f0,
        "embChannels": modelSlot.embChannels,
        "embedder": modelSlot.embedder,
        "embOutputLayer": modelSlot.embOutputLayer,
        "useFinalProj": modelSlot.useFinalProj,
    }

    gpu_memory = DeviceManager.get_instance().getDeviceMemory(gpu)
    print(f"[Voice Changer] exporting onnx... gpu_id:{gpu} gpu_mem:{gpu_memory}")
    use_half = gpu_memory > 0
    if not use_half:
        print("[Voice Changer] Warning!!! onnx export with float32. maybe size is doubled.")
    _export2onnx(modelFile, output_path, output_path_simple, use_half, metadata)
    return output_file_simple
from dataclasses import dataclass

import torch

from .model import ModelDimensions, Whisper
@dataclass
class ModelDimensions:
    """Whisper architecture hyperparameters (audio encoder + text decoder).

    Bug fix: load_model constructs this as ModelDimensions(**checkpoint["dims"]),
    which requires the keyword __init__ that @dataclass generates; the bare
    annotation-only class had no such constructor.
    """
    n_mels: int
    n_audio_ctx: int
    n_audio_state: int
    n_audio_head: int
    n_audio_layer: int
    n_vocab: int
    n_text_ctx: int
    n_text_state: int
    n_text_head: int
    n_text_layer: int
class Whisper(nn.Module):
    """Whisper encoder-decoder speech model.

    NOTE(review): upstream Whisper decorates `device` and `is_multilingual`
    with @property; the decorators may have been lost in extraction here —
    confirm before calling them without parentheses.
    """

    def __init__(self, dims: ModelDimensions):
        super().__init__()
        self.dims = dims
        # Audio encoder: log-mel frames -> audio feature sequence.
        self.encoder = AudioEncoder(
            self.dims.n_mels,
            self.dims.n_audio_ctx,
            self.dims.n_audio_state,
            self.dims.n_audio_head,
            self.dims.n_audio_layer,
        )
        # Text decoder: tokens + audio features -> logits over the vocabulary.
        self.decoder = TextDecoder(
            self.dims.n_vocab,
            self.dims.n_text_ctx,
            self.dims.n_text_state,
            self.dims.n_text_head,
            self.dims.n_text_layer,
        )

    def embed_audio(self, mel: torch.Tensor):
        """Run only the audio encoder on a log-mel spectrogram."""
        return self.encoder(mel)

    def logits(self, tokens: torch.Tensor, audio_features: torch.Tensor):
        """Decode token logits from precomputed audio features."""
        return self.decoder(tokens, audio_features)

    def forward(self, mel: torch.Tensor, tokens: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Full forward pass: encode the mel, then decode the tokens."""
        return self.decoder(tokens, self.encoder(mel))

    def device(self):
        # Device of the first parameter — assumes all parameters share one device.
        return next(self.parameters()).device

    def is_multilingual(self):
        # 51865 is presumably the multilingual vocabulary size — confirm upstream.
        return self.dims.n_vocab == 51865
def load_model(path) -> Whisper:
    """Deserialize a Whisper checkpoint from *path* and return the model on CPU.

    The checkpoint must carry "dims" (constructor kwargs for
    ModelDimensions) and "model_state_dict".
    """
    device = "cpu"
    checkpoint = torch.load(path, map_location=device)
    model = Whisper(ModelDimensions(**checkpoint["dims"]))
    model.load_state_dict(checkpoint["model_state_dict"])
    return model.to(device)
13,890 | import sys
system_encoding = sys.getdefaultencoding()
if system_encoding != "utf-8":
else:
def make_safe(string):
    """Round-trip *string* through the system default encoding, replacing any
    character it cannot represent with '?'.

    Avoids UnicodeEncodeError when printing on non-UTF-8 consoles
    (https://github.com/openai/whisper/discussions/729).
    NOTE(review): relies on a module-level `system_encoding` defined
    elsewhere in this file.
    """
    # replaces any character not representable using the system default encoding with an '?',
    # avoiding UnicodeEncodeError (https://github.com/openai/whisper/discussions/729).
    return string.encode(system_encoding, errors="replace").decode(system_encoding)
13,891 | import sys
def make_safe(string):
    """Identity variant used when the system default encoding is UTF-8."""
    # utf-8 can encode any Unicode code point, so no need to do the round-trip encoding
    return string
13,892 | import sys
def exact_div(x, y):
    """Integer-divide x by y, asserting that y divides x exactly."""
    quotient, remainder = divmod(x, y)
    assert remainder == 0
    return quotient
13,893 | import os
from functools import lru_cache
from typing import Union
import numpy as np
import torch
import torch.nn.functional as F
from voice_changer.RVC.embedder.whisper.utils import exact_div
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE
The provided code snippet includes necessary dependencies for implementing the `pad_or_trim` function. Write a Python function `def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1)` to solve the following problem:
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
Here is the function:
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """
    Pad or trim the audio array to N_SAMPLES, as expected by the encoder.

    Works on both torch tensors and NumPy arrays; padding appends zeros at
    the end of *axis*, trimming keeps the leading *length* elements.
    """
    size = array.shape[axis]
    if torch.is_tensor(array):
        if size > length:
            keep = torch.arange(length, device=array.device)
            array = array.index_select(dim=axis, index=keep)
        elif size < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - size)
            # F.pad expects (last-dim-left, last-dim-right, ...) ordering.
            flat = [amount for pair in pad_widths[::-1] for amount in pair]
            array = F.pad(array, flat)
    else:
        if size > length:
            array = array.take(indices=range(length), axis=axis)
        elif size < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - size)
            array = np.pad(array, pad_widths)
    return array
13,894 | import os
from functools import lru_cache
from typing import Union
import numpy as np
import torch
import torch.nn.functional as F
from voice_changer.RVC.embedder.whisper.utils import exact_div
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
    """
    load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    Allows decoupling librosa dependency; saved using:

        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
        )
    """
    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
    # Loads assets/mel_filters.npz shipped next to this module; only the
    # 80-band filterbank is stored.
    with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
        return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
The provided code snippet includes necessary dependencies for implementing the `log_mel_spectrogram` function. Write a Python function `def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS)` to solve the following problem:
Compute the log-Mel spectrogram of Parameters ---------- audio: Union[str, np.ndarray, torch.Tensor], shape = (*) The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz n_mels: int The number of Mel-frequency filters, only 80 is supported Returns ------- torch.Tensor, shape = (80, n_frames) A Tensor that contains the Mel spectrogram
Here is the function:
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
    """
    Compute the log-Mel spectrogram of an audio waveform.

    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz

    n_mels: int
        The number of Mel-frequency filters, only 80 is supported

    Returns
    -------
    torch.Tensor, shape = (80, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            # NOTE(review): load_audio is not defined in this snippet —
            # presumably imported elsewhere in the original module; confirm.
            audio = load_audio(audio)
        audio = torch.from_numpy(audio)

    window = torch.hann_window(N_FFT).to(audio.device)  # type: ignore
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)  # type: ignore
    # Power spectrum; the last STFT frame is dropped.
    magnitudes = stft[..., :-1].abs() ** 2

    filters = mel_filters(audio.device, n_mels)  # type: ignore
    mel_spec = filters @ magnitudes

    # Log-compress, clamp the dynamic range to 8 log10 units below the max,
    # then shift/scale into a fixed numeric range.
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
13,895 | from dataclasses import dataclass
from typing import Dict
from typing import Iterable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `sinusoids` function. Write a Python function `def sinusoids(length, channels, max_timescale=10000)` to solve the following problem:
Returns sinusoids for positional embedding
Here is the function:
def sinusoids(length, channels, max_timescale=10000):
    """Returns sinusoids for positional embedding.

    Output shape is (length, channels): the first half of each row holds
    sines, the second half cosines, over log-spaced timescales up to
    *max_timescale*. *channels* must be even.
    """
    assert channels % 2 == 0
    half = channels // 2
    log_increment = np.log(max_timescale) / (half - 1)
    inv_timescales = torch.exp(-log_increment * torch.arange(half))
    scaled = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
    return torch.cat([torch.sin(scaled), torch.cos(scaled)], dim=1)
13,896 | import Crepe
import os
def bins_to_cents(bins):
    """Converts pitch bins to cents.

    NOTE(review): CENTS_PER_BIN and dither are not defined in this snippet —
    presumably module-level in the CREPE package; confirm. The additive
    constant offsets bin 0 to the bottom of the modeled pitch range.
    """
    cents = CENTS_PER_BIN * bins + 1997.3794084376191

    # Trade quantization error for noise
    return dither(cents)
def cents_to_frequency(cents):
    """Converts cents to frequency in Hz."""
    # 1200 cents per octave, referenced to 10 Hz at 0 cents.
    octaves = cents / 1200
    return 10 * 2 ** octaves
The provided code snippet includes necessary dependencies for implementing the `weighted_argmax` function. Write a Python function `def weighted_argmax(logits)` to solve the following problem:
Sample observations using weighted sum near the argmax
Here is the function:
def weighted_argmax(logits):
    """Sample observations using weighted sum near the argmax

    Arguments
        logits (torch.Tensor [shape=(batch, 360, time)])
            Per-bin pitch logits. Mutated in place: bins outside each
            frame's +/-4-bin window around the argmax are set to -inf.

    Returns
        bins (torch.Tensor [shape=(batch, time)])
            The argmax pitch bin per frame
        frequency (torch.Tensor [shape=(batch, time)])
            The decoded pitch in Hz
    """
    # Find center of analysis window
    bins = logits.argmax(dim=1)
    # Find bounds of analysis window (clamped to [0, n_bins])
    start = torch.max(torch.tensor(0, device=logits.device), bins - 4)
    end = torch.min(torch.tensor(logits.size(1), device=logits.device), bins + 5)
    # Mask out everything outside of window
    for batch in range(logits.size(0)):
        for time in range(logits.size(2)):
            logits[batch, :start[batch, time], time] = -float('inf')
            logits[batch, end[batch, time]:, time] = -float('inf')
    # Construct weights (cents value of each bin; cached on the function object)
    if not hasattr(weighted_argmax, 'weights'):
        weights = bins_to_cents(torch.arange(360))
        weighted_argmax.weights = weights[None, :, None]
    # Ensure devices are the same (no-op if they are)
    weighted_argmax.weights = weighted_argmax.weights.to(logits.device)
    # Convert to probabilities
    with torch.no_grad():
        probs = torch.sigmoid(logits)
    # Apply weights: probability-weighted average of cents within the window
    cents = (weighted_argmax.weights * probs).sum(dim=1) / probs.sum(dim=1)
    # Convert to frequency in Hz
    return bins, cents_to_frequency(cents)
13,897 | import Crepe
import os
PITCH_BINS = 360
The provided code snippet includes necessary dependencies for implementing the `periodicity` function. Write a Python function `def periodicity(probabilities, bins)` to solve the following problem:
Computes the periodicity from the network output and pitch bins
Here is the function:
def periodicity(probabilities, bins):
    """Computes the periodicity from the network output and pitch bins"""
    n_batch = probabilities.size(0)
    n_frames = probabilities.size(2)
    # Flatten frames: shape=(batch * time / hop_length, 360)
    flat_probs = probabilities.transpose(1, 2).reshape(-1, PITCH_BINS)
    # One selected bin per frame: shape=(batch * time / hop_length, 1)
    flat_bins = bins.reshape(-1, 1).to(torch.int64)
    # The probability at the decoded bin is the periodicity estimate
    selected = flat_probs.gather(1, flat_bins)
    # shape=(batch, time / hop_length)
    return selected.reshape(n_batch, n_frames)
13,898 | import Crepe
import os
def cents_to_bins(cents, quantize_fn=torch.floor):
    """Converts cents to pitch bins"""
    shifted = cents - 1997.3794084376191
    return quantize_fn(shifted / CENTS_PER_BIN).int()
def frequency_to_cents(frequency):
    """Convert frequency in Hz to cents"""
    # 1200 cents per octave, referenced to 10 Hz
    return torch.log2(frequency / 10.) * 1200
The provided code snippet includes necessary dependencies for implementing the `frequency_to_bins` function. Write a Python function `def frequency_to_bins(frequency, quantize_fn=torch.floor)` to solve the following problem:
Convert frequency in Hz to pitch bins
Here is the function:
def frequency_to_bins(frequency, quantize_fn=torch.floor):
    """Convert frequency in Hz to pitch bins"""
    cents = frequency_to_cents(frequency)
    return cents_to_bins(cents, quantize_fn)
13,899 | import librosa
import numpy as np
from voice_changer.RVC.pitchExtractor import onnxcrepe
MAX_FMAX = 2006.
def preprocess(audio,
               sample_rate,
               precision=None,
               batch_size=None,
               pad=True):
    """Convert audio to model input

    Generator yielding batches of overlapping 1024-sample frames.

    Arguments
        audio (numpy.ndarray [shape=(time,)])
            The audio signals
        sample_rate (int)
            The sampling rate in Hz
        precision (float)
            The precision in milliseconds, i.e. the length of each frame
        batch_size (int)
            The number of frames per batch
        pad (bool)
            Whether to zero-pad the audio

    Returns
        frames (numpy.ndarray [shape=(1 + int(time // precision), 1024)])
    """
    # Resample
    if sample_rate != SAMPLE_RATE:
        audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=SAMPLE_RATE)
    # Default hop length of 10 ms
    # NOTE: hop_length may be fractional here; it is truncated to int only
    # where used as a stride below.
    hop_length = SAMPLE_RATE / 100 if precision is None else SAMPLE_RATE * precision / 1000
    # Get total number of frames
    # Maybe pad (half a window on each side so frames are centered)
    if pad:
        total_frames = 1 + int(audio.shape[0] / hop_length)
        audio = np.pad(
            audio,
            (WINDOW_SIZE // 2, WINDOW_SIZE // 2))
    else:
        total_frames = 1 + int((audio.shape[0] - WINDOW_SIZE) / hop_length)
    # Default to running all frames in a single batch
    batch_size = total_frames if batch_size is None else batch_size
    # Generate batches
    for i in range(0, total_frames, batch_size):
        # Batch indices (sample offsets into the padded audio)
        start = max(0, int(i * hop_length))
        end = min(audio.shape[0],
                  int((i + batch_size - 1) * hop_length) + WINDOW_SIZE)
        # Chunk
        # as_strided builds overlapping frame views WITHOUT copying;
        # the yielded frames share memory with `audio` and must not be mutated.
        n_bytes = audio.strides[-1]
        frames = np.lib.stride_tricks.as_strided(
            audio[start:end],
            shape=((end - start - WINDOW_SIZE) // int(hop_length) + 1, WINDOW_SIZE),
            strides=(int(hop_length) * n_bytes, n_bytes))  # shape=(batch, 1024)
        # Note:
        # Z-score standardization operations originally located here
        # (https://github.com/maxrmorrison/torchcrepe/blob/master/torchcrepe/core.py#L692)
        # are wrapped into the ONNX models for hardware acceleration.
        yield frames
def infer(session, frames):
    """Forward pass through the model

    Arguments
        session (onnxcrepe.CrepeInferenceSession)
            An onnxruntime.InferenceSession holding the CREPE model
        frames (numpy.ndarray [shape=(time / precision, 1024)])
            The network input

    Returns
        logits (numpy.ndarray [shape=(1 + int(time // precision), 360)])
    """
    # Run the session; the model exposes a single input named 'frames'
    outputs = session.run(None, {'frames': frames})
    return outputs[0]
def postprocess(probabilities,
                fmin=0.,
                fmax=MAX_FMAX,
                decoder=onnxcrepe.decode.weighted_viterbi,
                return_periodicity=False):
    """Convert model output to F0 and periodicity

    Arguments
        probabilities (numpy.ndarray [shape=(1, 360, time / precision)])
            The probabilities for each pitch bin inferred by the network
        fmin (float)
            The minimum allowable frequency in Hz
        fmax (float)
            The maximum allowable frequency in Hz
        decoder (function)
            The decoder to use. See decode.py for decoders.
        return_periodicity (bool)
            Whether to also return the network confidence

    Returns
        pitch (numpy.ndarray [shape=(1, 1 + int(time // precision))])
        periodicity (numpy.ndarray [shape=(1, 1 + int(time // precision))])
    """
    # Map the allowable frequency range onto pitch-bin indices
    min_bin = onnxcrepe.convert.frequency_to_bins(fmin)
    max_bin = onnxcrepe.convert.frequency_to_bins(fmax, np.ceil)

    # Mask out-of-range bins in place so the decoder cannot pick them
    probabilities[:, :min_bin] = float('-inf')
    probabilities[:, max_bin:] = float('-inf')

    # Perform argmax or viterbi sampling
    bins, pitch = decoder(probabilities)

    if return_periodicity:
        # Compute periodicity from probabilities and decoded pitch bins
        return pitch, periodicity(probabilities, bins)
    return pitch
def periodicity(probabilities, bins):
    """Computes the periodicity from the network output and pitch bins"""
    n_batch, _, n_frames = probabilities.shape
    # Flatten frames: shape=(time / precision, 360)
    flat_probs = probabilities.transpose(0, 2, 1).reshape(-1, PITCH_BINS)
    # One decoded bin per frame: shape=(time / precision, 1)
    flat_bins = bins.reshape(-1, 1).astype(np.int64)
    # The probability at the decoded bin serves as the periodicity estimate
    picked = np.take_along_axis(flat_probs, flat_bins, axis=1)
    # shape=(batch, time / precision)
    return picked.reshape(n_batch, n_frames)
The provided code snippet includes necessary dependencies for implementing the `predict` function. Write a Python function `def predict(session, audio, sample_rate, precision=None, fmin=50., fmax=MAX_FMAX, decoder=onnxcrepe.decode.weighted_viterbi, return_periodicity=False, batch_size=None, pad=True)` to solve the following problem:
Performs pitch estimation Arguments session (onnxcrepe.CrepeInferenceSession) An onnxruntime.InferenceSession holding the CREPE model audio (numpy.ndarray [shape=(n_samples,)]) The audio signal sample_rate (int) The sampling rate in Hz precision (float) The precision in milliseconds, i.e. the length of each frame fmin (float) The minimum allowable frequency in Hz fmax (float) The maximum allowable frequency in Hz decoder (function) The decoder to use. See decode.py for decoders. return_periodicity (bool) Whether to also return the network confidence batch_size (int) The number of frames per batch pad (bool) Whether to zero-pad the audio Returns pitch (numpy.ndarray [shape=(1, 1 + int(time // precision))]) (Optional) periodicity (numpy.ndarray [shape=(1, 1 + int(time // precision))])
Here is the function:
def predict(session,
            audio,
            sample_rate,
            precision=None,
            fmin=50.,
            fmax=MAX_FMAX,
            decoder=onnxcrepe.decode.weighted_viterbi,
            return_periodicity=False,
            batch_size=None,
            pad=True):
    """Performs pitch estimation

    Arguments
        session (onnxcrepe.CrepeInferenceSession)
            An onnxruntime.InferenceSession holding the CREPE model
        audio (numpy.ndarray [shape=(n_samples,)])
            The audio signal
        sample_rate (int)
            The sampling rate in Hz
        precision (float)
            The precision in milliseconds, i.e. the length of each frame
        fmin (float)
            The minimum allowable frequency in Hz
        fmax (float)
            The maximum allowable frequency in Hz
        decoder (function)
            The decoder to use. See decode.py for decoders.
        return_periodicity (bool)
            Whether to also return the network confidence
        batch_size (int)
            The number of frames per batch
        pad (bool)
            Whether to zero-pad the audio

    Returns
        pitch (numpy.ndarray [shape=(1, 1 + int(time // precision))])
        (Optional) periodicity (numpy.ndarray
                                [shape=(1, 1 + int(time // precision))])
    """
    results = []

    # Preprocess audio into batches of overlapping frames
    generator = preprocess(audio,
                           sample_rate,
                           precision,
                           batch_size,
                           pad)
    for frames in generator:
        # Infer independent probabilities for each pitch bin
        probabilities = infer(session, frames)  # shape=(batch, 360)

        # Reshape to (1, 360, batch) as expected by the decoders
        probabilities = probabilities.transpose(1, 0)[None]

        # Convert probabilities to F0 and (optionally) periodicity.
        # `result` is either a pitch array or a (pitch, periodicity) tuple.
        # (A leftover no-op from the torch port that rebuilt the tuple
        # "to place it on the same device" has been removed: numpy arrays
        # have no device.)
        result = postprocess(probabilities,
                             fmin,
                             fmax,
                             decoder,
                             return_periodicity)
        results.append(result)

    # Split pitch and periodicity, concatenating along the time axis
    if return_periodicity:
        pitch, periodicity = zip(*results)
        return np.concatenate(pitch, axis=1), np.concatenate(periodicity, axis=1)

    # Concatenate
    return np.concatenate(results, axis=1)
13,900 | import numpy as np
def nanfilter(signals, win_length, filter_fn):
    """Filters a sequence, ignoring nan values

    Arguments
        signals (numpy.ndarray (shape=(batch, time)))
            The signals to filter
        win_length
            The size of the analysis window
        filter_fn (function)
            The function to use for filtering

    Returns
        filtered (numpy.ndarray (shape=(batch, time)))
    """
    half = win_length // 2
    n_frames = signals.shape[1]
    filtered = np.empty_like(signals)

    # Slide a centered window over the time axis, clamped at the edges
    for frame in range(n_frames):
        lo = max(0, frame - half)
        hi = min(n_frames, frame + half + 1)
        filtered[:, frame] = filter_fn(signals[:, lo:hi])

    return filtered
def nanmean(signals):
    """Computes the mean over the time axis, ignoring nans

    Arguments
        signals (numpy.ndarray [shape=(batch, time)])
            The signals to average

    Returns
        means (numpy.ndarray [shape=(batch,)])
            Per-row mean of the non-nan entries
    """
    # BUGFIX: numpy arrays have no .clone() (that is the torch API);
    # use .copy() so the caller's array is not mutated below.
    signals = signals.copy()
    # Find nans
    nans = np.isnan(signals)
    # Set nans to 0. so they contribute nothing to the sum
    signals[nans] = 0.
    # Divide by the count of non-nan entries per row
    return signals.sum(axis=1) / (~nans).astype(np.float32).sum(axis=1)
The provided code snippet includes necessary dependencies for implementing the `mean` function. Write a Python function `def mean(signals, win_length=9)` to solve the following problem:
Average filtering for signals containing nan values Arguments signals (numpy.ndarray (shape=(batch, time))) The signals to filter win_length The size of the analysis window Returns filtered (numpy.ndarray (shape=(batch, time)))
Here is the function:
def mean(signals, win_length=9):
    """Average filtering for signals containing nan values

    Arguments
        signals (numpy.ndarray (shape=(batch, time)))
            The signals to filter
        win_length
            The size of the analysis window

    Returns
        filtered (numpy.ndarray (shape=(batch, time)))
    """
    # Delegate to the generic nan-aware sliding-window filter
    return nanfilter(signals, win_length, filter_fn=nanmean)
13,901 | import warnings
import librosa
import numpy as np
from voice_changer.RVC.pitchExtractor import onnxcrepe
MIN_DB = -100.
def perceptual_weights():
    """A-weighted frequency-dependent perceptual loudness weights"""
    freqs = librosa.fft_frequencies(sr=onnxcrepe.SAMPLE_RATE,
                                    n_fft=onnxcrepe.WINDOW_SIZE)
    # Nearly inaudible frequencies raise a RuntimeWarning but default to
    # -100 dB, which is fine for our purposes, so silence the warning.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        weights = librosa.A_weighting(freqs)[:, None] - REF_DB
    return weights
The provided code snippet includes necessary dependencies for implementing the `a_weighted` function. Write a Python function `def a_weighted(audio, sample_rate, hop_length=None, pad=True)` to solve the following problem:
Retrieve the per-frame loudness
Here is the function:
def a_weighted(audio, sample_rate, hop_length=None, pad=True):
    """Retrieve the per-frame loudness

    Arguments
        audio: array with a leading singleton axis that is squeezed below
            (assumes shape (1, time) — TODO confirm against callers)
        sample_rate (int): sampling rate of `audio` in Hz
        hop_length (int): frame hop in samples; defaults to 10 ms
        pad (bool): whether the STFT centers (pads) frames

    Returns
        numpy.ndarray [shape=(1, n_frames)]: A-weighted loudness in dB (float32)
    """
    # Default hop length of 10 ms
    hop_length = sample_rate // 100 if hop_length is None else hop_length
    # Convert to numpy
    # NOTE(review): despite the comment this only squeezes the leading axis;
    # `audio` is presumably already a numpy array — confirm.
    audio = audio.squeeze(0)
    # Resample
    if sample_rate != onnxcrepe.SAMPLE_RATE:
        audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=onnxcrepe.SAMPLE_RATE)
        # Rescale the hop to the new sampling rate
        hop_length = int(hop_length * onnxcrepe.SAMPLE_RATE / sample_rate)
    # Cache weights (computed once, stored on the function object)
    if not hasattr(a_weighted, 'weights'):
        a_weighted.weights = perceptual_weights()
    # Take stft
    stft = librosa.stft(audio,
                        n_fft=onnxcrepe.WINDOW_SIZE,
                        hop_length=hop_length,
                        win_length=onnxcrepe.WINDOW_SIZE,
                        center=pad,
                        pad_mode='constant')
    # Compute magnitude on db scale
    db = librosa.amplitude_to_db(np.abs(stft))
    # Apply A-weighting
    weighted = db + a_weighted.weights
    # Threshold (floor at -100 dB)
    weighted[weighted < MIN_DB] = MIN_DB
    # Average over weighted frequencies
    return weighted.mean(axis=0).astype(np.float32)[None]
13,902 | import numpy as np
import scipy
from voice_changer.RVC.pitchExtractor import onnxcrepe
def bins_to_cents(bins, apply_dither=False):
    """Converts pitch bins to cents"""
    cents = bins * onnxcrepe.CENTS_PER_BIN + 1997.3794084376191
    # Optionally trade quantization error for noise (disabled by default)
    if apply_dither:
        return dither(cents)
    return cents
def cents_to_frequency(cents):
    """Converts cents to frequency in Hz"""
    # 1200 cents per octave, referenced to 10 Hz
    octaves = cents / 1200
    return 10 * 2 ** octaves
The provided code snippet includes necessary dependencies for implementing the `bins_to_frequency` function. Write a Python function `def bins_to_frequency(bins, apply_dither=False)` to solve the following problem:
Converts pitch bins to frequency in Hz
Here is the function:
def bins_to_frequency(bins, apply_dither=False):
    """Converts pitch bins to frequency in Hz"""
    cents = bins_to_cents(bins, apply_dither=apply_dither)
    return cents_to_frequency(cents)
13,903 | import numpy as np
import scipy
from voice_changer.RVC.pitchExtractor import onnxcrepe
def cents_to_bins(cents, quantize_fn=np.floor):
    """Converts cents to pitch bins"""
    raw_bins = (cents - 1997.3794084376191) / onnxcrepe.CENTS_PER_BIN
    return quantize_fn(raw_bins).astype(np.int64)
def frequency_to_cents(frequency):
    """Convert frequency in Hz to cents"""
    # 1200 cents per octave, referenced to 10 Hz
    return np.log2(frequency / 10.) * 1200
The provided code snippet includes necessary dependencies for implementing the `frequency_to_bins` function. Write a Python function `def frequency_to_bins(frequency, quantize_fn=np.floor)` to solve the following problem:
Convert frequency in Hz to pitch bins
Here is the function:
def frequency_to_bins(frequency, quantize_fn=np.floor):
    """Convert frequency in Hz to pitch bins"""
    cents = frequency_to_cents(frequency)
    return cents_to_bins(cents, quantize_fn)
13,904 | import librosa
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `audio` function. Write a Python function `def audio(filename)` to solve the following problem:
Load audio from disk
Here is the function:
def audio(filename):
    """Load audio from disk

    Returns
        samples (numpy.ndarray [shape=(time,)]): mono waveform
        sr (int): the file's native sampling rate
    """
    samples, sr = librosa.load(filename, sr=None)
    if samples.ndim > 1:
        # BUGFIX: librosa returns multi-channel audio as (channels, samples),
        # so downmix over axis=0, not axis=1 (which averaged over time).
        # With librosa.load's default mono=True this branch is normally
        # unreachable, but it was wrong when it fired.
        samples = np.mean(samples, axis=0)
    return samples, sr
13,905 | import librosa
import numpy as np
from voice_changer.RVC.pitchExtractor import onnxcrepe
def argmax(logits):
    """Sample observations by taking the argmax"""
    # Most likely pitch bin per frame
    bins = logits.argmax(axis=1)
    # Convert to frequency in Hz
    frequencies = onnxcrepe.convert.bins_to_frequency(bins)
    return bins, frequencies
def _apply_weights(logits, bins):
    """Decode cents as a probability-weighted average of logits within
    +/-4 bins of each selected bin, then convert to Hz.

    `logits` (shape=(batch, 360, time)) is mutated in place: everything
    outside each frame's window is set to -inf.
    """
    # Find bounds of analysis window (clamped to [0, n_bins])
    start = np.maximum(0, bins - 4)
    end = np.minimum(logits.shape[1], bins + 5)
    # Mask out everything outside of window
    for batch in range(logits.shape[0]):
        for time in range(logits.shape[2]):
            logits[batch, :start[batch, time], time] = float('-inf')
            logits[batch, end[batch, time]:, time] = float('-inf')
    # Construct weights (cents value of each bin; cached on the function object)
    if not hasattr(_apply_weights, 'weights'):
        weights = onnxcrepe.convert.bins_to_cents(np.arange(360))
        _apply_weights.weights = weights[None, :, None]
    # Convert to probabilities (ReLU); masked -inf entries become 0
    probs = np.maximum(0, logits)
    # Apply weights
    # NOTE(review): if every logit in a window is <= 0, probs.sum() is 0 and
    # this divides by zero, producing nan — confirm upstream guarantees.
    cents = (_apply_weights.weights * probs).sum(axis=1) / probs.sum(axis=1)
    # Convert to frequency in Hz
    return onnxcrepe.convert.cents_to_frequency(cents)
The provided code snippet includes necessary dependencies for implementing the `weighted_argmax` function. Write a Python function `def weighted_argmax(logits: np.ndarray)` to solve the following problem:
Sample observations using weighted sum near the argmax
Here is the function:
def weighted_argmax(logits: np.ndarray):
    """Sample observations using weighted sum near the argmax"""
    # Center of the analysis window is the per-frame argmax bin
    center_bins = logits.argmax(axis=1)
    # Refine to cents via a probability-weighted average around the center
    return center_bins, _apply_weights(logits, center_bins)
13,906 | import librosa
import numpy as np
from voice_changer.RVC.pitchExtractor import onnxcrepe
def viterbi(logits):
    """Sample observations using viterbi decoding

    `logits` has shape (batch, 360, time) and is shifted in place by the
    per-frame max during softmax normalization (downstream weighted
    decoding relies on receiving the shifted logits).
    """
    # Create viterbi transition matrix (cached on the function object)
    if not hasattr(viterbi, 'transition'):
        xx, yy = np.meshgrid(range(360), range(360))
        transition = np.maximum(12 - abs(xx - yy), 0)
        transition = transition / transition.sum(axis=1, keepdims=True)
        viterbi.transition = transition

    # Normalize logits (softmax over the pitch-bin axis).
    # BUGFIX: keepdims=True is required — without it the (batch, time)
    # reduction only broadcasts against (batch, 360, time) when batch == 1
    # and raises a ValueError for larger batches.
    logits -= logits.max(axis=1, keepdims=True)
    exp = np.exp(logits)
    probs = exp / np.sum(exp, axis=1, keepdims=True)

    # Perform viterbi decoding per batch item
    bins = np.array([
        librosa.sequence.viterbi(sequence, viterbi.transition).astype(np.int64)
        for sequence in probs])

    # Convert to frequency in Hz
    return bins, onnxcrepe.convert.bins_to_frequency(bins)
def _apply_weights(logits, bins):
    """Decode cents as a probability-weighted average of logits within
    +/-4 bins of each selected bin, then convert to Hz.

    `logits` (shape=(batch, 360, time)) is mutated in place: everything
    outside each frame's window is set to -inf.
    """
    # Find bounds of analysis window (clamped to [0, n_bins])
    start = np.maximum(0, bins - 4)
    end = np.minimum(logits.shape[1], bins + 5)
    # Mask out everything outside of window
    for batch in range(logits.shape[0]):
        for time in range(logits.shape[2]):
            logits[batch, :start[batch, time], time] = float('-inf')
            logits[batch, end[batch, time]:, time] = float('-inf')
    # Construct weights (cents value of each bin; cached on the function object)
    if not hasattr(_apply_weights, 'weights'):
        weights = onnxcrepe.convert.bins_to_cents(np.arange(360))
        _apply_weights.weights = weights[None, :, None]
    # Convert to probabilities (ReLU); masked -inf entries become 0
    probs = np.maximum(0, logits)
    # Apply weights
    # NOTE(review): if every logit in a window is <= 0, probs.sum() is 0 and
    # this divides by zero, producing nan — confirm upstream guarantees.
    cents = (_apply_weights.weights * probs).sum(axis=1) / probs.sum(axis=1)
    # Convert to frequency in Hz
    return onnxcrepe.convert.cents_to_frequency(cents)
The provided code snippet includes necessary dependencies for implementing the `weighted_viterbi` function. Write a Python function `def weighted_viterbi(logits)` to solve the following problem:
Sample observations combining viterbi decoding and weighted argmax
Here is the function:
def weighted_viterbi(logits):
    """Sample observations combining viterbi decoding and weighted argmax"""
    # Viterbi chooses the bin sequence; the weighted average refines cents
    decoded_bins, _ = viterbi(logits)
    return decoded_bins, _apply_weights(logits, decoded_bins)
13,907 | import os
import traceback
import faiss
from Exceptions import PipelineCreateException
from data.ModelSlot import RVCModelSlot
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.embedder.EmbedderManager import EmbedderManager
from voice_changer.RVC.inferencer.InferencerManager import InferencerManager
from voice_changer.RVC.pipeline.Pipeline import Pipeline
from voice_changer.RVC.pitchExtractor.PitchExtractorManager import PitchExtractorManager
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
def _loadIndex(indexPath: str):
    """Load a faiss index from disk.

    Returns the index, or None when the file is missing or unreadable
    (callers treat None as "run without retrieval").
    """
    # Load the index
    print("[Voice Changer] Loading index...")
    # A configured path may point at a nonexistent file; treat as no index
    if os.path.exists(indexPath) is not True or os.path.isfile(indexPath) is not True:
        print("[Voice Changer] Index file is not found")
        return None

    try:
        print("Try loading...", indexPath)
        index = faiss.read_index(indexPath)
    except Exception:  # BUGFIX: narrowed from bare `except:` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        print("[Voice Changer] load index failed. Use no index.")
        traceback.print_exc()
        return None

    return index
class PipelineCreateException(Exception):
    """Raised when a Pipeline could not be constructed."""

    def __str__(self):
        message = "Failed to create Pipeline."
        return repr(message)
class RVCModelSlot(ModelSlot):
    """Model-slot settings for an RVC (Retrieval-based Voice Conversion) model."""

    voiceChangerType: VoiceChangerType = "RVC"
    modelFile: str = ""
    indexFile: str = ""  # optional faiss index used for feature retrieval
    defaultTune: int = 0  # default pitch shift in semitones
    defaultIndexRatio: int = 0  # default blend ratio for index retrieval
    defaultProtect: float = 0.5  # default consonant-protection strength
    isONNX: bool = False  # True when modelFile is an ONNX export
    modelType: str = EnumInferenceTypes.pyTorchRVC.value
    samplingRate: int = -1  # model output sampling rate; -1 = unknown
    f0: bool = True  # whether the model consumes pitch (f0) input
    embChannels: int = 256  # embedder feature dimension (256 for v1, 768 for v2 — TODO confirm)
    embOutputLayer: int = 9  # embedder layer whose features are used
    useFinalProj: bool = True  # apply the embedder's final projection
    deprecated: bool = False
    embedder: EmbedderType = "hubert_base"
    sampleId: str = ""  # id of the downloaded sample this slot came from, if any
    speakers: dict = field(default_factory=lambda: {0: "target"})  # speaker id -> name
    version: str = "v2"  # RVC model version tag
class DeviceManager(object):
    """Singleton that resolves torch devices and ONNX execution providers
    for a requested GPU id, with CPU/MPS fallbacks."""

    _instance = None
    # When True, half precision is disabled regardless of GPU capability
    forceTensor: bool = False

    # NOTE(review): likely decorated with @classmethod in the original
    # source; the decorator is not visible in this extract — confirm.
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        self.gpu_num = torch.cuda.device_count()
        # MPS (Apple Silicon) serves as the fallback when no CUDA GPU is used
        self.mps_enabled: bool = (
            getattr(torch.backends, "mps", None) is not None
            and torch.backends.mps.is_available()
        )

    def getDevice(self, id: int):
        """Return a torch.device for `id`; negative id or no GPUs selects
        CPU (or MPS when enabled); out-of-range ids fall back to CPU."""
        if id < 0 or self.gpu_num == 0:
            if self.mps_enabled is False:
                dev = torch.device("cpu")
            else:
                dev = torch.device("mps")
        else:
            if id < self.gpu_num:
                dev = torch.device("cuda", index=id)
            else:
                print("[Voice Changer] device detection error, fallback to cpu")
                dev = torch.device("cpu")
        return dev

    def getOnnxExecutionProvider(self, gpu: int):
        """Return (providers, provider_options) for onnxruntime,
        preferring CUDA, then DirectML, then a parallel CPU setup."""
        availableProviders = onnxruntime.get_available_providers()
        devNum = torch.cuda.device_count()
        if gpu >= 0 and "CUDAExecutionProvider" in availableProviders and devNum > 0:
            # Could be rejected by the previous condition, but checked one
            # level down for better error resolution.
            if gpu < devNum:
                return ["CUDAExecutionProvider"], [{"device_id": gpu}]
            else:
                print("[Voice Changer] device detection error, fallback to cpu")
                return ["CPUExecutionProvider"], [
                    {
                        "intra_op_num_threads": 8,
                        "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                        "inter_op_num_threads": 8,
                    }
                ]
        elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{"device_id": gpu}]
        else:
            return ["CPUExecutionProvider"], [
                {
                    "intra_op_num_threads": 8,
                    "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                    "inter_op_num_threads": 8,
                }
            ]

    def setForceTensor(self, forceTensor: bool):
        # Force-disable half precision (see halfPrecisionAvailable)
        self.forceTensor = forceTensor

    def halfPrecisionAvailable(self, id: int):
        """Heuristic: True when GPU `id` is believed to run fp16 well
        (compute capability >= 7, excluding known-problematic boards)."""
        if self.gpu_num == 0:
            return False
        if id < 0:
            return False
        if self.forceTensor:
            return False
        try:
            gpuName = torch.cuda.get_device_name(id).upper()
            # Blocklist of boards known to perform poorly with fp16
            if (
                ("16" in gpuName and "V100" not in gpuName)
                or "P40" in gpuName.upper()
                or "1070" in gpuName
                or "1080" in gpuName
            ):
                return False
        except Exception as e:
            print(e)
            return False

        cap = torch.cuda.get_device_capability(id)
        # Half precision is said to be usable from compute capability 7
        # (with exceptions? e.g. T500)
        if cap[0] < 7:
            return False

        return True

    def getDeviceMemory(self, id: int):
        """Total memory of GPU `id` in bytes, or 0 on any failure."""
        try:
            return torch.cuda.get_device_properties(id).total_memory
        except Exception as e:
            # except:
            print(e)
            return 0
class EmbedderManager:
    """Factory/cache for feature-extraction embedders (hubert / contentvec /
    whisper), configured via VoiceChangerParams."""

    currentEmbedder: Embedder | None = None
    params: VoiceChangerParams

    # NOTE(review): likely decorated with @classmethod in the original
    # source; the decorators are not visible in this extract — confirm.
    def initialize(cls, params: VoiceChangerParams):
        cls.params = params

    def getEmbedder(cls, embederType: EmbedderType, isHalf: bool, dev: device) -> Embedder:
        """Return an embedder for `embederType`.

        NOTE(review): all three branches reload the embedder (the "anyway"
        branch regenerates even on a cache hit), so `currentEmbedder` is
        effectively not used as a cache.
        """
        if cls.currentEmbedder is None:
            print("[Voice Changer] generate new embedder. (no embedder)")
            cls.currentEmbedder = cls.loadEmbedder(embederType, isHalf, dev)
        elif cls.currentEmbedder.matchCondition(embederType) is False:
            print("[Voice Changer] generate new embedder. (not match)")
            cls.currentEmbedder = cls.loadEmbedder(embederType, isHalf, dev)
        else:
            print("[Voice Changer] generate new embedder. (anyway)")
            cls.currentEmbedder = cls.loadEmbedder(embederType, isHalf, dev)
        # cls.currentEmbedder.setDevice(dev)
        # cls.currentEmbedder.setHalf(isHalf)
        return cls.currentEmbedder

    def loadEmbedder(cls, embederType: EmbedderType, isHalf: bool, dev: device) -> Embedder:
        """Instantiate the embedder for `embederType`, preferring the ONNX
        contentvec when enabled and falling back to the torch models."""
        if embederType == "hubert_base":
            try:
                if cls.params.content_vec_500_onnx_on is False:
                    raise Exception("[Voice Changer][Embedder] onnx is off")
                file = cls.params.content_vec_500_onnx
                return OnnxContentvec().loadModel(file, dev)
            except Exception as e:  # noqa
                print("[Voice Changer] use torch contentvec", e)
            # Fallback: torch hubert
            file = cls.params.hubert_base
            return FairseqHubert().loadModel(file, dev, isHalf)
        elif embederType == "hubert-base-japanese":
            file = cls.params.hubert_base_jp
            return FairseqHubertJp().loadModel(file, dev, isHalf)
        elif embederType == "contentvec":
            try:
                if cls.params.content_vec_500_onnx_on is False:
                    raise Exception("[Voice Changer][Embedder] onnx is off")
                file = cls.params.content_vec_500_onnx
                return OnnxContentvec().loadModel(file, dev)
            except Exception as e:
                print(e)
            # Fallback: torch contentvec
            file = cls.params.hubert_base
            return FairseqContentvec().loadModel(file, dev, isHalf)
        elif embederType == "whisper":
            file = cls.params.whisper_tiny
            return Whisper().loadModel(file, dev, isHalf)
        else:
            # NOTE(review): `file` is unbound on this path — reaching here
            # raises NameError. Probably intended to raise an explicit
            # "unknown embedder" error instead.
            return FairseqHubert().loadModel(file, dev, isHalf)
class InferencerManager:
    """Factory for RVC inference backends (torch and ONNX variants)."""

    currentInferencer: Inferencer | None = None

    # NOTE(review): likely decorated with @classmethod in the original
    # source; the decorators are not visible in this extract — confirm.
    def getInferencer(
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
        inferencerTypeVersion: str | None = None,
    ) -> Inferencer:
        """Load an inferencer for the given type/model file and remember it."""
        cls.currentInferencer = cls.loadInferencer(inferencerType, file, gpu, inferencerTypeVersion)
        return cls.currentInferencer

    def loadInferencer(
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
        inferencerTypeVersion: str | None = None,
    ) -> Inferencer:
        """Dispatch on `inferencerType` and instantiate the matching backend.

        Each comparison accepts both the enum member and its .value so
        deserialized (string) settings also match.
        """
        if inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value:
            return RVCInferencer().loadModel(file, gpu)
        elif inferencerType == EnumInferenceTypes.pyTorchRVCNono or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value:
            return RVCInferencerNono().loadModel(file, gpu)
        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2 or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value:
            return RVCInferencerv2().loadModel(file, gpu)
        elif inferencerType == EnumInferenceTypes.pyTorchVoRASbeta or inferencerType == EnumInferenceTypes.pyTorchVoRASbeta.value:
            if sys.platform.startswith("darwin") is False:
                # Imported lazily because VoRAS is unavailable on macOS
                from voice_changer.RVC.inferencer.VorasInferencebeta import VoRASInferencer
                return VoRASInferencer().loadModel(file, gpu)
            else:
                raise RuntimeError("[Voice Changer] VoRAS is not supported on macOS")
        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value:
            return RVCInferencerv2Nono().loadModel(file, gpu)
        elif inferencerType == EnumInferenceTypes.pyTorchWebUI or inferencerType == EnumInferenceTypes.pyTorchWebUI.value:
            return WebUIInferencer().loadModel(file, gpu)
        elif inferencerType == EnumInferenceTypes.pyTorchWebUINono or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value:
            return WebUIInferencerNono().loadModel(file, gpu)
        elif inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == EnumInferenceTypes.onnxRVC.value:
            return OnnxRVCInferencer().loadModel(file, gpu, inferencerTypeVersion)
        elif inferencerType == EnumInferenceTypes.onnxRVCNono or inferencerType == EnumInferenceTypes.onnxRVCNono.value:
            return OnnxRVCInferencerNono().loadModel(file, gpu, inferencerTypeVersion)
        elif inferencerType == EnumInferenceTypes.easyVC or inferencerType == EnumInferenceTypes.easyVC.value:
            return EasyVCInferencerONNX().loadModel(file, gpu)
        else:
            raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
class Pipeline(object):
    """RVC inference pipeline: embedder -> (optional index retrieval) ->
    pitch extractor -> inferencer. Internally operates at 16 kHz."""

    embedder: Embedder
    inferencer: Inferencer
    pitchExtractor: PitchExtractor
    index: Any | None  # optional faiss index for feature retrieval
    big_npy: Any | None  # all index vectors materialized as an array
    # feature: Any | None
    targetSR: int  # output sampling rate of the inference model
    device: torch.device
    isHalf: bool  # run inference in fp16 when True

    def __init__(
        self,
        embedder: Embedder,
        inferencer: Inferencer,
        pitchExtractor: PitchExtractor,
        index: Any | None,
        # feature: Any | None,
        targetSR,
        device,
        isHalf,
    ):
        self.embedder = embedder
        self.inferencer = inferencer
        self.pitchExtractor = pitchExtractor
        logger.info("GENERATE INFERENCER" + str(self.inferencer))
        logger.info("GENERATE EMBEDDER" + str(self.embedder))
        logger.info("GENERATE PITCH EXTRACTOR" + str(self.pitchExtractor))

        self.index = index
        # Materialize every index vector once for fast numpy-side retrieval
        self.big_npy = index.reconstruct_n(0, index.ntotal) if index is not None else None
        # self.feature = feature

        self.targetSR = targetSR
        self.device = device
        self.isHalf = isHalf

        # Pipeline-internal processing rate and hop size (16 kHz, 10 ms hop)
        self.sr = 16000
        self.window = 160
    def getPipelineInfo(self):
        """Collect a status dict describing the pipeline's current components."""
        inferencerInfo = self.inferencer.getInferencerInfo() if self.inferencer else {}
        embedderInfo = self.embedder.getEmbedderInfo()
        pitchExtractorInfo = self.pitchExtractor.getPitchExtractorInfo()
        return {"inferencer": inferencerInfo, "embedder": embedderInfo, "pitchExtractor": pitchExtractorInfo, "isHalf": self.isHalf}
    def setPitchExtractor(self, pitchExtractor: PitchExtractor):
        # Hot-swap the pitch extractor (e.g. when the user changes settings)
        self.pitchExtractor = pitchExtractor
    def extractPitch(self, audio_pad, if_f0, pitchf, f0_up_key, silence_front):
        """Run the pitch extractor when the model consumes f0 (if_f0 == 1).

        Returns (pitch, pitchf) as (1, n)-shaped tensors on self.device,
        or (None, None) for f0-less models. Raises NotEnoughDataExtimateF0
        when the input is too short (surfaced internally as IndexError).
        """
        try:
            if if_f0 == 1:
                pitch, pitchf = self.pitchExtractor.extract(
                    audio_pad,
                    pitchf,
                    f0_up_key,
                    self.sr,
                    self.window,
                    silence_front=silence_front,
                )
                # pitch = pitch[:p_len]
                # pitchf = pitchf[:p_len]
                pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
                pitchf = torch.tensor(pitchf, device=self.device, dtype=torch.float).unsqueeze(0)
            else:
                pitch = None
                pitchf = None
        except IndexError as e:  # NOQA
            print(e)
            import traceback
            traceback.print_exc()
            raise NotEnoughDataExtimateF0()
        return pitch, pitchf
    def extractFeatures(self, feats, embOutputLayer, useFinalProj):
        """Extract embedder features under autocast.

        Translates backend RuntimeErrors into pipeline exceptions: half-
        precision failures -> HalfPrecisionChangingException, device
        mismatches -> DeviceChangingException. An all-nan result indicates
        the device cannot run half precision.
        """
        with autocast(enabled=self.isHalf):
            try:
                feats = self.embedder.extractFeatures(feats, embOutputLayer, useFinalProj)
                if torch.isnan(feats).all():
                    raise DeviceCannotSupportHalfPrecisionException()
                return feats
            except RuntimeError as e:
                # Classify backend errors by message text
                if "HALF" in e.__str__().upper():
                    raise HalfPrecisionChangingException()
                elif "same device" in e.__str__():
                    raise DeviceChangingException()
                else:
                    raise e
    def infer(self, feats, p_len, pitch, pitchf, sid, out_size):
        """Run the synthesis model and return int16 audio.

        Half-precision RuntimeErrors are translated into
        HalfPrecisionChangingException so callers can retry in fp32.
        """
        try:
            with torch.no_grad():
                with autocast(enabled=self.isHalf):
                    audio1 = self.inferencer.infer(feats, p_len, pitch, pitchf, sid, out_size)
                    # Scale [-1, 1] float audio to int16 range
                    audio1 = (audio1 * 32767.5).data.to(dtype=torch.int16)
            return audio1
        except RuntimeError as e:
            if "HALF" in e.__str__().upper():
                print("HalfPresicion Error:", e)
                raise HalfPrecisionChangingException()
            else:
                raise e
def exec(
    self,
    sid,
    audio,  # torch.tensor [n]
    pitchf,  # np.array [m]
    feature,  # np.array [m, feat]
    f0_up_key,
    index_rate,
    if_f0,
    silence_front,
    embOutputLayer,
    useFinalProj,
    repeat,
    protect=0.5,
    out_size=None,
):
    """End-to-end conversion: pad -> pitch -> embed -> index blend -> infer -> trim.

    Returns (audio1, pitchf_buffer, feats_buffer): converted int16 audio at
    the model's sampling rate plus the pitch/feature buffers for reuse.
    """
    with Timer2("Pipeline-Exec", False) as t:  # NOQA
        # Input arrives at a 16 kHz sampling rate; everything below operates at 16 kHz.
        search_index = self.index is not None and self.big_npy is not None and index_rate != 0
        audio = audio.unsqueeze(0)
        # reflect padding must be strictly smaller than the input length
        quality_padding_sec = (repeat * (audio.shape[1] - 1)) / self.sr
        self.t_pad = round(self.sr * quality_padding_sec)  # samples prepended/appended to the input
        self.t_pad_tgt = round(self.targetSR * quality_padding_sec)  # trim amount at the model's output rate
        audio_pad = F.pad(audio, (self.t_pad, self.t_pad), mode="reflect").squeeze(0)
        p_len = audio_pad.shape[0] // self.window
        sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
        # when RVC Quality is on (repeat != 0), disable silence_front
        silence_front = silence_front if repeat == 0 else 0
        pitchf = pitchf if repeat == 0 else np.zeros(p_len)
        out_size = out_size if repeat == 0 else None
        # tensor shape adjustment: down-mix stereo and flatten to [1, n]
        feats = audio_pad
        if feats.dim() == 2:  # double channels
            feats = feats.mean(-1)
        assert feats.dim() == 1, feats.dim()
        feats = feats.view(1, -1)
        t.record("pre-process")
        # pitch detection
        pitch, pitchf = self.extractPitch(audio_pad, if_f0, pitchf, f0_up_key, silence_front)
        t.record("extract-pitch")
        # embedding
        feats = self.extractFeatures(feats, embOutputLayer, useFinalProj)
        t.record("extract-feats")
        # index feature search (blend retrieved features with the embedder output)
        if search_index:
            npy = feats[0].cpu().numpy()
            # apply silent front for index search
            npyOffset = math.floor(silence_front * 16000) // 360
            npy = npy[npyOffset:]
            if self.isHalf is True:
                npy = npy.astype("float32")
            # TODO: make k configurable
            k = 1
            if k == 1:
                _, ix = self.index.search(npy, 1)
                npy = self.big_npy[ix.squeeze()]
            else:
                # weighted average of the 8 nearest index entries
                score, ix = self.index.search(npy, k=8)
                weight = np.square(1 / score)
                weight /= weight.sum(axis=1, keepdims=True)
                npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
            # recover the silent front
            npy = np.concatenate([np.zeros([npyOffset, npy.shape[1]], dtype=np.float32), feature[:npyOffset:2].astype("float32"), npy])[-feats.shape[1]:]
            feats = torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + (1 - index_rate) * feats
        # double the time resolution to match the model's frame rate
        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
        if protect < 0.5 and search_index:
            feats0 = feats.clone()
        # adjust pitch length to the feature length
        p_len = audio_pad.shape[0] // self.window
        if feats.shape[1] < p_len:
            p_len = feats.shape[1]
            if pitch is not None and pitchf is not None:
                pitch = pitch[:, :p_len]
                pitchf = pitchf[:, :p_len]
        feats_len = feats.shape[1]
        if pitch is not None and pitchf is not None:
            pitch = pitch[:, -feats_len:]
            pitchf = pitchf[:, -feats_len:]
        p_len = torch.tensor([feats_len], device=self.device).long()
        # when pitch estimation fails (pitchf == 0), blend in the pre-search features.
        # the way pitchff is built is questionable but matches upstream, so keep it as is.
        # https://github.com/w-okada/voice-changer/pull/276#issuecomment-1571336929
        if protect < 0.5 and search_index:
            pitchff = pitchf.clone()
            pitchff[pitchf > 0] = 1
            pitchff[pitchf < 1] = protect
            pitchff = pitchff.unsqueeze(-1)
            feats = feats * pitchff + feats0 * (1 - pitchff)
            feats = feats.to(feats0.dtype)
        p_len = torch.tensor([p_len], device=self.device).long()
        # apply silent front for inference (ONNX inferencers only)
        if type(self.inferencer) in [OnnxRVCInferencer, OnnxRVCInferencerNono]:
            npyOffset = math.floor(silence_front * 16000) // 360
            feats = feats[:, npyOffset * 2 :, :]  # NOQA
            feats_len = feats.shape[1]
            if pitch is not None and pitchf is not None:
                pitch = pitch[:, -feats_len:]
                pitchf = pitchf[:, -feats_len:]
            p_len = torch.tensor([feats_len], device=self.device).long()
        t.record("mid-precess")
        # run inference
        audio1 = self.infer(feats, p_len, pitch, pitchf, sid, out_size)
        t.record("infer")
        feats_buffer = feats.squeeze(0).detach().cpu()
        if pitchf is not None:
            pitchf_buffer = pitchf.squeeze(0).detach().cpu()
        else:
            pitchf_buffer = None
        del p_len, pitch, pitchf, feats
        # infer() outputs at the model's sampling rate
        # (pipeline input stays at 16 kHz for hubert)
        if self.t_pad_tgt != 0:
            # trim the reflect padding that was added at the start
            offset = self.t_pad_tgt
            end = -1 * self.t_pad_tgt
            audio1 = audio1[offset:end]
        del sid
        t.record("post-process")
        return audio1, pitchf_buffer, feats_buffer
def __del__(self):
    # Drop component references so their resources can be reclaimed.
    del self.embedder
    del self.inferencer
    del self.pitchExtractor
    print('Pipeline has been deleted')
class PitchExtractorManager(Protocol):
    """Factory/registry for PitchExtractor implementations.

    NOTE(review): the methods take ``cls`` but no @classmethod decorators are
    visible here — presumably stripped during extraction; confirm against the
    original file.
    """
    currentPitchExtractor: PitchExtractor | None = None
    params: VoiceChangerParams
    def initialize(cls, params: VoiceChangerParams):
        # Store global voice-changer parameters (model asset paths etc.).
        cls.params = params
    def getPitchExtractor(
        cls, pitchExtractorType: PitchExtractorType, gpu: int
    ) -> PitchExtractor:
        # Always loads a fresh extractor and remembers it as current.
        cls.currentPitchExtractor = cls.loadPitchExtractor(pitchExtractorType, gpu)
        return cls.currentPitchExtractor
    def loadPitchExtractor(
        cls, pitchExtractorType: PitchExtractorType, gpu: int
    ) -> PitchExtractor:
        # Map the requested extractor name to its implementation.
        if pitchExtractorType == "harvest":
            return HarvestPitchExtractor()
        elif pitchExtractorType == "dio":
            return DioPitchExtractor()
        elif pitchExtractorType == "crepe":
            return CrepePitchExtractor(gpu)
        elif pitchExtractorType == "crepe_tiny":
            return CrepeOnnxPitchExtractor(pitchExtractorType, cls.params.crepe_onnx_tiny, gpu)
        elif pitchExtractorType == "crepe_full":
            return CrepeOnnxPitchExtractor(pitchExtractorType, cls.params.crepe_onnx_full, gpu)
        elif pitchExtractorType == "rmvpe":
            return RMVPEPitchExtractor(cls.params.rmvpe, gpu)
        elif pitchExtractorType == "rmvpe_onnx":
            return RMVPEOnnxPitchExtractor(cls.params.rmvpe_onnx, gpu)
        elif pitchExtractorType == "fcpe":
            # add the FcpePitchExtractor
            return FcpePitchExtractor(gpu)
        else:
            # unknown type: warn and fall back to dio
            print("[Voice Changer] PitchExctractor not found", pitchExtractorType)
            print("    fallback to dio")
            return DioPitchExtractor()
class VoiceChangerParams:
    """File-system locations and flags for bundled model assets.

    All ``*_onnx`` entries are paths to ONNX exports; the rest are model
    checkpoint paths, except for the flags/modes noted below.
    """
    model_dir: str                 # root directory holding model slots
    content_vec_500: str
    content_vec_500_onnx: str
    content_vec_500_onnx_on: bool  # prefer the ONNX ContentVec when True
    hubert_base: str
    hubert_base_jp: str
    hubert_soft: str
    nsf_hifigan: str
    sample_mode: str               # which set of sample models to use — TODO confirm semantics
    crepe_onnx_full: str
    crepe_onnx_tiny: str
    rmvpe: str
    rmvpe_onnx: str
    whisper_tiny: str
def createPipeline(params: VoiceChangerParams, modelSlot: RVCModelSlot, gpu: int, f0Detector: str):
    """Assemble a Pipeline (inferencer + embedder + pitch extractor + feature index) for one model slot.

    Raises PipelineCreateException when the inferencer or embedder fails to load.
    """
    dev = DeviceManager.get_instance().getDevice(gpu)
    half = DeviceManager.get_instance().halfPrecisionAvailable(gpu)
    # create the inferencer
    try:
        modelPath = os.path.join(params.model_dir, str(modelSlot.slotIndex), os.path.basename(modelSlot.modelFile))
        inferencer = InferencerManager.getInferencer(modelSlot.modelType, modelPath, gpu, modelSlot.version)
    except Exception as e:
        print("[Voice Changer] exception! loading inferencer", e)
        traceback.print_exc()
        raise PipelineCreateException("[Voice Changer] exception! loading inferencer")
    # create the embedder
    try:
        embedder = EmbedderManager.getEmbedder(
            modelSlot.embedder,
            half,
            dev,
        )
    except Exception as e:
        print("[Voice Changer] exception! loading embedder", e, dev)
        traceback.print_exc()
        raise PipelineCreateException("[Voice Changer] exception! loading embedder")
    # pitch extractor
    pitchExtractor = PitchExtractorManager.getPitchExtractor(f0Detector, gpu)
    # feature index for retrieval blending
    indexPath = os.path.join(params.model_dir, str(modelSlot.slotIndex), os.path.basename(modelSlot.indexFile))
    index = _loadIndex(indexPath)
    pipeline = Pipeline(
        embedder,
        inferencer,
        pitchExtractor,
        index,
        modelSlot.samplingRate,
        dev,
        half,
    )
    return pipeline
13,908 | import torch
from torch.nn import functional as F
import numpy as np
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
def unconstrained_rational_quadratic_spline(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tails="linear",
tail_bound=1.0,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE,
):
def rational_quadratic_spline(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
left=0.0,
right=1.0,
bottom=0.0,
top=1.0,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE,
):
def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Apply a rational-quadratic spline, optionally with unconstrained tails."""
    common = dict(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )
    if tails is None:
        outputs, logabsdet = rational_quadratic_spline(**common)
    else:
        outputs, logabsdet = unconstrained_rational_quadratic_spline(
            tails=tails, tail_bound=tail_bound, **common
        )
    return outputs, logabsdet
13,909 | import math
import torch
from torch.nn import functional as F
def init_weights(m, mean=0.0, std=0.01):
    """Initialize conv-layer weights from N(mean, std); other modules are untouched."""
    if "Conv" in type(m).__name__:
        m.weight.data.normal_(mean, std)
13,910 | import math
import torch
from torch.nn import functional as F
def get_padding(kernel_size, dilation=1):
    """'Same' padding for an odd kernel with the given dilation."""
    return (kernel_size - 1) * dilation // 2
13,911 | import math
import torch
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `kl_divergence` function. Write a Python function `def kl_divergence(m_p, logs_p, m_q, logs_q)` to solve the following problem:
KL(P||Q)
Here is the function:
def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q) between diagonal Gaussians given means and log-stddevs."""
    inv_var_q = torch.exp(-2.0 * logs_q)
    quad = torch.exp(2.0 * logs_p) + (m_p - m_q) ** 2
    return (logs_q - logs_p) - 0.5 + 0.5 * quad * inv_var_q
13,912 | import math
import torch
from torch.nn import functional as F
def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protected from log(0) overflow."""
    # squeeze uniforms into (1e-5, 0.99999) so both logs stay finite
    uniform = torch.rand(shape) * 0.99998 + 0.00001
    gumbel = -torch.log(-torch.log(uniform))
    return gumbel
def rand_gumbel_like(x):
    """Gumbel noise with the same shape, dtype and device as *x*."""
    noise = rand_gumbel(x.size())
    return noise.to(dtype=x.dtype, device=x.device)
13,913 | import math
import torch
from torch.nn import functional as F
def slice_segments2(x, ids_str, segment_size=4):
    """Gather per-row windows of length segment_size from a [b, t] tensor."""
    out = torch.zeros_like(x[:, :segment_size])
    for row in range(x.size(0)):
        start = ids_str[row]
        out[row] = x[row, start : start + segment_size]
    return out
13,914 | import math
import torch
from torch.nn import functional as F
def slice_segments(x, ids_str, segment_size=4):
    """Gather per-row windows of length segment_size from a [b, d, t] tensor."""
    out = torch.zeros_like(x[:, :, :segment_size])
    for row in range(x.size(0)):
        start = ids_str[row]
        out[row] = x[row, :, start : start + segment_size]
    return out
def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """Pick a uniform random start per batch row and slice segment_size frames."""
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    max_start = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * max_start).to(dtype=torch.long)
    return slice_segments(x, ids_str, segment_size), ids_str
13,915 | import math
import torch
from torch.nn import functional as F
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Transformer-style sinusoidal positional signal of shape [1, channels, length]."""
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_increment = math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1)
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    # zero-pad one extra row when channels is odd
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    return signal.view(1, channels, length)
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add sinusoidal positional encodings to x of shape [b, channels, length]."""
    _, channels, length = x.size()
    sig = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + sig.to(dtype=x.dtype, device=x.device)
13,916 | import math
import torch
from torch.nn import functional as F
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Sinusoidal positional signal [1, channels, length] (Vaswani et al. style)."""
    num_timescales = channels // 2
    step = math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1)
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -step
    )
    positions = torch.arange(length, dtype=torch.float)
    scaled_time = positions.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])  # pad one row for odd channel counts
    return signal.view(1, channels, length)
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    """Concatenate sinusoidal positional encodings to x along *axis*."""
    _, channels, length = x.size()
    sig = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    sig = sig.to(dtype=x.dtype, device=x.device)
    return torch.cat([x, sig], axis)
13,917 | import math
import torch
from torch.nn import functional as F
def subsequent_mask(length):
    """Lower-triangular causal mask of shape [1, 1, length, length]."""
    return torch.tril(torch.ones(length, length))[None, None]
13,918 | import math
import torch
from torch.nn import functional as F
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """WaveNet gated activation: tanh(first half) * sigmoid(second half) of (a + b)."""
    n = n_channels[0]
    summed = input_a + input_b
    gate_in = torch.tanh(summed[:, :n, :])
    gate = torch.sigmoid(summed[:, n:, :])
    return gate_in * gate
13,919 | import math
import torch
from torch.nn import functional as F
def convert_pad_shape(pad_shape):
    """Flatten a reversed list of [before, after] pairs into F.pad argument order."""
    return [amount for pair in pad_shape[::-1] for amount in pair]
def shift_1d(x):
    """Shift the last dimension right by one, zero-filling the first position."""
    padded = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))
    return padded[:, :, :-1]
13,920 | import math
import torch
from torch.nn import functional as F
def convert_pad_shape(pad_shape):
    """Convert [[b, a], ...] per-dim padding into the flat, reversed F.pad format."""
    flattened = []
    for pair in reversed(pad_shape):
        flattened.extend(pair)
    return flattened
def sequence_mask(length, max_length=None):
    """Boolean mask [b, max_length] with True where position < length[b]."""
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions.unsqueeze(0) < length.unsqueeze(1)
The provided code snippet includes necessary dependencies for implementing the `generate_path` function. Write a Python function `def generate_path(duration, mask)` to solve the following problem:
duration: [b, 1, t_x] mask: [b, 1, t_y, t_x]
Here is the function:
def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]

    Expand integer durations into a hard monotonic alignment path of
    shape [b, 1, t_y, t_x].
    """
    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)
    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # differencing the shifted cumulative masks leaves a one-hot band per x step
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path
13,921 | import math
import torch
from torch.nn import functional as F
def clip_grad_value_(parameters, clip_value, norm_type=2):
    """Clamp gradients elementwise to [-clip_value, clip_value] in place.

    Returns the total gradient norm (computed before clamping) over all
    parameters that have a gradient. clip_value=None skips clamping.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    with_grads = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)
    total_norm = 0
    for p in with_grads:
        total_norm += p.grad.data.norm(norm_type).item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    return total_norm ** (1.0 / norm_type)
13,922 | import numpy as np
import torch
from torch.nn import functional as F
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
def unconstrained_rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Rational-quadratic spline with identity (linear) tails outside [-tail_bound, tail_bound].

    Returns (outputs, logabsdet), each shaped like *inputs*.
    """
    # split points into those inside the spline interval and those in the tails
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask
    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)
    if tails == "linear":
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        # boundary value chosen so softplus(constant) + min_derivative == 1,
        # making the spline meet the identity tails with slope 1
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant
        # tails are the identity: output == input, log|det| == 0
        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError("{} tails are not implemented.".format(tails))
    (
        outputs[inside_interval_mask],
        logabsdet[inside_interval_mask],
    ) = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )
    return outputs, logabsdet
def rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Monotonic rational-quadratic spline (Durkan et al., Neural Spline Flows).

    Maps inputs in [left, right] to [bottom, top] (or the inverse mapping
    when inverse=True) and returns (outputs, logabsdet).
    """
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError("Input to a transform is not within its domain")
    num_bins = unnormalized_widths.shape[-1]
    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")
    # bin widths: softmax-normalized, floored at min_bin_width, spread over [left, right]
    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
    # knot derivatives: softplus keeps them positive, floored at min_derivative
    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
    # bin heights, analogous to widths, spread over [bottom, top]
    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]
    # locate each input's bin (search the y-knots for the inverse map)
    # NOTE: searchsorted is a helper defined elsewhere in this module
    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]
    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths  # per-bin average slope
    input_delta = delta.gather(-1, bin_idx)[..., 0]
    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
    input_heights = heights.gather(-1, bin_idx)[..., 0]
    if inverse:
        # solve the bin-local quadratic (eqs. 6-8 of the paper) for theta
        a = (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        ) + input_heights * (input_delta - input_derivatives)
        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        )
        c = -input_delta * (inputs - input_cumheights)
        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()
        # numerically stable quadratic root
        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths
        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * root.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - root).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
        # inverse transform: negate the forward log-determinant
        return outputs, -logabsdet
    else:
        # forward evaluation of the rational-quadratic segment
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)
        numerator = input_heights * (
            input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
        )
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        outputs = input_cumheights + numerator / denominator
        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * theta.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - theta).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
        return outputs, logabsdet
def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Dispatch to the constrained or tail-unconstrained spline transform."""
    if tails is None:
        transform = rational_quadratic_spline
        tail_kwargs = {}
    else:
        transform = unconstrained_rational_quadratic_spline
        tail_kwargs = {"tails": tails, "tail_bound": tail_bound}
    return transform(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **tail_kwargs
    )
13,923 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def load_audio(file: str, sr):
    """Decode *file* to a mono float32 waveform at sample rate *sr* via the ffmpeg CLI."""
    try:
        # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26
        # Launch a subprocess that decodes, down-mixes and resamples in one go.
        # Requires the ffmpeg CLI and the `ffmpeg-python` package.
        cleaned = (
            file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        )  # drop stray quotes/whitespace around copy-pasted paths
        stream = ffmpeg.input(cleaned, threads=0).output(
            "-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr
        )
        out, _ = stream.run(
            cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
        )
    except Exception as e:
        raise RuntimeError(f"Failed to load audio: {e}")
    return np.frombuffer(out, np.float32).flatten()
13,924 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def find_empty_port():
    """Ask the OS for a free TCP port and return its number."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))  # port 0 lets the OS pick any free port
        s.listen(1)
        return s.getsockname()[1]
13,925 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
    """Load a training checkpoint into *model* (and optionally *optimizer*).

    Shape-mismatched 2-D weights are bilinearly interpolated to the model's
    shape (so e.g. 256-dim pretrained embeddings can warm-start a 768-dim
    model); any other mismatch keeps the model's own initial value.
    Returns (model, optimizer, learning_rate, epoch).
    """
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
    saved_state_dict = checkpoint_dict["model"]
    if hasattr(model, "module"):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():  # iterate over the shapes the model expects
        try:
            new_state_dict[k] = saved_state_dict[k]
            if saved_state_dict[k].shape != state_dict[k].shape:
                print(
                    f"shape-{k}-mismatch|need-{state_dict[k].shape}|get-{saved_state_dict[k].shape}"
                )
                if saved_state_dict[k].dim() == 2:  # NOTE: check is this ok?
                    # for embedded input 256 <==> 768
                    # this achieves we can continue training from original's pretrained checkpoints when using embedder that 768-th dim output etc.
                    if saved_state_dict[k].dtype == torch.half:
                        # interpolate in fp32, then cast back to half
                        new_state_dict[k] = (
                            F.interpolate(
                                saved_state_dict[k].float().unsqueeze(0).unsqueeze(0),
                                size=state_dict[k].shape,
                                mode="bilinear",
                            )
                            .half()
                            .squeeze(0)
                            .squeeze(0)
                        )
                    else:
                        new_state_dict[k] = (
                            F.interpolate(
                                saved_state_dict[k].unsqueeze(0).unsqueeze(0),
                                size=state_dict[k].shape,
                                mode="bilinear",
                            )
                            .squeeze(0)
                            .squeeze(0)
                        )
                    print(
                        "interpolated new_state_dict",
                        k,
                        "from",
                        saved_state_dict[k].shape,
                        "to",
                        new_state_dict[k].shape,
                    )
                else:
                    # non-2-D mismatch: fall through to the except below
                    raise KeyError
        except Exception as e:
            print(f"{k} is not in the checkpoint")
            print("error: %s" % e)
            new_state_dict[k] = v  # fall back to the model's own (randomly initialized) value
    if hasattr(model, "module"):
        model.module.load_state_dict(new_state_dict, strict=False)
    else:
        model.load_state_dict(new_state_dict, strict=False)
    print("Loaded model weights")
    epoch = checkpoint_dict["epoch"]
    learning_rate = checkpoint_dict["learning_rate"]
    if optimizer is not None and load_opt == 1:
        optimizer.load_state_dict(checkpoint_dict["optimizer"])
    print("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, epoch))
    return model, optimizer, learning_rate, epoch
13,926 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def save_state(model, optimizer, learning_rate, epoch, checkpoint_path):
    """Serialize model/optimizer state plus training progress to checkpoint_path."""
    print(
        "Saving model and optimizer state at epoch {} to {}".format(
            epoch, checkpoint_path
        )
    )
    # unwrap DataParallel/DDP containers before extracting the state dict
    state_dict = model.module.state_dict() if hasattr(model, "module") else model.state_dict()
    payload = {
        "model": state_dict,
        "epoch": epoch,
        "optimizer": optimizer.state_dict(),
        "learning_rate": learning_rate,
    }
    torch.save(payload, checkpoint_path)
13,927 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def summarize(
    writer,
    global_step,
    scalars={},
    histograms={},
    images={},
    audios={},
    audio_sampling_rate=22050,
):
    """Push batches of scalars/histograms/images/audios to a TensorBoard writer.

    NOTE: mutable default args kept for interface compatibility; never mutated here.
    """
    for tag, value in scalars.items():
        writer.add_scalar(tag, value, global_step)
    for tag, value in histograms.items():
        writer.add_histogram(tag, value, global_step)
    for tag, value in images.items():
        writer.add_image(tag, value, global_step, dataformats="HWC")
    for tag, value in audios.items():
        writer.add_audio(tag, value, global_step, audio_sampling_rate)
13,928 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    """Return the checkpoint matching *regex* with the largest embedded number, or None."""
    candidates = glob.glob(os.path.join(dir_path, regex))
    if not candidates:
        return None
    # order on all digits found in the path so G_100.pth beats G_99.pth
    candidates.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    return candidates[-1]
13,929 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def plot_spectrogram_to_numpy(spectrogram):
    """Render a spectrogram array to an RGB uint8 image of shape (H, W, 3).

    Fix: ``np.fromstring`` on binary data is deprecated (and rejected by
    modern NumPy), and ``FigureCanvas.tostring_rgb`` was removed from recent
    Matplotlib. Use ``buffer_rgba`` plus a writable copy instead; the output
    shape and dtype are unchanged.
    """
    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()
    fig.canvas.draw()
    rgba = np.asarray(fig.canvas.buffer_rgba(), dtype=np.uint8)
    data = rgba[..., :3].copy()  # drop alpha; copy() keeps the array writable
    plt.close()
    return data
13,930 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def plot_alignment_to_numpy(alignment, info=None):
    """Render an alignment matrix to an RGB uint8 image of shape (H, W, 3).

    Fix: ``np.fromstring`` on binary data is deprecated (and rejected by
    modern NumPy), and ``FigureCanvas.tostring_rgb`` was removed from recent
    Matplotlib. Use ``buffer_rgba`` plus a writable copy instead; the output
    shape and dtype are unchanged.
    """
    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(
        alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
    )
    fig.colorbar(im, ax=ax)
    xlabel = "Decoder timestep"
    if info is not None:
        xlabel += "\n\n" + info
    plt.xlabel(xlabel)
    plt.ylabel("Encoder timestep")
    plt.tight_layout()
    fig.canvas.draw()
    rgba = np.asarray(fig.canvas.buffer_rgba(), dtype=np.uint8)
    data = rgba[..., :3].copy()  # drop alpha; copy() keeps the array writable
    plt.close()
    return data
13,931 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
def load_wav_to_torch(full_path):
    """Load a wav file as (float32 tensor, sampling_rate) without normalization."""
    sampling_rate, samples = read(full_path)
    waveform = torch.FloatTensor(samples.astype(np.float32))
    return waveform, sampling_rate
13,932 | import glob
import logging
import os
import shutil
import socket
import sys
import ffmpeg
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F
from modules.shared import ROOT_DIR
from .config import TrainConfig
class TrainConfig(BaseModel):
    """Top-level training configuration (pydantic model) parsed from a JSON config file."""
    version: Literal["voras"] = "voras"  # config schema tag
    train: TrainConfigTrain
    data: TrainConfigData
    model: TrainConfigModel
def load_config(training_dir: str, sample_rate: int, emb_channels: int):
    """Select the bundled config for this sample rate / embedding width and copy it into the run dir."""
    # 256 is the historical default embedding width and has no filename suffix
    if emb_channels == 256:
        config_path = os.path.join(ROOT_DIR, "configs", f"{sample_rate}.json")
    else:
        config_path = os.path.join(
            ROOT_DIR, "configs", f"{sample_rate}-{emb_channels}.json"
        )
    # copy into the training dir so the run is self-contained/reproducible
    config_save_path = os.path.join(training_dir, "config.json")
    shutil.copyfile(config_path, config_save_path)
    return TrainConfig.parse_file(config_save_path)
13,933 | import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import Conv1d, Conv2d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from torchaudio.functional.functional import _hz_to_mel, _mel_to_hz
from . import commons, modules
from .commons import get_padding, init_weights
from .transforms import piecewise_rational_quadratic_transform
def symexp(x: torch.Tensor) -> torch.Tensor:
    """Signed exponential: sign(x) * (exp(|x|) - 1), the inverse of symlog."""
    magnitude = torch.exp(torch.abs(x)) - 1
    return torch.sign(x) * magnitude
13,938 | import math
import torch
from torch.nn import functional as F
def slice_segments2(x, ids_str, segment_size=4):
    """Per-row windows from a [b, t] tensor; windows past the end are zero-padded."""
    out = torch.zeros_like(x[:, :segment_size])
    for row in range(x.size(0)):
        start = ids_str[row]
        chunk = x[row, start : start + segment_size]
        out[row, : chunk.size(0)] = chunk
    return out
13,939 | import math
import torch
from torch.nn import functional as F
def slice_segments(x, ids_str, segment_size=4):
    """Per-row windows from a [b, d, t] tensor; windows past the end are zero-padded."""
    out = torch.zeros_like(x[:, :, :segment_size])
    for row in range(x.size(0)):
        start = ids_str[row]
        chunk = x[row, :, start : start + segment_size]
        out[row, :, : chunk.size(1)] = chunk
    return out
def rand_slice_segments(x, x_lengths, segment_size=4, ids_str=None):
    """Draw one random in-bounds start per batch row and slice ``segment_size`` frames.

    ``ids_str`` (optional) is a per-row base offset; the random draw is added on
    top of it and clamped so the window never crosses ``x_lengths``.
    Returns the slices together with the final start indices.
    """
    b, d, t = x.size()
    if ids_str is None:
        ids_str = torch.zeros([b]).to(device=x.device, dtype=x_lengths.dtype)
    headroom = torch.maximum(
        torch.zeros_like(x_lengths).to(device=x_lengths.device, dtype=x_lengths.dtype),
        x_lengths - segment_size + 1 - ids_str,
    )
    ids_str += (torch.rand([b]).to(device=x.device) * headroom).to(dtype=torch.long)
    return slice_segments(x, ids_str, segment_size), ids_str
13,947 | from typing import Dict, Any
import os
from collections import OrderedDict
import torch
from voice_changer.ModelSlotManager import ModelSlotManager
from voice_changer.utils.ModelMerger import ModelMergerRequest
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
class ModelSlotManager:
    """Singleton registry for per-slot model metadata stored under ``model_dir``.

    Slot infos are loaded eagerly at construction; every write goes through
    ``_save_model_slot`` which persists to disk and then re-reads the whole
    slot list so memory and disk stay in sync.
    """

    _instance = None  # process-wide singleton, see get_instance()

    def __init__(self, model_dir: str):
        self.model_dir = model_dir
        self.modelSlots = loadAllSlotInfo(self.model_dir)
        logger.debug(f"[MODEL SLOT INFO] {self.modelSlots}")

    @classmethod
    def get_instance(cls, model_dir: str):
        # BUGFIX: this accessor takes ``cls`` and is invoked on the class
        # (e.g. ``ModelSlotManager.get_instance(dir)`` in merge_model) but was
        # missing @classmethod, so the directory argument was bound to ``cls``
        # and ``cls._instance`` failed on the str.
        if cls._instance is None:
            cls._instance = cls(model_dir)
        return cls._instance

    def _save_model_slot(self, slotIndex: int, slotInfo: ModelSlots):
        # Persist one slot, then reload everything so self.modelSlots is fresh.
        saveSlotInfo(self.model_dir, slotIndex, slotInfo)
        self.modelSlots = loadAllSlotInfo(self.model_dir)

    def _load_model_slot(self, slotIndex: int):
        return self.modelSlots[slotIndex]

    def _search_model_slot(self, slotIndex: StaticSlot):
        # Static slots are looked up by id rather than list position.
        target = [x for x in self.modelSlots if x.slotIndex == slotIndex]
        if len(target) > 0:
            return target[0]
        else:
            return None

    def getAllSlotInfo(self, reload: bool = False):
        if reload:
            self.modelSlots = loadAllSlotInfo(self.model_dir)
        return self.modelSlots

    def get_slot_info(self, slotIndex: int | StaticSlot):
        if slotIndex == "Beatrice-JVS":
            return self._search_model_slot(slotIndex)
        else:
            return self._load_model_slot(slotIndex)

    def save_model_slot(self, slotIndex: int, slotInfo: ModelSlots):
        self._save_model_slot(slotIndex, slotInfo)

    def update_model_info(self, newData: str):
        """Apply a JSON-encoded ``{slot, key, val}`` patch to one slot."""
        logger.info(f"[Voice Changer] UPDATE MODEL INFO, {newData}")
        newDataDict = json.loads(newData)
        slotInfo = self._load_model_slot(newDataDict["slot"])
        if newDataDict["key"] == "speakers":
            # "speakers" is itself JSON-encoded, not a plain scalar.
            setattr(slotInfo, newDataDict["key"], json.loads(newDataDict["val"]))
        else:
            setattr(slotInfo, newDataDict["key"], newDataDict["val"])
        self._save_model_slot(newDataDict["slot"], slotInfo)

    def store_model_assets(self, params: str):
        """Move an uploaded asset into the slot's directory and record its path."""
        paramsDict = json.loads(params)
        uploadPath = os.path.join(UPLOAD_DIR, paramsDict["file"])
        storeDir = os.path.join(self.model_dir, str(paramsDict["slot"]))
        storePath = os.path.join(
            storeDir,
            paramsDict["file"],
        )
        try:
            shutil.move(uploadPath, storePath)
            slotInfo = self._load_model_slot(paramsDict["slot"])
            setattr(slotInfo, paramsDict["name"], storePath)
            self._save_model_slot(paramsDict["slot"], slotInfo)
        except Exception as e:
            # Best-effort: a failed move/update is logged, not raised.
            logger.info(f"[Voice Changer] Exception: {e}")
            logger.error(e)
class ModelMergerRequest:
    """Payload describing a model-merge operation.

    NOTE(review): declared with bare annotations — upstream this is presumably
    a dataclass/pydantic model; the decorator is not visible in this chunk.
    """

    voiceChangerType: VoiceChangerType  # which changer family the merge targets
    command: str  # merge command name
    files: list[MergeElement]  # source models with per-model blend strengths
class VoiceChangerParams:
    """Bundle of filesystem paths and flags for voice-changer resources.

    NOTE(review): bare-annotation declaration — upstream presumably a
    dataclass; the decorator is not visible in this chunk.
    """

    model_dir: str  # root directory holding per-slot model files (see merge_model)
    content_vec_500: str
    content_vec_500_onnx: str
    content_vec_500_onnx_on: bool
    hubert_base: str
    hubert_base_jp: str
    hubert_soft: str
    nsf_hifigan: str
    sample_mode: str
    crepe_onnx_full: str
    crepe_onnx_tiny: str
    rmvpe: str
    rmvpe_onnx: str
    whisper_tiny: str
def merge_model(params: VoiceChangerParams, request: ModelMergerRequest):
    """Weighted-average merge of several model checkpoints.

    Each requested file contributes with its normalized ``strength``; entries
    with strength 0 are skipped. Metadata (config, sr, f0, ...) is copied from
    the last checkpoint loaded.

    Raises:
        RuntimeError: when no files are given, no file has a non-zero
            strength, or the checkpoints' parameter sets do not match.
    """

    def extract(ckpt: Dict[str, Any]):
        # Keep only inference weights; drop the training-time posterior
        # encoder ("enc_q") sub-module.
        a = ckpt["model"]
        opt: Dict[str, Any] = OrderedDict()
        opt["weight"] = {}
        for key in a.keys():
            if "enc_q" in key:
                continue
            opt["weight"][key] = a[key]
        return opt

    def load_weight(path: str):
        print(f"Loading {path}...")
        state_dict = torch.load(path, map_location="cpu")
        if "model" in state_dict:
            # BUGFIX: extract() wraps the parameters as {"weight": {...}} while
            # the other branch returns the flat parameter dict; unwrap so the
            # merge loop below multiplies per-tensor in both cases.
            weight = extract(state_dict)["weight"]
        else:
            weight = state_dict["weight"]
        return weight, state_dict

    files = request.files
    if len(files) == 0:
        print("no merge file..............")
        raise RuntimeError("no merge file..............")

    weights = []
    alphas = []
    slotManager = ModelSlotManager.get_instance(params.model_dir)
    for f in files:
        strength = f.strength
        if strength == 0:
            continue
        slotInfo = slotManager.get_slot_info(f.slotIndex)
        # slotInfo.modelFile included model_dir in v.1.5.3.11 and earlier,
        # so only its basename is trusted here.
        filename = os.path.join(params.model_dir, str(f.slotIndex), os.path.basename(slotInfo.modelFile))
        weight, state_dict = load_weight(filename)
        weights.append(weight)
        alphas.append(f.strength)

    # BUGFIX: an all-zero-strength request previously crashed further down
    # with ZeroDivisionError / IndexError; fail with a clear message instead.
    if len(weights) == 0:
        raise RuntimeError("no weight to merge (all strengths are zero)")

    alphas = [x / sum(alphas) for x in alphas]

    for weight in weights:
        if sorted(list(weight.keys())) != sorted(list(weights[0].keys())):
            raise RuntimeError("Failed to merge models.")

    merged: Dict[str, Any] = OrderedDict()
    merged["weight"] = {}
    print("merge start.")
    for key in weights[0].keys():
        merged["weight"][key] = 0
        for i, weight in enumerate(weights):
            merged["weight"][key] += weight[key] * alphas[i]
    print("merge done. write metadata.")
    # Metadata comes from the last checkpoint loaded in the loop above.
    merged["config"] = state_dict["config"]
    merged["params"] = state_dict["params"] if "params" in state_dict else None
    merged["version"] = state_dict["version"] if "version" in state_dict else None
    merged["sr"] = state_dict["sr"]
    merged["f0"] = state_dict["f0"]
    merged["info"] = state_dict["info"]
    merged["embedder_name"] = state_dict["embedder_name"] if "embedder_name" in state_dict else None
    merged["embedder_output_layer"] = state_dict["embedder_output_layer"] if "embedder_output_layer" in state_dict else None
    print("write metadata done.")
    return merged
13,948 | import sys
import os
from data.ModelSlot import SoVitsSvc40ModelSlot
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
from dataclasses import dataclass, asdict, field
import numpy as np
import torch
import onnxruntime
import pyworld as pw
from .models.models import SynthesizerTrn
from .models.utils import (
interpolate_f0,
get_hparams_from_file,
load_checkpoint,
repeat_expand_2d,
get_hubert_content,
)
from .models.cluster import get_cluster_model, get_cluster_center_result
from fairseq import checkpoint_utils
import librosa
from Exceptions import NoModeLoadedException
def resize_f0(x, target_len):
    """Resample an f0 contour to ``target_len`` frames via linear interpolation.

    Values below 0.001 are treated as unvoiced: they are masked to NaN before
    interpolation and mapped back to 0 afterwards.
    """
    contour = np.array(x)
    contour[contour < 0.001] = np.nan
    query = np.arange(0, len(contour) * target_len, len(contour)) / target_len
    resized = np.interp(query, np.arange(0, len(contour)), contour)
    return np.nan_to_num(resized)
def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    """Estimate f0 with WORLD DIO + StoneMask refinement, resized to ``p_len`` frames."""
    if p_len is None:
        p_len = wav_numpy.shape[0] // hop_length
    samples = wav_numpy.astype(np.double)
    f0, timeaxis = pw.dio(
        samples,
        fs=sampling_rate,
        f0_ceil=800,
        frame_period=1000 * hop_length / sampling_rate,
    )
    f0 = pw.stonemask(samples, f0, timeaxis, sampling_rate)
    for idx in range(len(f0)):
        f0[idx] = round(f0[idx], 1)
    return resize_f0(f0, p_len)
13,949 | import sys
import os
from data.ModelSlot import SoVitsSvc40ModelSlot
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
from dataclasses import dataclass, asdict, field
import numpy as np
import torch
import onnxruntime
import pyworld as pw
from .models.models import SynthesizerTrn
from .models.utils import (
interpolate_f0,
get_hparams_from_file,
load_checkpoint,
repeat_expand_2d,
get_hubert_content,
)
from .models.cluster import get_cluster_model, get_cluster_center_result
from fairseq import checkpoint_utils
import librosa
from Exceptions import NoModeLoadedException
def resize_f0(x, target_len):
    """Linearly resample pitch contour ``x`` to ``target_len`` points.

    Near-zero (unvoiced) frames become NaN during interpolation and are
    zeroed in the returned array.
    """
    src = np.array(x)
    src[src < 0.001] = np.nan
    positions = np.arange(0, len(src) * target_len, len(src)) / target_len
    stretched = np.interp(positions, np.arange(len(src)), src)
    return np.nan_to_num(stretched)
def compute_f0_harvest(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    """Estimate f0 with WORLD Harvest and resize the contour to ``p_len`` frames."""
    if p_len is None:
        p_len = wav_numpy.shape[0] // hop_length
    f0, t = pw.harvest(
        wav_numpy.astype(np.double),
        fs=sampling_rate,
        frame_period=5.5,
        f0_floor=71.0,
        f0_ceil=1000.0,
    )
    for idx in range(len(f0)):
        f0[idx] = round(f0[idx], 1)
    return resize_f0(f0, p_len)
13,950 | import sys
import os
from data.ModelSlot import SoVitsSvc40ModelSlot
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
from dataclasses import dataclass, asdict, field
import numpy as np
import torch
import onnxruntime
import pyworld as pw
from .models.models import SynthesizerTrn
from .models.utils import (
interpolate_f0,
get_hparams_from_file,
load_checkpoint,
repeat_expand_2d,
get_hubert_content,
)
from .models.cluster import get_cluster_model, get_cluster_center_result
from fairseq import checkpoint_utils
import librosa
from Exceptions import NoModeLoadedException
def get_hubert_content_layer9(hmodel, wav_16k_tensor):
    """Extract layer-9 HuBERT features for a mono 16 kHz waveform.

    Stereo input (samples, channels) is averaged to mono first. Returns the
    features transposed to (1, dim, frames).
    """
    wav = wav_16k_tensor
    if wav.dim() == 2:  # double channels -> mono
        wav = wav.mean(-1)
    assert wav.dim() == 1, wav.dim()
    wav = wav.view(1, -1)
    mask = torch.BoolTensor(wav.shape).fill_(False)
    with torch.no_grad():
        logits = hmodel.extract_features(
            source=wav.to(wav_16k_tensor.device),
            padding_mask=mask.to(wav_16k_tensor.device),
            output_layer=9,  # layer 9
        )
    return logits[0].transpose(1, 2)
13,951 | from typing import Optional, Union
import numpy as np
import torch
import torchcrepe
from torch import nn
from torch.nn import functional as F
import scipy
The code above provides the dependencies needed to implement the `repeat_expand` function. Write a Python function `def repeat_expand(content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest")` that solves the following problem:
Repeat `content` along its last axis to reach `target_len`. This is a wrapper around `torch.nn.functional.interpolate`. Args: `content` (torch.Tensor): the input tensor; `target_len` (int): the target length; `mode` (str, optional): the interpolation mode, defaulting to "nearest". Returns: `torch.Tensor`: the resized tensor.
Here is the function:
def repeat_expand(content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"):
    """Repeat/interpolate ``content`` along its last axis to ``target_len``.

    Thin wrapper over ``torch.nn.functional.interpolate`` that accepts 1-D,
    2-D or 3-D tensors/arrays and returns the same rank and type it was given.

    Args:
        content: tensor or ndarray of shape (T,), (C, T) or (B, C, T).
        target_len: desired length of the last axis.
        mode: interpolation mode passed through to ``interpolate``.

    Returns:
        Resized tensor/ndarray with the same rank as the input.
    """
    ndim = content.ndim

    if content.ndim == 1:
        content = content[None, None]
    elif content.ndim == 2:
        content = content[None]

    assert content.ndim == 3

    is_np = isinstance(content, np.ndarray)
    if is_np:
        content = torch.from_numpy(content)

    results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)

    if is_np:
        results = results.numpy()

    if ndim == 1:
        return results[0, 0]
    elif ndim == 2:
        return results[0]
    # BUGFIX: 3-D input previously fell through and returned None.
    return results
13,952 | import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
def dynamic_range_decompression_torch(x, C=1):
    """Invert dynamic-range compression: exp(x) scaled down by factor C.

    PARAMS
    ------
    C: compression factor used to compress
    """
    expanded = torch.exp(x)
    return expanded / C
def spectral_de_normalize_torch(magnitudes):
    # Undo the log-compression applied to spectrogram magnitudes
    # (delegates to dynamic_range_decompression_torch with its default C=1).
    output = dynamic_range_decompression_torch(magnitudes)
    return output
13,953 | import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
hann_window = {}  # Hann windows memoized per (win_size, dtype, device)


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    """Linear-magnitude STFT spectrogram of waveform batch ``y``.

    ``y`` is expected in [-1, 1]; out-of-range values are only reported, not
    clipped. Returns a tensor of shape (batch, n_fft // 2 + 1, frames).
    """
    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    # Pad manually (reflect) so frames are centered despite center=False.
    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect")
    y = y.squeeze(1)

    # BUGFIX/modernization: return_complex=False is deprecated in recent
    # PyTorch; request a complex STFT and view it as (..., 2) real pairs,
    # which yields numerically identical magnitudes.
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True)
    spec = torch.view_as_real(spec)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec
13,954 | import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
def spectral_normalize_torch(magnitudes):
    # Log-compress spectrogram magnitudes.
    # NOTE(review): dynamic_range_compression_torch is not defined in this
    # chunk — presumably log(clamp(x, min=eps)); confirm in the full module.
    output = dynamic_range_compression_torch(magnitudes)
    return output
mel_basis = {}  # mel filterbanks memoized per (fmax, dtype, device)
def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    """Project a linear-magnitude spectrogram onto ``num_mels`` mel bands and log-compress."""
    global mel_basis
    dtype_device = str(spec.dtype) + "_" + str(spec.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        # Build (and memoize) the librosa mel filterbank on first use.
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec
13,955 | import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
def spectral_normalize_torch(magnitudes):
    # Log-compress spectrogram magnitudes.
    # NOTE(review): dynamic_range_compression_torch is not defined in this
    # chunk — presumably log(clamp(x, min=eps)); confirm in the full module.
    output = dynamic_range_compression_torch(magnitudes)
    return output
mel_basis = {}  # mel filterbanks memoized per (fmax, dtype, device)
hann_window = {}  # Hann windows memoized per (win_size, dtype, device)


def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    """Mel spectrogram of waveform batch ``y`` (expected in [-1, 1]).

    Filterbanks and windows are cached in the module-level dicts above.
    Returns a tensor of shape (batch, num_mels, frames).
    """
    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    # Pad manually (reflect) so frames are centered despite center=False.
    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect")
    y = y.squeeze(1)

    # BUGFIX/modernization: return_complex=False is deprecated in recent
    # PyTorch; take the complex STFT and view it as real pairs instead —
    # the resulting magnitudes are numerically identical.
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True)
    spec = torch.view_as_real(spec)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec
13,956 | import torch
def feature_loss(fmap_r, fmap_g):
    """Feature-matching loss: mean absolute difference between real and
    generated discriminator feature maps, summed over layers and scaled by 2.
    Real features are detached so no gradient flows to the discriminator."""
    total = 0
    for layers_real, layers_gen in zip(fmap_r, fmap_g):
        for real_feat, gen_feat in zip(layers_real, layers_gen):
            real_feat = real_feat.float().detach()
            total = total + torch.mean(torch.abs(real_feat - gen_feat.float()))
    return total * 2
13,957 | import torch
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """LSGAN discriminator loss: push real outputs toward 1 and fakes toward 0.

    Returns the summed loss tensor plus the per-discriminator real/fake loss
    values as plain floats.
    """
    total = 0
    real_losses = []
    fake_losses = []
    for real_out, fake_out in zip(disc_real_outputs, disc_generated_outputs):
        real_term = torch.mean((1 - real_out.float()) ** 2)
        fake_term = torch.mean(fake_out.float() ** 2)
        total = total + real_term + fake_term
        real_losses.append(real_term.item())
        fake_losses.append(fake_term.item())
    return total, real_losses, fake_losses
13,958 | import torch
def generator_loss(disc_outputs):
    """LSGAN generator loss: push every discriminator output toward 1.

    Returns the summed loss and the list of per-discriminator loss tensors.
    """
    per_disc = [torch.mean((1 - out.float()) ** 2) for out in disc_outputs]
    total = 0
    for term in per_disc:
        total = total + term
    return total, per_disc
13,959 | import torch
The provided code snippet includes necessary dependencies for implementing the `kl_loss` function. Write a Python function `def kl_loss(z_p, logs_q, m_p, logs_p, z_mask)` to solve the following problem:
z_p, logs_q: [b, h, t_t] m_p, logs_p: [b, h, t_t]
Here is the function:
def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """KL divergence between posterior q and Gaussian prior p, averaged over
    the valid positions given by ``z_mask``.

    z_p, logs_q: [b, h, t_t]
    m_p, logs_p: [b, h, t_t]
    """
    z_p, logs_q = z_p.float(), logs_q.float()
    m_p, logs_p = m_p.float(), logs_p.float()
    z_mask = z_mask.float()

    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
    masked_total = torch.sum(kl * z_mask)
    return masked_total / torch.sum(z_mask)
13,960 | import math
import torch
from torch.nn import functional as F
def slice_pitch_segments(x, ids_str, segment_size=4):
    """Per-row window of ``segment_size`` frames from a (B, T) pitch tensor.

    Unlike the zero-padding variants, each window must fit entirely within
    the row (a short tail raises a shape-mismatch error).
    """
    out = torch.zeros_like(x[:, :segment_size])
    for row in range(x.size(0)):
        begin = ids_str[row]
        out[row] = x[row, begin : begin + segment_size]
    return out
def slice_segments(x, ids_str, segment_size=4):
    """Per-row time slice of a (B, C, T) tensor; windows must fit fully."""
    out = torch.zeros_like(x[:, :, :segment_size])
    for row in range(x.size(0)):
        begin = ids_str[row]
        out[row] = x[row, :, begin : begin + segment_size]
    return out
def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
    """Sample one random window per batch row from both ``x`` and ``pitch``.

    The same start index is used for the features (B, C, T) and the pitch
    track (B, T); returns both slices plus the chosen start indices.
    """
    batch, _, total_frames = x.size()
    if x_lengths is None:
        x_lengths = total_frames
    max_start = x_lengths - segment_size + 1
    starts = (torch.rand([batch]).to(device=x.device) * max_start).to(dtype=torch.long)
    feature_slices = slice_segments(x, starts, segment_size)
    pitch_slices = slice_pitch_segments(pitch, starts, segment_size)
    return feature_slices, pitch_slices, starts
13,963 | import math
import torch
from torch.nn import functional as F
def intersperse(lst, item):
    """Return a new list with ``item`` woven before, between and after the elements."""
    woven = [item]
    for element in lst:
        woven.append(element)
        woven.append(item)
    return woven
13,967 | import math
import torch
from torch.nn import functional as F
def slice_segments(x, ids_str, segment_size=4):
    """Cut a fixed-length time window out of each batch row of a (B, C, T) tensor."""
    pieces = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        start = ids_str[i]
        stop = start + segment_size
        pieces[i] = x[i, :, start:stop]
    return pieces
def rand_spec_segments(x, x_lengths=None, segment_size=4):
    """Random spectrogram crop: one random start per row, ``segment_size`` frames.

    Note the maximum start here is ``length - segment_size`` (one less than
    the ``+ 1`` variant used elsewhere for waveforms).
    """
    batch, _, total_frames = x.size()
    if x_lengths is None:
        x_lengths = total_frames
    max_start = x_lengths - segment_size
    starts = (torch.rand([batch]).to(device=x.device) * max_start).to(dtype=torch.long)
    return slice_segments(x, starts, segment_size), starts
13,973 | import math
import torch
from torch.nn import functional as F
def convert_pad_shape(pad_shape):
    """Flatten a per-dimension pad spec, last dimension first, into the flat
    list that ``torch.nn.functional.pad`` expects."""
    flattened = []
    for pair in reversed(pad_shape):
        flattened.extend(pair)
    return flattened
def sequence_mask(length, max_length=None):
    """Boolean mask of shape (B, max_length): True where position < length[b]."""
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions.unsqueeze(0) < length.unsqueeze(1)
The provided code snippet includes necessary dependencies for implementing the `generate_path` function. Write a Python function `def generate_path(duration, mask)` to solve the following problem:
duration: [b, 1, t_x] mask: [b, 1, t_y, t_x]
Here is the function:
def generate_path(duration, mask):
    """Expand per-token durations into a monotonic alignment path.

    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    b, _, t_y, t_x = mask.shape
    token_ends = torch.cumsum(duration, -1)
    path = sequence_mask(token_ends.view(b * t_x), t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Keep only the frames belonging to each token by subtracting the
    # (shifted) mask of the previous token's end positions.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    return path.unsqueeze(1).transpose(2, 3) * mask
13,975 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
The provided code snippet includes necessary dependencies for implementing the `deprecated` function. Write a Python function `def deprecated(func)` to solve the following problem:
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.
Here is the function:
def deprecated(func):
    """Decorator that emits a DeprecationWarning every time *func* is called,
    then delegates to the original function unchanged."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.simplefilter("always", DeprecationWarning)  # make the warning visible
        warnings.warn(
            "Call to deprecated function {}.".format(func.__name__),
            category=DeprecationWarning,
            stacklevel=2,
        )
        warnings.simplefilter("default", DeprecationWarning)  # restore filtering
        return func(*args, **kwargs)

    return wrapper
13,976 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
def normalize_f0(f0, x_mask, uv, random_scale=True):
    """Zero-mean normalize f0 over voiced frames, optionally with a random gain.

    The mean is computed only where ``uv`` marks voiced frames; rows with no
    voiced frame get a huge divisor so their mean is effectively zero. The
    result is masked by ``x_mask``.
    """
    voiced_count = torch.sum(uv, dim=1, keepdim=True)
    voiced_count[voiced_count == 0] = 9999  # avoid div-by-zero on all-unvoiced rows
    means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / voiced_count

    if random_scale:
        scale = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
    else:
        scale = torch.ones(f0.shape[0], 1).to(f0.device)

    normalized = (f0 - means.unsqueeze(-1)) * scale.unsqueeze(-1)
    if torch.isnan(normalized).any():
        exit(0)
    return normalized * x_mask
13,977 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
f0_max = 1100.0
f0_min = 50.0
class CrepePitchExtractor(BasePitchExtractor):
    """Torchcrepe-based pitch extractor (signature-only stub).

    NOTE(review): the method bodies were lost in extraction — the real
    implementation lives in ``.modules.crepe`` (see compute_f0_uv_torchcrepe
    below). ``...`` placeholders are added so the stub is at least
    syntactically valid Python.
    """

    def __init__(
        self,
        hop_length: int = 512,
        f0_min: float = 50.0,
        f0_max: float = 1100.0,
        threshold: float = 0.05,
        keep_zeros: bool = False,
        device=None,
        model: Literal["full", "tiny"] = "full",
        use_fast_filters: bool = True,
    ):
        ...

    def __call__(self, x, sampling_rate=44100, pad_to=None):
        ...
def compute_f0_uv_torchcrepe(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512, device=None):
    """Estimate f0 plus a voiced/unvoiced mask using the CREPE pitch extractor."""
    from .modules.crepe import CrepePitchExtractor

    x = wav_numpy
    if p_len is None:
        p_len = x.shape[0] // hop_length
    else:
        assert abs(p_len - x.shape[0] // hop_length) < 4, "pad length error"

    extractor = CrepePitchExtractor(hop_length=hop_length, f0_min=50, f0_max=1100, device=device)
    f0, uv = extractor(x[None, :].float(), sampling_rate, pad_to=p_len)
    return f0, uv
13,978 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
MATPLOTLIB_FLAG = False  # one-time matplotlib backend setup guard


def plot_data_to_numpy(x, y):
    """Render two curves with matplotlib and return the figure as an RGB uint8 array."""
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib

        matplotlib.use("Agg")  # headless backend; must be set before pylab import
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger("matplotlib")
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(10, 2))
    plt.plot(x)
    plt.plot(y)
    plt.tight_layout()

    fig.canvas.draw()
    # BUGFIX: np.fromstring on binary data is deprecated (removed in newer
    # NumPy); frombuffer reads the canvas bytes directly. The copy() keeps
    # the returned array writable, matching fromstring's behaviour.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data
13,979 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
The provided code snippet includes necessary dependencies for implementing the `interpolate_f0` function. Write a Python function `def interpolate_f0(f0)` to solve the following problem:
Interpolate an F0 contour, filling the unvoiced (zero) gaps.
Here is the function:
def interpolate_f0(f0):
    """Linearly interpolate across unvoiced (<= 0) gaps of an f0 contour.

    Returns:
        (filled_f0, vuv): the gap-filled contour and a voiced/unvoiced
        indicator (1.0 = voiced in the input, 0.0 = unvoiced), both 1-D.
    """
    data = np.reshape(f0, (f0.size, 1))
    # Voiced/unvoiced flags are taken from the *original* contour.
    vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
    vuv_vector[data > 0.0] = 1.0
    vuv_vector[data <= 0.0] = 0.0
    ip_data = data  # alias of ``data``: the writes below modify both
    frame_number = data.size
    last_value = 0.0
    for i in range(frame_number):
        if data[i] <= 0.0:
            # Find the next voiced frame j strictly after i.
            j = i + 1
            for j in range(i + 1, frame_number):
                if data[j] > 0.0:
                    break
            if j < frame_number - 1:
                if last_value > 0.0:
                    # Voiced on both sides: bridge the gap linearly.
                    step = (data[j] - data[i - 1]) / float(j - i)
                    for k in range(i, j):
                        ip_data[k] = data[i - 1] + step * (k - i + 1)
                else:
                    # Leading gap: back-fill with the first voiced value.
                    for k in range(i, j):
                        ip_data[k] = data[j]
            else:
                # Trailing gap: hold the last voiced value.
                for k in range(i, frame_number):
                    ip_data[k] = last_value
        else:
            ip_data[i] = data[i]  # possibly an unnecessary copy (ip_data aliases data)
            last_value = data[i]
    return ip_data[:, 0], vuv_vector[:, 0]
13,980 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
f0_max = 1100.0
f0_min = 50.0
def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    """Estimate f0 with Praat (parselmouth) autocorrelation, padded to ``p_len`` frames."""
    import parselmouth

    audio = wav_numpy
    if p_len is None:
        p_len = audio.shape[0] // hop_length
    else:
        assert abs(p_len - audio.shape[0] // hop_length) < 4, "pad length error"

    time_step = hop_length / sampling_rate * 1000
    f0_min = 50
    f0_max = 1100
    pitch = parselmouth.Sound(audio, sampling_rate).to_pitch_ac(
        time_step=time_step / 1000,
        voicing_threshold=0.6,
        pitch_floor=f0_min,
        pitch_ceiling=f0_max,
    )
    f0 = pitch.selected_array["frequency"]

    # Praat drops edge frames; pad symmetrically back to the requested length.
    pad_size = (p_len - len(f0) + 1) // 2
    tail = p_len - len(f0) - pad_size
    if pad_size > 0 or tail > 0:
        f0 = np.pad(f0, [[pad_size, tail]], mode="constant")
    return f0
13,981 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
def resize_f0(x, target_len):
    """Stretch/shrink an f0 sequence to ``target_len`` via linear interpolation,
    treating near-zero frames as unvoiced (NaN during interp, 0 on output)."""
    curve = np.array(x)
    curve[curve < 0.001] = np.nan
    sample_points = np.arange(0, len(curve) * target_len, len(curve)) / target_len
    return np.nan_to_num(np.interp(sample_points, np.arange(0, len(curve)), curve))
def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    """Estimate f0 with WORLD DIO + StoneMask, resized to ``p_len`` frames."""
    import pyworld

    if p_len is None:
        p_len = wav_numpy.shape[0] // hop_length
    samples = wav_numpy.astype(np.double)
    f0, timeaxis = pyworld.dio(
        samples,
        fs=sampling_rate,
        f0_ceil=800,
        frame_period=1000 * hop_length / sampling_rate,
    )
    f0 = pyworld.stonemask(samples, f0, timeaxis, sampling_rate)
    for idx in range(len(f0)):
        f0[idx] = round(f0[idx], 1)
    return resize_f0(f0, p_len)
13,982 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
f0_bin = 256
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
def f0_to_coarse(f0):
    """Quantize f0 (Hz) to integer mel-spaced bins in [1, f0_bin - 1].

    Works on both torch tensors and numpy arrays; non-positive (unvoiced)
    frames map to bin 1. Relies on the module-level constants ``f0_bin``,
    ``f0_mel_min`` and ``f0_mel_max``.
    """
    is_torch = isinstance(f0, torch.Tensor)
    # Hz -> mel scale.
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    # Scale voiced values into (1, f0_bin - 1), then clamp the extremes.
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # BUGFIX: np.int was removed in NumPy 1.24 — use an explicit dtype.
    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int64)
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
    return f0_coarse
13,983 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
def get_hubert_model():
    """Load the ContentVec/HuBERT checkpoint via fairseq and return it in eval mode.

    NOTE(review): the checkpoint path is hard-coded relative to the current
    working directory.
    """
    vec_path = "hubert/checkpoint_best_legacy_500.pt"
    print("load model(s) from {}".format(vec_path))
    from fairseq import checkpoint_utils
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        [vec_path],
        suffix="",
    )
    model = models[0]  # single-model ensemble: take the only member
    model.eval()
    return model
13,984 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
def get_hubert_content(hmodel, wav_16k_tensor):
    """Extract layer-9 HuBERT features, projected through ``final_proj``,
    for a mono 16 kHz waveform. Returns (1, dim, frames)."""
    wav = wav_16k_tensor
    if wav.dim() == 2:  # double channels -> mono
        wav = wav.mean(-1)
    assert wav.dim() == 1, wav.dim()
    wav = wav.view(1, -1)
    pad_mask = torch.BoolTensor(wav.shape).fill_(False)
    with torch.no_grad():
        logits = hmodel.extract_features(
            source=wav.to(wav_16k_tensor.device),
            padding_mask=pad_mask.to(wav_16k_tensor.device),
            output_layer=9,  # layer 9
        )
        projected = hmodel.final_proj(logits[0])
    return projected.transpose(1, 2)
13,985 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
def get_content(cmodel, y):
    """Run the content encoder on waveform ``y`` and return features as (B, dim, frames)."""
    with torch.no_grad():
        features = cmodel.extract_features(y.squeeze(1))[0]
    return features.transpose(1, 2)
13,986 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
logger = logging
def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
    """Restore model (and optionally optimizer) state from a checkpoint file.

    Parameters missing from the checkpoint, or with mismatched shapes, keep
    their current values instead of aborting the load.

    Returns:
        (model, optimizer, learning_rate, iteration)
    """
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
    iteration = checkpoint_dict["iteration"]
    learning_rate = checkpoint_dict["learning_rate"]
    if optimizer is not None and not skip_optimizer and checkpoint_dict["optimizer"] is not None:
        optimizer.load_state_dict(checkpoint_dict["optimizer"])
    saved_state_dict = checkpoint_dict["model"]
    # DataParallel / DDP wraps parameters under ``module.``.
    if hasattr(model, "module"):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
            assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
        except (KeyError, AssertionError):
            # BUGFIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only missing keys and shape
            # mismatches are recoverable here (fall back to current value).
            print("error, %s is not in the checkpoint" % k)
            logger.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, "module"):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    print("load ")
    logger.info("Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration
13,987 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
logger = logging
def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    """Serialize model + optimizer state, LR and step counter to ``checkpoint_path``."""
    logger.info("Saving model and optimizer state at iteration {} to {}".format(iteration, checkpoint_path))
    # Unwrap DataParallel/DDP so keys are stored without the ``module.`` prefix.
    if hasattr(model, "module"):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    payload = {
        "model": state_dict,
        "iteration": iteration,
        "optimizer": optimizer.state_dict(),
        "learning_rate": learning_rate,
    }
    torch.save(payload, checkpoint_path)
13,988 | import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import functools
import numpy as np
from scipy.io.wavfile import read
import torch
logger = logging
The provided code snippet includes necessary dependencies for implementing the `clean_checkpoints` function. Write a Python function `def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_time=True)` to solve the following problem:
Freeing up space by deleting saved ckpts Arguments: path_to_models -- Path to the model directory n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth sort_by_time -- True -> chronologically delete ckpts False -> lexicographically delete ckpts
Here is the function:
def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_time=True):
    """Free up space by deleting saved checkpoints.

    Arguments:
        path_to_models  -- Path to the model directory
        n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
        sort_by_time    -- True  -> chronologically delete ckpts
                           False -> lexicographically delete ckpts
    """
    ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]

    def name_key(_f):
        # BUGFIX: the pattern is now a raw string; "._(\d+)\.pth" produced an
        # invalid-escape-sequence warning (an error in future Python versions).
        return int(re.compile(r"._(\d+)\.pth").match(_f).group(1))

    def time_key(_f):
        return os.path.getmtime(os.path.join(path_to_models, _f))

    sort_key = time_key if sort_by_time else name_key

    def x_sorted(prefix):
        # Checkpoints with the given prefix, excluding the *_0.pth seeds.
        return sorted(
            [f for f in ckpts_files if f.startswith(prefix) and not f.endswith("_0.pth")],
            key=sort_key,
        )

    to_del = [
        os.path.join(path_to_models, fn)
        for fn in (x_sorted("G")[:-n_ckpts_to_keep] + x_sorted("D")[:-n_ckpts_to_keep])
    ]
    # Replaced the side-effecting lambda/list-comprehension chain with an
    # explicit loop: same deletions, clearer intent.
    for fn in to_del:
        os.remove(fn)
        logger.info(f".. Free up space by deleting ckpt {fn}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.