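"""
Noise suppression with DTLN (Dual-signal Transformation LSTM Network).

Usage sketch (the file name dtln.py is assumed here; adjust to the actual script name):

    python3 dtln.py --input 1221-135766-0000.wav --savepath output.wav
    python3 dtln.py --onnx   # use ONNX Runtime instead of the ailia SDK
"""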
import sys
import time
from logging import getLogger
import onnxruntime
import numpy as np
import soundfile as sf
import ailia
# import original modules
sys.path.append('../../util')
from model_utils import check_and_download_models # noqa
from arg_utils import get_base_parser, get_savepath, update_parser # noqa
logger = getLogger(__name__)
# ======================
# Parameters
# ======================
WEIGHT1_PATH = "dtln1.onnx"
MODEL1_PATH = "dtln1.onnx.prototxt"
WEIGHT2_PATH = "dtln2.onnx"
MODEL2_PATH = "dtln2.onnx.prototxt"
REMOTE_PATH = 'https://storage.googleapis.com/ailia-models/dtln/'
SAMPLE_RATE = 16000
WAV_PATH = '1221-135766-0000.wav'
SAVE_WAV_PATH = 'output.wav'
# ======================
# Argument Parser Config
# ======================
parser = get_base_parser(
'Dual-signal Transformation LSTM Network', WAV_PATH, SAVE_WAV_PATH, input_ftype='audio'
)
parser.add_argument(
'--onnx',
action='store_true',
help='By default, the ailia SDK is used, but with this option, you can switch to using ONNX Runtime'
)
parser.add_argument(
    '--shift',
    default=128, type=int,
    help='block shift (hop size) in samples for the overlap-add processing'
)
args = update_parser(parser)
block_shift = args.shift
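# hop size for the overlap-add processing; with the fixed block length of 512
# samples, the default shift of 128 gives a 75% overlap between blocks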
# ======================
# Main functions
# ======================
def predict(audio, models):
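    """Denoise `audio` with the two-stage DTLN models using block-wise overlap-add.

    The first model predicts a magnitude mask in the STFT domain; the second
    refines the masked block in the time domain. Both models carry LSTM states
    that are fed back between iterations.
    """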
    block_len = 512
    out_file = np.zeros(len(audio))
    # create input and output buffers
    in_buffer = np.zeros(block_len, dtype='float32')
    out_buffer = np.zeros(block_len, dtype='float32')
# calculate number of blocks
num_blocks = (audio.shape[0] - (block_len-block_shift)) // block_shift
    time_array = []
    # preallocate the model inputs: magnitude spectrum and LSTM states for the
    # first model, time-domain block and LSTM states for the second
    interpreter_1 = models[0]
    interpreter_2 = models[1]
    model_input_names_1 = ['input_2', 'input_3']
    model_inputs_1 = {
        model_input_names_1[0]: np.zeros((1, 1, 257), dtype=np.float32),
        model_input_names_1[1]: np.zeros((1, 2, 128, 2), dtype=np.float32),
    }
    model_input_names_2 = ['input_4', 'input_5']
    model_inputs_2 = {
        model_input_names_2[0]: np.zeros((1, 1, 512), dtype=np.float32),
        model_input_names_2[1]: np.zeros((1, 2, 128, 2), dtype=np.float32),
    }
    # iterate over the blocks
for idx in range(num_blocks):
start_time = time.time()
# shift values and write to buffer
in_buffer[:-block_shift] = in_buffer[block_shift:]
in_buffer[-block_shift:] = audio[idx*block_shift:(idx*block_shift)+block_shift]
# calculate fft of input block
in_block_fft = np.fft.rfft(in_buffer)
in_mag = np.abs(in_block_fft)
in_phase = np.angle(in_block_fft)
# reshape magnitude to input dimensions
        in_mag = np.reshape(in_mag, (1, 1, -1)).astype('float32')
# set block to input
model_inputs_1[model_input_names_1[0]] = in_mag
# run calculation
        if args.onnx:
            model_outputs_1 = interpreter_1.run(None, model_inputs_1)
        else:
            inputs = [model_inputs_1['input_2'], model_inputs_1['input_3']]
            model_outputs_1 = interpreter_1.run(inputs)
# get the output of the first block
out_mask = model_outputs_1[0]
# set out states back to input
model_inputs_1[model_input_names_1[1]] = model_outputs_1[1]
        # apply the mask, restore the noisy phase, then invert the FFT
estimated_complex = in_mag * out_mask * np.exp(1j * in_phase)
estimated_block = np.fft.irfft(estimated_complex)
# reshape the time domain block
        estimated_block = np.reshape(estimated_block, (1, 1, -1)).astype('float32')
# set tensors to the second block
model_inputs_2[model_input_names_2[0]] = estimated_block
# run calculation
        if args.onnx:
            model_outputs_2 = interpreter_2.run(None, model_inputs_2)
        else:
            inputs = [model_inputs_2['input_4'], model_inputs_2['input_5']]
            model_outputs_2 = interpreter_2.run(inputs)
# get output
out_block = model_outputs_2[0]
# set out states back to input
model_inputs_2[model_input_names_2[1]] = model_outputs_2[1]
# shift values and write to buffer
out_buffer[:-block_shift] = out_buffer[block_shift:]
        out_buffer[-block_shift:] = np.zeros(block_shift)
out_buffer += np.squeeze(out_block)
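        # overlap-add: only the first block_shift samples of the buffer are
        # fully accumulated, so exactly that many are written to the output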
# write block to output file
out_file[idx*block_shift:(idx*block_shift)+block_shift] = out_buffer[:block_shift]
time_array.append(time.time()-start_time)
return out_file
def recognize_from_audio(models):
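    """Load each input file, denoise it with predict(), and save the result."""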
# input audio loop
for audio_path in args.input:
logger.info(audio_path)
# load audio file
        audio, fs = sf.read(audio_path)
        # check the sampling rate
        if fs != SAMPLE_RATE:
            logger.warning('This model only supports a 16kHz sampling rate.')
            continue
# inference
logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            start = int(round(time.time() * 1000))
            output = predict(audio, models)
            end = int(round(time.time() * 1000))
            estimation_time = (end - start)
            logger.info(f'\ttotal processing time {estimation_time} ms')
        else:
            output = predict(audio, models)
# save result
savepath = get_savepath(args.savepath, audio_path, ext='.wav')
logger.info(f'saved at : {savepath}')
sf.write(savepath, output, fs)
logger.info('Script finished successfully.')
def main():
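    """Download the model files if needed, create the runtimes, and run inference."""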
check_and_download_models(WEIGHT1_PATH, MODEL1_PATH, REMOTE_PATH)
check_and_download_models(WEIGHT2_PATH, MODEL2_PATH, REMOTE_PATH)
    env_id = args.env_id
    if args.onnx:
        models = [onnxruntime.InferenceSession(WEIGHT1_PATH),
                  onnxruntime.InferenceSession(WEIGHT2_PATH)]
    else:
        models = [ailia.Net(MODEL1_PATH, WEIGHT1_PATH, env_id=env_id),
                  ailia.Net(MODEL2_PATH, WEIGHT2_PATH, env_id=env_id)]
    # run inference
    recognize_from_audio(models)
if __name__ == '__main__':
main()