# international_voice/examples/make_templates/step_3_move_by_template.py
# (uploaded by HoneyTian, commit 7661f93, "update")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
from collections import defaultdict
from glob import glob
import json
import os
from pathlib import Path
import shutil
from typing import Dict, List, Callable
import cv2 as cv
import numpy as np
from python_speech_features import sigproc
from scipy.io import wavfile
from tqdm import tqdm
from project_settings import project_path
from toolbox.python_speech_features.misc import wave2spectrum_image
from toolbox.cv2.misc import show_image
area_code = 234
def get_args():
    """Build and parse the command-line arguments of this script."""
    templates_default = (
        project_path / "data/early_media/{area_code}/templates".format(area_code=area_code)
    ).as_posix()
    wav_default = (
        project_path / "data/early_media/{area_code}/wav".format(area_code=area_code)
    ).as_posix()

    parser = argparse.ArgumentParser()
    parser.add_argument("--templates_dir", default=templates_default, type=str)
    parser.add_argument("--wav_dir", default=wav_default, type=str)
    return parser.parse_args()
class AudioTemplateMatch(object):
    """
    Match an audio spectrogram against a library of labelled spectrogram templates.

    Templates are loaded from ``<path>/<label>/*.wav``. Matching runs in two
    passes of OpenCV's normalized squared-difference template matching:
    a cheap "shadow" pass over the first few frequency columns, then a full
    pass restricted to the candidate region found by the first pass.
    """

    def __init__(self,
                 wave_to_spectrum: Callable,
                 sample_rate: int = 8000,
                 template_crop: float = 0.1,
                 threshold: float = 0.01,
                 ):
        """
        :param wave_to_spectrum: Callable. Takes a wave, np.ndarray, shape=(n,),
            returns a spectrum, np.ndarray, shape=(time_steps, n_dim).
        :param sample_rate: expected sample rate of every wav file.
        :param template_crop: fraction cropped from each end of a template
            (time axis) before matching, to discard unstable edges.
        :param threshold: maximum TM_SQDIFF_NORMED value accepted as a match.
        """
        self.wave_to_spectrum = wave_to_spectrum
        self.sample_rate = sample_rate
        self.template_crop = template_crop
        self.threshold = threshold

        # number of frequency columns of each spectrum kept for matching
        self.dim = 100

        # populated by load_template()
        self.label2templates: Dict[str, List[Dict[str, np.ndarray]]] = None
        self.max_template_width: int = None

    def load_template(self, path: str):
        """
        Load every ``<path>/<label>/*.wav`` as a template spectrum.

        :param path: root directory; each sub-directory name is used as the label.
        :return: tuple (label2templates, max_template_width); both are also
            stored on the instance.
        :raises AssertionError: on wrong sample rate, wrong dtype, or a wave
            shorter than one second.
        """
        filename_pattern = os.path.join(path, '*/*.wav')
        filename_list = glob(filename_pattern)

        label2templates = defaultdict(list)
        max_template_width = 0
        print('loading templates.')
        for filename in tqdm(filename_list):
            path, fn = os.path.split(filename)
            root_path, label = os.path.split(path)

            sample_rate, wave = wavfile.read(filename)
            if sample_rate != self.sample_rate:
                raise AssertionError('expected sample rate: {}, instead of: {}'.format(self.sample_rate, sample_rate))
            if wave.dtype != np.int16:
                raise AssertionError('expected wave dtype np.int16, instead of: {}'.format(wave.dtype))
            if wave.shape[0] < self.sample_rate:
                raise AssertionError('wave.shape: {}'.format(wave.shape))

            # normalize int16 PCM samples to [-1, 1)
            max_wave_value = 32768.0
            wave = wave / max_wave_value

            template = self.wave_to_spectrum(wave)
            template = template[:, :self.dim]

            template_width, _ = template.shape
            if template_width > max_template_width:
                max_template_width = template_width

            label2templates[label].append({
                'filename': filename,
                'template': template,
            })

        self.label2templates = label2templates
        self.max_template_width = max_template_width
        return label2templates, max_template_width

    def template_match_by_wave(self, wave: np.ndarray):
        """Normalize an int16 wave, convert it to a spectrum, and match templates."""
        # normalize int16 PCM samples to [-1, 1)
        max_wave_value = 32768.0
        wave = wave / max_wave_value
        spectrum = self.wave_to_spectrum(wave)
        spectrum = spectrum[:, :self.dim]
        result = self.template_match_by_spectrum(spectrum)
        return result

    def template_match_by_spectrum(self, spectrum: np.ndarray):
        """Match every loaded template against ``spectrum``; return a list of hits."""
        result = self._shadow_template_match(spectrum)
        return result

    def _shadow_template_match(self, spectrum):
        """
        Two-pass template match.

        :param spectrum: np.ndarray, shape=(time_steps, n_dim).
        :return: list of dicts with keys 'begin', 'width', 'label',
            'filename', 'min_val'; empty when nothing matches.
        """
        matches = list()
        # too short to contain the widest template: nothing can match
        if spectrum.shape[0] < self.max_template_width:
            return matches

        for label, templates in self.label2templates.items():
            for templ in templates:
                filename = templ['filename']
                template = templ['template']

                tw, _ = template.shape[:2]
                c = int(tw * self.template_crop)
                # bug fix: the original slice `template[c: -c]` yields an
                # EMPTY array when c == 0 (template shorter than
                # 1 / template_crop frames); an explicit end index keeps the
                # whole template in that case.
                template = template[c: tw - c]
                tw, th = template.shape[:2]

                # cheap first pass over only the first few frequency columns
                shadow_m = 3
                shadow_spect = spectrum[:, :shadow_m]
                shadow_templ = template[:, :shadow_m]
                sqdiff_normed = cv.matchTemplate(image=shadow_spect, templ=shadow_templ, method=cv.TM_SQDIFF_NORMED)
                min_val, _, min_loc, _ = cv.minMaxLoc(sqdiff_normed)
                if min_val > self.threshold:
                    continue

                # full match restricted to the window found by the shadow pass;
                # min_loc is an OpenCV (x, y) point, so the row/time offset is
                # its second component.
                _, x = min_loc
                match_spectrum = spectrum[x:x + tw, :]
                sqdiff_normed = cv.matchTemplate(image=match_spectrum, templ=template, method=cv.TM_SQDIFF_NORMED)
                min_val, _, min_loc, _ = cv.minMaxLoc(sqdiff_normed)
                if min_val > self.threshold:
                    continue

                matches.append({
                    'begin': x,
                    'width': tw,
                    'label': label,
                    'filename': filename,
                    'min_val': min_val,
                })
        return matches
def main():
    """
    Classify each ``<wav_dir>/voice/*.wav`` by template matching and move it
    into the per-label sibling directory when the current directory name
    disagrees with the matched label.
    """
    args = get_args()

    templates_dir = Path(args.templates_dir)
    wav_dir = Path(args.wav_dir)

    # per-label settings; each entry may carry a "weight" used for ranking
    config_json_file = templates_dir / "config.json"
    with open(config_json_file.as_posix(), "r", encoding="utf-8") as f:
        config_json = json.load(f)

    def wave_to_spectrum(wave: np.ndarray):
        """Convert a waveform to a float32 spectrogram image scaled to [0, 1]."""
        spectrum = wave2spectrum_image(wave=wave, sample_rate=8000)
        spectrum = np.array(spectrum, dtype=np.float32)
        spectrum /= 255
        return spectrum

    audio_template_match = AudioTemplateMatch(
        wave_to_spectrum=wave_to_spectrum,
        sample_rate=8000,
        template_crop=0.1,
        threshold=0.007,
    )
    audio_template_match.load_template(path=args.templates_dir)

    for filename in tqdm(wav_dir.glob("voice/*.wav")):
        filename: Path = filename
        sample_rate, signal = wavfile.read(filename)
        if sample_rate != 8000:
            # best-effort: warn but still try to match
            print('sample rate not 8000, filename: {}'.format(filename))

        matches = audio_template_match.template_match_by_wave(wave=signal)
        if len(matches) == 0:
            continue

        matches_ = list()
        for match in matches:
            label = match["label"]
            # bug fix: a label present under the templates dir but missing
            # from config.json used to raise KeyError; treat it as weight 0.0.
            matches_.append({
                **match,
                "weight": config_json.get(label, dict()).get("weight", 0.0)
            })
        matches_ = list(sorted(matches_, key=lambda x: x["weight"], reverse=True))

        labels = [match["label"] for match in matches_]
        labels_ = [label for label in labels if label not in ("music",)]
        if len(set(labels_)) > 1:
            # more than one non-music template class matched; flag for manual review
            print("超过两个模板类别被匹配,请检测是否匹配正确。")
            print(filename)
            for match in matches_:
                print(match)
            # continue
        # keep only the highest-weighted non-music label; fall back to "music"
        labels_ = labels_[:1]
        if len(labels_) == 0:
            label = "music"
        else:
            label = labels_[0]

        # move the file only when its current parent directory disagrees
        if filename.parts[-2] != label:
            tgt = filename.parent.parent / label
            os.makedirs(tgt, exist_ok=True)
            try:
                shutil.move(filename.as_posix(), tgt.as_posix())
            except shutil.Error:
                # e.g. a same-named file already exists in the target; skip
                print(filename)
                print(tgt)
                continue
    return
if __name__ == '__main__':
main()