def process_wav(wav_path):
    # NOTE: the original head of this function is not shown in this excerpt.
    # The header and the loading line below are a reconstruction; they assume a
    # torchaudio-style loader (`import torchaudio` above this excerpt) that
    # returns a (channels, samples) tensor.
    audio_raw, _sample_rate = torchaudio.load(wav_path)
    audio_raw = audio_raw[0]  # keep only the first channel
    prompt = prompt_template.format(prompt_org)
    # Frontend: fbank features -> low-frame-rate (LFR) stacking -> CMVN.
    audio_mel = compute_fbank(waveform=audio_raw)
    audio_mel = apply_lfr(inputs=audio_mel, lfr_m=7, lfr_n=6)
    audio_mel = apply_cmvn(audio_mel, cmvn=cmvn)
    # Number of audio token positions after the adapter's temporal downsampling.
    audio_length = audio_mel.shape[0]
    audio_length = audio_length // adapter_downsample_rate
    # Placeholder ids (-1) that the model later replaces with audio embeddings.
    audio_pseudo = torch.full((audio_length,), -1)
    prompt_ids = tokenizer.encode(prompt)
    prompt_length = len(prompt_ids)
    prompt_ids = torch.tensor(prompt_ids, dtype=torch.int64)
    example_ids = torch.cat((audio_pseudo, prompt_ids))  # [audio, prompt]
    # ge(-1) marks every position, audio placeholders included, as attended.
    example_mask = example_ids.ge(-1)
    items = {
        "input_ids": example_ids,
        "attention_mask": example_mask,
        "audio_mel": audio_mel,
        "audio_length": audio_length,
        "prompt_length": prompt_length,
    }
    return items
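
# The collator `process_batch` used below is defined elsewhere in this file.
# For reference, a minimal sketch of what it is assumed to do: right-pad the
# token ids and masks, zero-pad the mel features to a common length, and stack
# everything into batch tensors. The name, padding values, and returned list
# fields here are illustrative assumptions, not the actual implementation.
def _process_batch_sketch(samples, tokenizer):
    max_ids = max(s["input_ids"].shape[0] for s in samples)
    max_mel = max(s["audio_mel"].shape[0] for s in samples)
    input_ids, attention_mask, audio_mel = [], [], []
    for s in samples:
        pad = max_ids - s["input_ids"].shape[0]
        # Right-pad token ids with 0 (assumed pad id for this sketch).
        input_ids.append(torch.nn.functional.pad(s["input_ids"], (0, pad), value=0))
        # Extend the attention mask with False over the padded positions.
        mask = torch.zeros(max_ids, dtype=torch.bool)
        mask[: s["attention_mask"].shape[0]] = s["attention_mask"]
        attention_mask.append(mask)
        # Zero-pad the mel features (time, dim) along the time axis.
        mel_pad = max_mel - s["audio_mel"].shape[0]
        audio_mel.append(torch.nn.functional.pad(s["audio_mel"], (0, 0, 0, mel_pad)))
    return {
        "input_ids": torch.stack(input_ids),
        "attention_mask": torch.stack(attention_mask),
        "audio_mel": torch.stack(audio_mel),
        "audio_length": [s["audio_length"] for s in samples],
        "prompt_length": [s["prompt_length"] for s in samples],
    }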
# Select the compute dtype for inference (defaults to bfloat16).
load_dtype = model_config.get('load_dtype', 'bfloat16')
dtype = torch.float32
if load_dtype == 'float16':
    dtype = torch.float16
elif load_dtype == 'bfloat16':
    dtype = torch.bfloat16
logging.info(f"Input data type: {dtype}")

# Pick the autocast context for the active backend: MUSA (Moore Threads GPUs)
# exposes torch.musa.amp.autocast; otherwise fall back to CUDA autocast.
context_scope = torch.musa.amp.autocast if 'musa' in device else torch.cuda.amp.autocast
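
# `parse_key_text` (used below) is defined elsewhere in this file. It is
# assumed to read a Kaldi-style wav.scp, where each non-empty line holds an
# utterance id followed by a wav path. A minimal sketch under that assumption:
def _parse_key_text_sketch(scp_path):
    items = {}
    with open(scp_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            uttid, wav_path = line.split(maxsplit=1)
            items[uttid] = wav_path
    return items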
with torch.no_grad():
    if args.wav_scp is not None and os.path.exists(args.wav_scp):
        # Batched inference over a Kaldi-style wav.scp (one "uttid wav_path" per line).
        batch_size = args.batch_size
        infer_time = []
        items = parse_key_text(args.wav_scp)
        uttids = list(items.keys())
        num_batches = (len(uttids) + batch_size - 1) // batch_size  # ceil division
        for i in range(num_batches):
            batch_uttids = uttids[i * batch_size:(i + 1) * batch_size]
            batch_wav_paths = [items[uttid] for uttid in batch_uttids]
            samples = [process_wav(wav_path) for wav_path in batch_wav_paths]
            batch = process_batch(samples, tokenizer=tokenizer)
            # Move tensors to the target device; leave non-tensor entries as-is.
            for key in batch:
                batch[key] = batch[key].to(device) if isinstance(batch[key], torch.Tensor) else batch[key]
            with context_scope(dtype=dtype):
                ss = time.perf_counter()
                model_outputs = model.generate(**batch)
            infer_time.append(time.perf_counter() - ss)
            logging.info(f"Infer time: {infer_time[-1]}")
            output_text = model.tokenizer.batch_decode(model_outputs, add_special_tokens=False,
                                                       skip_special_tokens=True)
            # Each hypothesis carries ASR on the first line and, if present,
            # the AST (speech translation) result on the second.
            for idx, text in enumerate(output_text):
                logging.info(f"uttid: {batch_uttids[idx]}")
                text = text.split('\n')
                if len(text) == 2:
                    logging.info(f"ASR: {text[0].strip()}")
                    logging.info(f"AST: {text[1].strip()}")
                else:
                    logging.info(f"ASR: {text[0].strip()}")
        logging.info(f"Total inference cost: {sum(infer_time)}")
    elif args.wav_path != '' and os.path.exists(args.wav_path):
        # Single-utterance inference on one wav file.
        try:
            wav_path = args.wav_path
            items = process_wav(wav_path)
            batch = process_batch([items], tokenizer=tokenizer)
            for key in batch:
                batch[key] = batch[key].to(device) if isinstance(batch[key], torch.Tensor) else batch[key]
            with context_scope(dtype=dtype):
                ss = time.perf_counter()
                model_outputs = model.generate(**batch)
            logging.info(f"Infer time: {time.perf_counter() - ss}")
            output_text = model.tokenizer.batch_decode(model_outputs, add_special_tokens=False,
                                                       skip_special_tokens=True)
            for text in output_text:
                text = text.split('\n')
                if len(text) == 2:
                    logging.info(f"ASR: {text[0].strip()}")
                    logging.info(f"AST: {text[1].strip()}")
                else:
                    logging.info(f"ASR: {text[0].strip()}")
        except Exception as e:
            logging.error(e)
    else:
        raise IOError("You should specify --wav_scp or --wav_path as the input")
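
# Example invocations (script name assumed for illustration; the flags follow
# the argument usage above):
#   python inference.py --wav_scp data/wav.scp --batch_size 4
#   python inference.py --wav_path demo/example.wav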