# NOTE(review): dataset-viewer scraping artifacts removed here ("text
# stringlengths ...", markdown table markup, a stray "main()" line, and the
# "<FILESEP>" marker). The actual script begins with the imports below.
# Standard library
import argparse
import logging
import os
import time

# Third-party
import torch
import torchaudio
from torchaudio.transforms import Resample

try:
    # Optional backend: enables running on Moore Threads GPUs.
    import torch_musa
except ImportError:
    # Best-effort: the demo still runs without torch_musa (CPU/CUDA).
    print("You should install torch_musa if you want to run on Moore Threads GPU")

# Project-local. NOTE(review): the star imports are kept because later code
# relies on names they provide (e.g. get_device(), load_cmvn()).
from mooer.configs import asr_config
from mooer.datasets.speech_processor import *
from mooer.models import mooer_model
from mooer.utils.utils import *
# Command-line interface for the decoding demo. Options are declared as a
# flag -> kwargs table and registered in one pass.
parser = argparse.ArgumentParser()

_CLI_OPTIONS = (
    ("--wav_path", dict(default='demo/resources/demo.wav', type=str, help="decode one wav file")),
    ("--wav_scp", dict(default=None, type=str, help="decode scp if you want")),
    ("--task", dict(default='ast', choices=['asr', 'ast'], type=str, help="task: asr or ast. Please set ast if you choose a asr/ast multitask model")),
    ("--batch_size", dict(default=10, type=int, help="decode batch for scp")),
    ("--cmvn_path", dict(default='', type=str, help="cmvn path. If not set, will use path in src/mooer/configs/asr_config.py")),
    ("--encoder_path", dict(default='', type=str, help="encoder path. If not set, will use the path in src/mooer/configs/asr_config.py")),
    ("--llm_path", dict(default='', type=str, help="llm path. If not set, will use the path in src/mooer/configs/asr_config.py")),
    ("--adapter_path", dict(default='', type=str, help="adapter path. If not set, will use the path in src/mooer/configs/asr_config.py")),
    ("--lora_dir", dict(default='', type=str, help="lora path. If not set, will use path in src/mooer/configs/asr_config.py")),
)
for _flag, _spec in _CLI_OPTIONS:
    parser.add_argument(_flag, **_spec)

args = parser.parse_args()
# Configure root logging once for the whole demo script.
# NOTE: the original also passed filemode='w'; logging.basicConfig silently
# ignores filemode unless a filename is given, so the dead kwarg is dropped.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
# Chat template per LLM family; the '{}' placeholder is filled with the
# task instruction from PROMPT_DICT below.
PROMPT_TEMPLATE_DICT = {
    'qwen': (
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        "<|im_start|>user\n{}<|im_end|>\n"
        "<|im_start|>assistant\n"
    ),
}

# Task instruction text, keyed by task name (asr = transcription,
# ast = speech translation to English).
PROMPT_DICT = {
    'asr': "Transcribe speech to text. ",
    'ast': "Translate speech to english text. ",
}
model_config = asr_config.ModelConfig()

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Override model-file locations from the CLI, but only when the argument was
# supplied AND the path exists on disk; otherwise keep the defaults from
# src/mooer/configs/asr_config.py.
for _attr in ('llm_path', 'encoder_path', 'adapter_path', 'lora_dir', 'cmvn_path'):
    _candidate = getattr(args, _attr)
    if _candidate and os.path.exists(_candidate):
        setattr(model_config, _attr, _candidate)

# The chosen task ('asr' or 'ast') doubles as the prompt key.
if args.task:
    model_config.prompt_key = args.task
# Pick the compute device via the project helper and report it.
device = str(get_device())
logger.info("This demo will run on %s", device.upper())
logger.info(model_config)

# Build model + tokenizer, move to the target device, switch to inference
# mode (Module.to and Module.eval both return the module itself).
model, tokenizer = mooer_model.init_model(model_config=model_config)
model.to(device).eval()
# ---- data processing setup ----
# Resolve the prompt template (by LLM family) and the task instruction text.
prompt_template_key = model_config.get('prompt_template_key', 'qwen')
prompt_template = PROMPT_TEMPLATE_DICT[prompt_template_key]
prompt_key = model_config.get('prompt_key', 'asr')
prompt_org = PROMPT_DICT[prompt_key]

logger.info(
    "Use LLM Type %s, Prompt template %s, Use task type %s, Prompt %s",
    prompt_template_key,
    prompt_template,
    prompt_key,
    prompt_org,
)

# Feature normalization stats and the encoder-adapter downsampling factor.
cmvn = load_cmvn(model_config.get('cmvn_path'))
adapter_downsample_rate = model_config.get('adapter_downsample_rate')
def process_wav(wav_path):
|
audio_raw, sample_rate = torchaudio.load(wav_path)
|
if sample_rate != 16000:
|
# resample the data
|
resampler = Resample(orig_freq=sample_rate, new_freq=16000)
|
audio_raw = resampler(audio_raw)
|
if audio_raw.shape[0] > 1:
|
# convert to mono
|
audio_raw = audio_raw.mean(dim=0, keepdim=True)
# NOTE(review): the file is truncated at this point by the scrape —
# process_wav() above is cut off mid-body (no return statement visible) and
# trailing dataset-viewer boilerplate ("Subsets and Splits ...") was removed.