dlxj committed
Commit 30842b5 · 1 Parent(s): 31a7185

Successful training

examples/asr/asr_eou/speech_to_text_rnnt_eou_train_number.py CHANGED
@@ -3,70 +3,6 @@
 # huggingface_echodict\asr_rnnt_eou_from_scratch\papers\arXiv-1211.3711v1\training_number.py
 # Train Chinese digit recognition

-"""
-Example usage:
-
-1. Prepare dataset based on <NeMo Root>/nemo/collections/asr/data/audio_to_eou_label_lhotse.py
-   Specifically, each sample in the jsonl manifest should have the following fields:
-   {
-       "audio_filepath": "/path/to/audio.wav",
-       "text": "The text of the audio."
-       "offset": 0.0, # offset of the audio, in seconds
-       "duration": 3.0, # duration of the audio, in seconds
-       "sou_time": 0.2, # start of utterance time, in seconds
-       "eou_time": 1.5, # end of utterance time, in seconds
-   }
-
-2. If using a normal ASR model as initialization:
-   - Add special tokens <EOU> and <EOB> to the tokenizer of pretrained model, by refering to the script
-       <NeMo Root>/scripts/asr_eou/tokenizers/add_special_tokens_to_sentencepiece.py
-   - If pretrained model is HybridRNNTCTCBPEModel, convert it to RNNT using the script
-       <NeMo Root>/examples/asr/asr_hybrid_transducer_ctc/helpers/convert_nemo_asr_hybrid_to_ctc.py
-
-3. Run the following command to train the ASR-EOU model:
-```bash
-#!/bin/bash
-
-TRAIN_MANIFEST=/path/to/train_manifest.json
-VAL_MANIFEST=/path/to/val_manifest.json
-NOISE_MANIFEST=/path/to/noise_manifest.json
-
-PRETRAINED_NEMO=/path/to/pretrained_model.nemo
-TOKENIZER_DIR=/path/to/tokenizer_dir
-
-BATCH_SIZE=16
-NUM_WORKERS=8
-LIMIT_TRAIN_BATCHES=1000
-VAL_CHECK_INTERVAL=1000
-MAX_STEPS=1000000
-
-EXP_NAME=fastconformer_transducer_bpe_streaming_eou
-SCRIPT=${NEMO_PATH}/examples/asr/asr_eou/speech_to_text_rnnt_eou_train.py
-CONFIG_PATH=${NEMO_PATH}/examples/asr/conf/asr_eou
-CONFIG_NAME=fastconformer_transducer_bpe_streaming
-
-CUDA_VISIBLE_DEVICES=0 python $SCRIPT \
-    --config-path $CONFIG_PATH \
-    --config-name $CONFIG_NAME \
-    ++init_from_nemo_model=$PRETRAINED_NEMO \
-    model.encoder.att_context_size="[70,1]" \
-    model.tokenizer.dir=$TOKENIZER_DIR \
-    model.train_ds.manifest_filepath=$TRAIN_MANIFEST \
-    model.train_ds.augmentor.noise.manifest_path=$NOISE_MANIFEST \
-    model.validation_ds.manifest_filepath=$VAL_MANIFEST \
-    model.train_ds.batch_size=$BATCH_SIZE \
-    model.train_ds.num_workers=$NUM_WORKERS \
-    model.validation_ds.batch_size=$BATCH_SIZE \
-    model.validation_ds.num_workers=$NUM_WORKERS \
-    ~model.test_ds \
-    trainer.limit_train_batches=$LIMIT_TRAIN_BATCHES \
-    trainer.val_check_interval=$VAL_CHECK_INTERVAL \
-    trainer.max_steps=$MAX_STEPS \
-    exp_manager.name=$EXP_NAME
-```
-
-"""
-
 import os
 import sys
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
@@ -343,6 +279,81 @@ def main(cfg):

 if __name__ == '__main__':
     import sys
+    import os
+    import json
+    import wave
+    import subprocess
+    import shutil
+
+    txt_path = r"e:\huggingface_echodict\NeMo\data\tts_dataset\labels.txt"
+    manifest_path = txt_path.replace(".txt", "_manifest.jsonl")
+    text_corpus_path = txt_path.replace(".txt", "_text.txt")
+    tokenizer_dir = os.path.join(os.path.dirname(txt_path), "tokenizer_eou")
+
+    # Dynamically generate the manifest and the text corpus
+    if not os.path.exists(manifest_path) or not os.path.exists(text_corpus_path):
+        base_dir = os.path.dirname(txt_path)
+        with open(txt_path, 'r', encoding='utf-8') as f_in, \
+                open(manifest_path, 'w', encoding='utf-8') as f_out, \
+                open(text_corpus_path, 'w', encoding='utf-8') as f_txt:
+            for line in f_in:
+                line = line.strip()
+                if not line:
+                    continue
+                parts = line.split('\t')
+                if len(parts) == 2:
+                    audio_file, text = parts
+                    audio_filepath = os.path.join(base_dir, audio_file)
+                    if not os.path.exists(audio_filepath):
+                        print(f"Warning: {audio_filepath} not found.")
+                        continue
+                    try:
+                        with wave.open(audio_filepath, 'r') as w:
+                            frames = w.getnframes()
+                            rate = w.getframerate()
+                            duration = frames / float(rate)
+                    except Exception as e:
+                        print(f"Error reading {audio_filepath}: {e}")
+                        continue
+
+                    item = {
+                        "audio_filepath": audio_filepath,
+                        "duration": duration,
+                        "text": text,
+                    }
+                    f_out.write(json.dumps(item, ensure_ascii=False) + '\n')
+                    f_txt.write(text + '\n')
+
+    # Generate the tokenizer and add the EOU/EOB tokens
+    if not os.path.exists(os.path.join(tokenizer_dir, "tokenizer.model")):
+        print("Generating tokenizer...")
+        from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model
+
+        # 1. Train the base tokenizer
+        temp_tokenizer_dir = tokenizer_dir + "_temp"
+        os.makedirs(temp_tokenizer_dir, exist_ok=True)
+        create_spt_model(
+            data_file=text_corpus_path,
+            vocab_size=32,
+            sample_size=-1,
+            do_lower_case=True,
+            output_dir=temp_tokenizer_dir,
+            tokenizer_type="bpe",
+            character_coverage=1.0,
+        )
+
+        # 2. Inject the EOU/EOB tokens
+        # using the utility script shipped with NeMo
+        add_special_tokens_script = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../scripts/asr_eou/tokenizers/add_special_tokens_to_sentencepiece.py"))
+        base_model_path = os.path.join(temp_tokenizer_dir, "tokenizer.model")
+
+        subprocess.check_call([
+            sys.executable, add_special_tokens_script,
+            "--input_file", base_model_path,
+            "--output_dir", tokenizer_dir
+        ])
+        print(f"Tokenizer generated successfully at {tokenizer_dir}")
+
     sys.argv.extend([
         '--config-path', '../conf/asr_eou/',
         '--config-name', 'fastconformer_transducer_bpe_streaming_large',
@@ -353,15 +364,11 @@ if __name__ == '__main__':
         'exp_manager.checkpoint_callback_params.save_top_k=1',
         '++trainer.check_val_every_n_epoch=1',
         '++model.encoder.conv_norm_type=layer_norm',
-        'model.tokenizer.dir=data/common_voice_11_0/ja/tokenizers/tokenizer_spe_bpe_v2491_eou',
-        'model.train_ds.tarred_audio_filepaths=data/common_voice_11_0/ja/train_tarred_1bk/audio__OP_0..1023_CL_.tar',
-        '++model.train_ds.is_tarred=true',
-        '++model.train_ds.tarred_dataset_resolve_paths=false',
-        '++model.train_ds.is_tarred_audio=true',
-        'model.train_ds.manifest_filepath=data/common_voice_11_0/ja/train_tarred_1bk/tarred_audio_manifest.json',
+        f'model.tokenizer.dir={tokenizer_dir}',
+        f'model.train_ds.manifest_filepath={manifest_path}',
         '~model.train_ds.augmentor.noise',
-        'model.validation_ds.manifest_filepath=data/common_voice_11_0/ja/validation_tarred_1bk/tarred_audio_manifest.json',
-        'model.test_ds.manifest_filepath=data/common_voice_11_0/ja/test_tarred_1bk/tarred_audio_manifest.json',
+        f'model.validation_ds.manifest_filepath={manifest_path}',
+        f'model.test_ds.manifest_filepath={manifest_path}',
         'trainer.max_epochs=1',
         'trainer.devices=1',
         'trainer.accelerator=gpu',
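For reference, the manifest-generation block added above assumes that labels.txt is a tab-separated list of `<wav filename>\t<transcript>` pairs, with the WAV files resolved relative to the directory containing labels.txt. A minimal sketch of the expected input and of one resulting manifest line follows (the file names, duration, and transcripts are hypothetical, not taken from the commit):

```
# labels.txt — each line is a wav filename and a transcript separated by a single tab
0001.wav	一二三四五
0002.wav	六七八九十

# labels_manifest.jsonl — one JSON object per line, as written by the loop above
{"audio_filepath": "e:\\huggingface_echodict\\NeMo\\data\\tts_dataset\\0001.wav", "duration": 2.1, "text": "一二三四五"}
```

The same transcripts are also written one per line to labels_text.txt, which is then used as the training corpus for the SentencePiece tokenizer.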
scripts/asr_eou/tokenizers/add_special_tokens_to_sentencepiece.py CHANGED
@@ -14,6 +14,9 @@

 import os

+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
+
 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
 import logging
 import sys
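As an optional sanity check after the tokenizer step, one can confirm that the special tokens were actually injected. This is only a sketch: it assumes the sentencepiece package is installed, that the tokens are named `<EOU>` and `<EOB>` as in the removed docstring, and that the model was written to the tokenizer_eou directory created by the training script above.

```python
# Hypothetical check that the EOU/EOB tokens ended up in the SentencePiece model.
import sentencepiece as spm

model_path = r"e:\huggingface_echodict\NeMo\data\tts_dataset\tokenizer_eou\tokenizer.model"
sp = spm.SentencePieceProcessor(model_file=model_path)

print("vocab size:", sp.get_piece_size())  # base vocab (32 here) plus the injected special tokens
for tok in ("<EOU>", "<EOB>"):
    piece_id = sp.piece_to_id(tok)
    # piece_to_id() returns the <unk> id for unknown pieces, so compare explicitly.
    status = "missing" if piece_id == sp.unk_id() else "present"
    print(f"{tok}: id={piece_id} ({status})")
```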