{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "4f6ad8a0-2fbf-4732-adab-88acc36814d0", "metadata": {}, "outputs": [], "source": [ "# 准备训练和测试数据" ] }, { "cell_type": "code", "execution_count": 2, "id": "cffb3f58-7324-4825-9fc2-fdae98d0749d", "metadata": {}, "outputs": [], "source": [ "# load phonemizer\n", "import phonemizer\n", "from nltk.tokenize import word_tokenize\n", "\n", "global_phonemizer_en = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)\n", "global_phonemizer_es = phonemizer.backend.EspeakBackend(language='es', preserve_punctuation=True, with_stress=True)\n" ] }, { "cell_type": "code", "execution_count": 3, "id": "8a7bb6c1-9ce9-4d43-958f-112d0df55cce", "metadata": {}, "outputs": [], "source": [ "def preprocess(text, language):\n", " text = text.strip()\n", " if language == 'en-us':\n", " ps = global_phonemizer_en.phonemize([text])\n", " elif language == 'es':\n", " ps = global_phonemizer_es.phonemize([text])\n", " ps = word_tokenize(ps[0])\n", " ps = [p for p in ps if p not in ['``', '`', '(', ')']]\n", " ps = ' '.join(ps)\n", "\n", " return ps" ] }, { "cell_type": "code", "execution_count": 4, "id": "dde01040-55aa-4408-9544-507a56a27b76", "metadata": {}, "outputs": [], "source": [ "# # test\n", "# text = '''¡Usuario! ¿De qué estás hablando? Ese tipo de conversación es inapropiada... Supongo que no puedo decir que no... sólo por esta vez. Mmm... me haces sentir tan bien.'''\n", "# preprocess(text, 'es')" ] }, { "cell_type": "code", "execution_count": 5, "id": "35366d0d-149e-4cf6-8780-be6631dc3f3b", "metadata": {}, "outputs": [], "source": [ "# !pip3 install pandas" ] }, { "cell_type": "code", "execution_count": 6, "id": "da973b2e-4520-4647-8451-a83bf0c412bb", "metadata": {}, "outputs": [], "source": [ "import wave\n", "import soundfile as sf\n", "\n", "def get_wav_duration(file_path):\n", " try:\n", " with wave.open(file_path, 'rb') as wf:\n", " # 获取音频文件的帧数\n", " frames = wf.getnframes()\n", " # 获取帧速率(每秒的帧数)\n", " frame_rate = wf.getframerate()\n", " # 计算音频文件的持续时间(秒数)\n", " duration = frames / float(frame_rate)\n", " return duration\n", " except:\n", " wave_array, sr = sf.read(file_path)\n", " return len(wave_array) / sr" ] }, { "cell_type": "code", "execution_count": 7, "id": "8b93b418-d2f6-46c9-a6b7-71fa37c063ea", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 1151/1151 [00:08<00:00, 137.66it/s]\n", "100%|██████████| 40/40 [00:00<00:00, 124.61it/s]\n", "100%|██████████| 39/39 [00:00<00:00, 130.81it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "142731 5491 4619\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "import os\n", "import pandas as pd\n", "from tqdm import tqdm\n", "\n", "# libritts\n", "def prepare_libritts_data(dir_path, id1_list):\n", " final_data = []\n", " data = {}\n", " \n", " for id1_index in tqdm(range(len(id1_list))):\n", " id1 = id1_list[id1_index]\n", " # id1是speaker id\n", " for id2 in os.listdir(dir_path+'/'+id1):\n", " for fname in os.listdir(dir_path+'/'+id1+'/'+id2):\n", " if 'txt' in fname or 'wav' in fname:\n", " whole_id = fname.split('.')[0]\n", " if whole_id not in data:\n", " data[whole_id] = {}\n", " \n", " if 'wav' in fname:\n", " wav_path = dir_path+'/'+id1+'/'+id2+'/'+fname\n", " wav_duration = get_wav_duration(wav_path)\n", " data[whole_id]['wav'] = wav_path\n", " data[whole_id]['wav_dur'] = wav_duration\n", " \n", " elif 'normalized' in fname: # 使用normalized版本,或是original\n", " with open(dir_path+'/'+id1+'/'+id2+'/'+fname, 'r') as f:\n", " data[whole_id]['text'] = f.read().strip()\n", " \n", " data[whole_id]['speaker_id'] = id1\n", "\n", " for d in data:\n", " if data[d]['wav_dur'] > 1:\n", " # 去除太短的\n", " final_data.append(f'''{data[d]['wav']}|{data[d]['text']}|{data[d]['speaker_id']}''')\n", " \n", " return final_data\n", "\n", "train_path = '/workspace/TTS/data/LibriTTS/train-clean-460'\n", "train_ids = os.listdir(train_path)\n", "\n", "val_path = '/workspace/TTS/data/LibriTTS/dev-clean'\n", "val_ids = os.listdir(val_path)\n", "\n", "test_path = '/workspace/TTS/data/LibriTTS/test-clean'\n", "test_ids = os.listdir(test_path)\n", "\n", "libritts_train_data = prepare_libritts_data(train_path, train_ids)\n", "libritts_val_data = prepare_libritts_data(val_path, val_ids)\n", "libritts_test_data = prepare_libritts_data(test_path, test_ids)\n", "\n", "print(len(libritts_train_data), len(libritts_val_data), len(libritts_test_data))" ] }, { "cell_type": "code", "execution_count": 8, "id": "a9f1c3cf-16ae-4b48-9b96-0440c1056db4", "metadata": {}, "outputs": [], "source": [ "# # 分析一下libritts的数据分布,给个参考\n", "# id2num = {}\n", "# for d in libritts_train_data:\n", "# _id = d.split('|')[2]\n", "# if _id not in id2num:\n", "# id2num[_id] = 1\n", "# else:\n", "# id2num[_id] += 1\n", "\n", "# sorted_dict = sorted(id2num.items(), key=lambda x: x[1], reverse=True)" ] }, { "cell_type": "code", "execution_count": 9, "id": "c93c9b4f-4cee-42f6-8fa2-c3cac47220c3", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# for i, d in enumerate(sorted_dict):\n", "# print(d, i/len(id2num))" ] }, { "cell_type": "code", "execution_count": 10, "id": "d357def5-b0af-4a73-a04e-cc1084c83a83", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "153150 1897 1662\n" ] } ], "source": [ "# cml-tts\n", "def prepare_cml_data():\n", " # 准备 cml-tts数据,直接读取csv数据即可\n", " # 去掉transcription不太对的数据\n", " # +100000是为了和libritts错开speaker id\n", "\n", " df_train = pd.read_csv('/workspace/TTS/data/cml_tts_dataset_spanish_v0.1/train.csv', delimiter='|')\n", " train = []\n", " for index, row in df_train.iterrows():\n", " if row['levenshtein'] >= 0.9 and row['duration'] > 1 and row['duration'] < 30:\n", " train.append(f'''/workspace/TTS/data/cml_tts_dataset_spanish_v0.1/{row['wav_filename']}|{row['transcript']}|{10000+row['client_id']}''')\n", " \n", " df_val = pd.read_csv('/workspace/TTS/data/cml_tts_dataset_spanish_v0.1/dev.csv', delimiter='|')\n", " val = []\n", " for index, row in df_val.iterrows():\n", " if row['levenshtein'] >= 0.9 and row['duration'] > 1 and row['duration'] < 30:\n", " val.append(f'''/workspace/TTS/data/cml_tts_dataset_spanish_v0.1/{row['wav_filename']}|{row['transcript']}|{10000+int(row['client_id'])}''')\n", "\n", " df_test = pd.read_csv('/workspace/TTS/data/cml_tts_dataset_spanish_v0.1/test.csv', delimiter='|')\n", " test = []\n", " for index, row in df_test.iterrows():\n", " if row['levenshtein'] >= 0.9 and row['duration'] > 1 and row['duration'] < 30:\n", " test.append(f'''/workspace/TTS/data/cml_tts_dataset_spanish_v0.1/{row['wav_filename']}|{row['transcript']}|{10000+int(row['client_id'])}''')\n", "\n", " return train, val, test\n", " \n", "cml_train_data, cml_val_data, cml_test_data = prepare_cml_data()\n", "print(len(cml_train_data), len(cml_val_data), len(cml_test_data))" ] }, { "cell_type": "code", "execution_count": 11, "id": "f25a81d5-d032-4d6d-a981-9f866c5a14c6", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base-960h and are newly initialized: ['wav2vec2.masked_spec_embed']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC\n", "import torch\n", "import soundfile as sf\n", "import librosa\n", "\n", "# load model and tokenizer\n", "wav2vec_processor = Wav2Vec2Processor.from_pretrained(\"facebook/wav2vec2-base-960h\", cache_dir = '/workspace/hf_resource/')\n", "wav2vec_model = Wav2Vec2ForCTC.from_pretrained(\"facebook/wav2vec2-base-960h\", cache_dir = '/workspace/hf_resource/')\n", "\n", "def wav2vec_asr(wav_path, end_second = None):\n", " wave, sr = sf.read(wav_path)\n", " if wave.shape[-1] == 2:\n", " wave = wave[:, 0].squeeze()\n", " if sr != 16000:\n", " wave = librosa.resample(wave, orig_sr=sr, target_sr=16000)\n", " \n", " wave = torch.from_numpy(wave).float()\n", "\n", " if end_second is not None:\n", " wave = wave[:int(16000 * end_second)]\n", "\n", " # tokenize\n", " input_values = wav2vec_processor(wave, return_tensors=\"pt\", padding=\"longest\", sampling_rate=16000).input_values # Batch size 1\n", " \n", " # retrieve logits\n", " logits = wav2vec_model(input_values).logits\n", " \n", " # take argmax and decode\n", " predicted_ids = torch.argmax(logits, dim=-1)\n", " transcription = wav2vec_processor.batch_decode(predicted_ids)\n", "\n", " return transcription" ] }, { "cell_type": "code", "execution_count": 12, "id": "f2dcd822-6d1d-41de-babd-a966ef0d1da1", "metadata": { "scrolled": true }, "outputs": [ { "ename": "ModuleNotFoundError", "evalue": "No module named 'whisper'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mwhisper\u001b[39;00m\n\u001b[1;32m 3\u001b[0m model \u001b[38;5;241m=\u001b[39m whisper\u001b[38;5;241m.\u001b[39mload_model(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlarge-v3\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21masr\u001b[39m(wav_path):\n", "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'whisper'" ] } ], "source": [ "import whisper\n", "\n", "model = whisper.load_model(\"large-v3\")\n", "\n", "def asr(wav_path):\n", " result = model.transcribe(wav_path)\n", " return result[\"text\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "d86ed416-9110-4e02-a605-1ca645f258bc", "metadata": {}, "outputs": [], "source": [ "# 本质是,第一个单词不能太晚说出来\n", "import whisperx\n", "\n", "model_a, metadata = whisperx.load_align_model(language_code='en', device='cuda')\n", "\n", "def alignment(wav_path):\n", " result = model.transcribe(wav_path)\n", " \n", " result = whisperx.align(result[\"segments\"], model_a, metadata, wav_path, 'cuda', return_char_alignments=False)\n", " \n", " return result[\"segments\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "a62e3c95-c1e5-48e1-82b4-56836e7c2bd4", "metadata": {}, "outputs": [], "source": [ "# 加载已经处理好的数据,主要为得到id list,为重新采样做准备\n", "game_train_ids, game_val_ids, game_test_ids = [], [], []\n", "\n", "with open('/workspace/TTS/tts/StyleTTS2/Data/train_list_en_20k_0318.txt', 'r') as f:\n", " for line in f:\n", " line = line.strip()\n", " if 'game_en' in line:\n", " game_train_ids.append('/'.join(line.split('|')[0].split('/')[:-1]))\n", "\n", "with open('/workspace/TTS/tts/StyleTTS2/Data/val_list_en_20k_0318.txt', 'r') as f:\n", " for line in f:\n", " line = line.strip()\n", " if 'game_en' in line:\n", " game_val_ids.append('/'.join(line.split('|')[0].split('/')[:-1]))\n", "\n", "with open('/workspace/TTS/tts/StyleTTS2/Data/OOD_texts_en_2k_0318.txt', 'r') as f:\n", " for line in f:\n", " line = line.strip()\n", " if 'game_en' in line:\n", " game_test_ids.append('/'.join(line.split('|')[0].split('/')[:-1]))" ] }, { "cell_type": "code", "execution_count": null, "id": "e45ae4c5-5163-4c48-85b0-0b84055468d6", "metadata": { "scrolled": true }, "outputs": [], "source": [ "for d in game_train_ids:\n", " print(d)" ] }, { "cell_type": "code", "execution_count": null, "id": "ebfd5f74-2796-41fd-8cc6-52464776dc33", "metadata": {}, "outputs": [], "source": [ "# 加入游戏数据(英文)\n", "# 过滤游戏数据\n", "# 短于1s的不要、长度太短的不要、太长的也不要(30s为限)\n", "game_en = []\n", "\n", "def get_game_data(path, speaker_id):\n", " role_names = os.listdir(path)\n", " for role_name in role_names:\n", " if '#Unknown' in role_name or ' ' in role_name:\n", " continue\n", " \n", " file_names = os.listdir(os.path.join(path, role_name))\n", " file_names = [n for n in file_names if 'lab' in n or 'wav' in n]\n", " id2wav, id2text = {}, {}\n", " for file_name in file_names:\n", " id = file_name.split('.')[0]\n", " \n", " if 'lab' in file_name:\n", " try:\n", " with open(path+'/'+role_name+'/'+file_name, 'r') as f:\n", " text = f.read().strip()\n", " id2text[id] = text\n", " except:\n", " continue\n", " \n", " elif 'wav' in file_name:\n", " wav = path+'/'+role_name+'/'+file_name\n", " id2wav[id] = wav\n", " \n", " for id in id2wav:\n", " if id in id2text:\n", " wav_path, text = id2wav[id], id2text[id]\n", " wav_len = get_wav_duration(wav_path) \n", " if wav_len > 1 and wav_len < 30 and len(text.split(' ')) >= 3 and len(text.split(' ')) <= 50 and '{' not in text and '<' not in text:\n", " game_en.append(f'{wav_path}|{text}|{speaker_id}')\n", "\n", " speaker_id += 1\n", " \n", " return speaker_id\n", "\n", "sid = get_game_data('/workspace/TTS/data/game_en', 30000)\n", "print(sid)\n", "sid = get_game_data('/workspace/TTS/data/game_en1', sid)\n", "print(sid)\n", "\n", "print(len(game_en))\n" ] }, { "cell_type": "code", "execution_count": 18, "id": "776c404e-9865-4f92-b593-272e4d065372", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# # 按照speaker id,统计一下分布\n", "# sid2game = {}\n", "# for d in game_en:\n", "# d = d.strip().split('|')\n", "# sid = '/'.join(d[0].split('/')[:-1])\n", "# if sid not in sid2game:\n", "# sid2game[sid] = [d]\n", "# else:\n", "# sid2game[sid].append(d)\n", "\n", "# sid2game = sorted(sid2game.items(), key=lambda x: len(x[1]), reverse=True)\n", "\n", "# whole_num = sum([len(wavs) for sid, wavs in sid2game])\n", "# print(whole_num)\n", "# for sid, wavs in sid2game:\n", "# print(sid, len(wavs), len(wavs)/whole_num)" ] }, { "cell_type": "code", "execution_count": 19, "id": "6a5abdd0-2d6e-433d-8c9e-cd64a1e512dd", "metadata": {}, "outputs": [], "source": [ "# len(sid2game)" ] }, { "cell_type": "code", "execution_count": 20, "id": "20133137-f9b1-444e-a4e2-477d858b0af9", "metadata": {}, "outputs": [], "source": [ "# 对游戏数据,再次进行处理\n", "# 处理1:每个sid,设定样本上限\n", "# 处理2:按照sid,划分train/val/test\n", "\n", "import random\n", "import numpy as np\n", "\n", "# 隐藏seed,可以更充分利用data\n", "random.seed(42)\n", "np.random.seed(42)\n", "\n", "sid2game = {}\n", "for d in game_en:\n", " sid = d.split('|')[2]\n", " if sid not in sid2game:\n", " sid2game[sid] = [d]\n", " else:\n", " sid2game[sid].append(d)\n", "\n", "max_sample = 150\n", "filter_sid2game = {}\n", "for sid in sid2game:\n", " wavs = sid2game[sid]\n", " if len(wavs) > max_sample:\n", " np.random.shuffle(wavs)\n", " wavs = wavs[:max_sample]\n", " filter_sid2game[sid] = wavs\n" ] }, { "cell_type": "code", "execution_count": 21, "id": "ec591e2c-2f84-44b6-9bbe-9e511c16cc84", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# print(len([d for d in filter_sid2game if len(filter_sid2game[d]) > 1]))\n", "# print(len(filter_sid2game))\n", "# filter_sid2game = sorted(filter_sid2game.items(), key=lambda x: len(x[1]), reverse=True)\n", "\n", "# whole_num = sum([len(wavs) for sid, wavs in filter_sid2game])\n", "# print(whole_num)\n", "# for sid, wavs in filter_sid2game:\n", "# print(sid, len(wavs), len(wavs)/whole_num)" ] }, { "cell_type": "code", "execution_count": 22, "id": "10dc4e4f-f3c3-4bcd-9330-413636f4e638", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# index = random.choice(range(len(game_en_train)))\n", "# tmp = game_en_train[index]\n", "# print(tmp)\n", "# import IPython.display as ipd\n", "# display(ipd.Audio(tmp.split('|')[0], rate=24000, normalize=False))\n" ] }, { "cell_type": "code", "execution_count": 23, "id": "cee4bad1-4d05-45bd-8140-0ee8e677dc92", "metadata": {}, "outputs": [], "source": [ "# game_en_train, game_en_val, game_en_test = [], [], []\n", "# # whole_sids = list(filter_sid2game.keys())\n", "# # np.random.shuffle(whole_sids)\n", "\n", "# # train_sids = whole_sids[:int(0.8 * len(whole_sids))]\n", "# # val_sids = whole_sids[int(0.8 * len(whole_sids)):int(0.9 * len(whole_sids))]\n", "# # test_sids = whole_sids[int(0.9 * len(whole_sids)):]\n", "\n", "# train_sids = set()\n", "# val_sids = set()\n", "# test_sids = set()\n", "# for sid in filter_sid2game:\n", "# wavs = filter_sid2game[sid]\n", "# if '/'.join(wavs[0].split('/')[:-1]) in game_train_ids and '男声' not in wavs[0] and '女声' not in wavs:\n", " \n", "# train_sids.add(sid)\n", "# elif '/'.join(wavs[0].split('/')[:-1]) in game_val_ids:\n", "# val_sids.add(sid)\n", "# elif '/'.join(wavs[0].split('/')[:-1]) in game_test_ids:\n", "# test_sids.add(sid)\n", "\n", "# for sid in train_sids:\n", "# game_en_train += filter_sid2game[sid]\n", "\n", "# for sid in val_sids:\n", "# game_en_val += filter_sid2game[sid]\n", "\n", "# for sid in test_sids:\n", "# game_en_test += filter_sid2game[sid]\n", "\n", "# np.random.shuffle(game_en_train)\n", "# np.random.shuffle(game_en_val)\n", "# np.random.shuffle(game_en_test)\n", "\n", "# print(len(game_en_train), len(game_en_val), len(game_en_test))\n" ] }, { "cell_type": "code", "execution_count": 24, "id": "8351bf48-26f7-4111-b0ea-4ac055953fca", "metadata": {}, "outputs": [], "source": [ "# 加入emotional tts数据" ] }, { "cell_type": "code", "execution_count": 25, "id": "bba721d0-0c35-4388-968f-ef5b7a96d91e", "metadata": {}, "outputs": [], "source": [ "# !pip3 install langdetect" ] }, { "cell_type": "code", "execution_count": 26, "id": "f3955304-af9e-471c-8fd9-e760b0796202", "metadata": {}, "outputs": [], "source": [ "# from langdetect import detect" ] }, { "cell_type": "code", "execution_count": 27, "id": "5a735bbe-628d-487d-9692-2c11e6ce1909", "metadata": {}, "outputs": [], "source": [ "import os\n", "import re\n", "\n", "def is_english_string(s):\n", " return bool(re.match('^[a-zA-Z\\s]+$', s))\n", " \n", "esd = []\n", "emov = []\n", "jl = []\n", "rav = []\n", "\n", "esd_path = '/workspace/TTS/data/ESD/dataset/'\n", "esd_trans = []\n", "for f1_name in os.listdir(esd_path):\n", " if '.' not in f1_name:\n", " with open(esd_path + f1_name + '/' + f1_name + '.txt', 'r') as f:\n", " for line in f:\n", " line = line.strip().split('\\t')\n", " id, trans, emo = line[0], line[1], line[2]\n", " if is_english_string(emo):\n", " esd_trans.append([id, trans, emo])\n", "\n", "emov_path = '/workspace/TTS/data/emov_db/'\n", "# 没有trans,需要过whisper\n", "for f1_name in os.listdir(emov_path):\n", " for f2_name in os.listdir(emov_path + f1_name):\n", " if 'wav' in f2_name:\n", " emov.append(f'{emov_path}{f1_name}/{f2_name}')\n", "\n", "rav_path = '/workspace/TTS/data/ravdess/'\n", "# 也没有trans,需要过whisper\n", "for f1_name in os.listdir(rav_path):\n", " for f2_name in os.listdir(rav_path + f1_name):\n", " rav.append(f'/workspace/TTS/data/ravdess/{f1_name}/{f2_name}')" ] }, { "cell_type": "code", "execution_count": 28, "id": "4ce217b6-09cc-4932-9209-fb213cdde19a", "metadata": {}, "outputs": [], "source": [ "jl_path = '/workspace/TTS/data/jl_corpus/raw_jl/jl/'\n", "jl_sid2wav = {}\n", "jl_sid2txt = {}\n", "# 有trans\n", "for f1_name in os.listdir(jl_path):\n", " if 'wav' in f1_name:\n", " sid = f1_name.split('.')[0]\n", " jl_sid2wav[sid] = jl_path + f1_name\n", " elif 'txt' in f1_name:\n", " with open(jl_path + f1_name, 'r') as f:\n", " jl_sid2txt[sid] = f.read().strip()\n", "\n", "for sid in jl_sid2wav:\n", " if sid in jl_sid2txt:\n", " jl.append([jl_sid2wav[sid], jl_sid2txt[sid]])\n", "\n" ] }, { "cell_type": "code", "execution_count": 29, "id": "cc8666ed-c576-409f-bfeb-03c8827de90c", "metadata": {}, "outputs": [], "source": [ "# 进行asr\n", "need_whisper = []\n", "for d in game_en_train + game_en_val + game_en_test:\n", " need_whisper.append(d.split('|')[0])\n", "\n", "need_whisper += rav\n", "print(len(need_whisper))\n" ] }, { "cell_type": "code", "execution_count": 30, "id": "2cc9361f-39dd-45d3-a548-0118041c5cb4", "metadata": {}, "outputs": [], "source": [ "from tqdm import tqdm\n", "asr_res = []\n", "\n", "with open('/workspace/TTS/data/whisper_large_v3_result_0330.txt', 'w') as f:\n", " for i in tqdm(range(len(need_whisper))):\n", " wav_path = need_whisper[i]\n", " trans = ''\n", " try:\n", " trans = asr(wav_path)\n", " except:\n", " pass\n", " \n", " asr_res.append([wav_path, trans])\n", " f.write(f'{wav_path}|{trans}\\n')\n" ] }, { "cell_type": "code", "execution_count": 31, "id": "d25b2f6c-6bd1-488d-bb77-7891d5aee5a6", "metadata": {}, "outputs": [], "source": [ "# len(emov)" ] }, { "cell_type": "code", "execution_count": 32, "id": "bc51b44d-57f2-4fc7-bbb2-08e6cbe7bfbe", "metadata": {}, "outputs": [], "source": [ "# 对emov也进行asr\n", "new_need_whisper = []\n", "new_need_whisper += emov\n", "\n", "with open('/workspace/TTS/data/whisper_large_v3_result_0401_add.txt', 'w') as f:\n", " for i in tqdm(range(len(new_need_whisper))):\n", " wav_path = new_need_whisper[i]\n", " trans = ''\n", " try:\n", " trans = asr(wav_path)\n", " except:\n", " pass\n", "\n", " f.write(f'{wav_path}|{trans}\\n')" ] }, { "cell_type": "code", "execution_count": 33, "id": "13c471aa-0d2a-4050-a438-5bcb02c437c2", "metadata": {}, "outputs": [], "source": [ "asr_res = []\n", "with open('/workspace/TTS/data/whisper_large_v3_result_0330.txt', 'r') as f:\n", " for line in f:\n", " line = line.strip().split('|')\n", " asr_res.append([line[0], line[1]])\n", "\n", "with open('/workspace/TTS/data/whisper_large_v3_result_0401_add.txt', 'r') as f:\n", " for line in f:\n", " line = line.strip().split('|')\n", " asr_res.append([line[0], line[1]])" ] }, { "cell_type": "code", "execution_count": 34, "id": "77210c33-9e55-476e-891c-edb0de0b0ad1", "metadata": {}, "outputs": [], "source": [ "# len(asr_res)" ] }, { "cell_type": "code", "execution_count": 35, "id": "3c3afd1a-ad41-4c8f-a48f-6c75202024e6", "metadata": {}, "outputs": [], "source": [ "# !pip install editdistance" ] }, { "cell_type": "code", "execution_count": 37, "id": "eea2bb15-613a-4f85-8fc2-9995f94df5e8", "metadata": {}, "outputs": [], "source": [ "# 过滤游戏数据\n", "import string\n", "import IPython.display as ipd\n", "import editdistance\n", "\n", "wav2asr = {}\n", "for items in asr_res:\n", " wav2asr[items[0]] = items[1]\n", "\n", "def remove_punctuation(text):\n", " # 定义标点符号集合\n", " punctuation = string.punctuation\n", " # 遍历字符串,将标点符号替换为空格\n", " result = ''.join([char if char not in punctuation else '' for char in text])\n", " return result\n", "\n", "def jaccard_similarity(s1, s2):\n", " set1 = set(s1)\n", " set2 = set(s2)\n", " intersection = len(set1.intersection(set2))\n", " union = len(set1.union(set2))\n", " return intersection / union if union != 0 else 0\n", " \n", "def is_game_en_valid(wav_path, text, log = False):\n", " aha_words = ['ahem', 'argh', 'sigh', 'sobs', 'uh', 'um', 'hmm', 'ugh', 'woo', 'haha', 'hehe', 'huh', 'hmph', 'hm', 'whoa', 'yay', 'ah', 'mmhmm', 'ha', 'oh', 'hu', 'mmm']\n", "\n", " if wav_path in wav2asr:\n", " whisper_trans = remove_punctuation(wav2asr[wav_path].lower().strip())\n", " else:\n", " whisper_trans = asr(wav_path).lower().strip()\n", "\n", " original_text = text\n", " \n", " text = remove_punctuation(text.lower().strip())\n", "\n", " duration = get_wav_duration(wav_path)\n", " mean_dur = duration / len(text.split(' '))\n", "\n", " jaccard_score = jaccard_similarity(whisper_trans.split(' '), text.split(' '))\n", " edit_distance = editdistance.eval(whisper_trans.split(' '), text.split(' '))\n", "\n", " length_ratio = len(whisper_trans.split(' ')) / len(text.split(' '))\n", "\n", " if '*' in original_text \\\n", " or (edit_distance >= 3 and length_ratio > 1.5) \\\n", " or whisper_trans.split(' ')[0] in aha_words \\\n", " or text.split(' ')[0] in aha_words \\\n", " or ('男声' in wav_path or '女声' in wav_path or '会场广播' in wav_path):\n", " # if '*' in text or len(whisper_trans) == 0 or length_ratio > 1.5 or mean_dur > 1.0 or jaccard_score < 0.3 or (whisper_trans.split(' ')[0] != text.split(' ')[0] and whisper_trans.split(' ')[0] in aha_words) or text.split(' ')[0] in aha_words or 'sigh' in text or len(start_wav2vec_trans0) == 0 or start_wav2vec_trans0.split(' ')[0] in aha_words:\n", " if log:\n", " print(wav_path)\n", " display(ipd.Audio(wav_path, rate=24000, normalize=False))\n", " print(f'mean_dur: {mean_dur}')\n", " print(f'transcript(orignial): {text} \\ntranscript(whisper): {whisper_trans}')\n", " print(f'length_ratio: {length_ratio}')\n", " print(f'jaccard_score: {jaccard_score}')\n", " print(f'editdistance: {edit_distance}')\n", " print()\n", " return False\n", " \n", " wav2vec_trans0 = wav2vec_asr(wav_path, 0.5)[0].lower().strip()\n", " if len(wav2vec_trans0) > 0 and wav2vec_trans0.split(' ')[0] in aha_words:\n", " if log:\n", " print(f'transcript-0.5s(wav2vec): {wav2vec_trans0}')\n", " return False\n", " \n", " wav2vec_trans1 = wav2vec_asr(wav_path, 2.0)[0].lower().strip()\n", " if len(wav2vec_trans1) > 0 and wav2vec_trans1.split(' ')[0] in aha_words:\n", " if log:\n", " print(f'transcript-2s(wav2vec): {wav2vec_trans1}')\n", " return False\n", " \n", " if len(wav2vec_trans0) == 0:\n", " alignment_asr = alignment(wav_path)\n", " if len(alignment_asr) > 0 and alignment_asr[0]['start'] > 0.5:\n", " if log:\n", " print(f'alignment: {alignment_asr}')\n", " return False \n", "\n", " return True\n", "\n", " # # 第二次过滤\n", " # trans1 = asr(wav_path, 2.0)[0].lower().strip()\n", " # trans2 = asr(wav_path, 3.0)[0].lower().strip()\n", " # if (len(trans1) > 0 and trans1.split(' ')[0] in aha_words) or (len(trans2) > 0 and trans2.split(' ')[0] in aha_words):\n", " # if log:\n", " # print(wav_path, text)\n", " # print(trans0)\n", " # print(trans1)\n", " # print(trans2)\n", " # display(ipd.Audio(wav_path, rate=24000, normalize=False))\n", " # return False\n", " # else:\n", " # return True\n" ] }, { "cell_type": "code", "execution_count": 38, "id": "7c4a6cc8-3a08-4a97-9658-3cece6c97628", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# # 看过滤规则的准确率【这些样本是不是不想加入训练的】\n", "# sample_size = 200\n", "# game_en_tmp = np.array(game_en_train)\n", "# random_indices = np.random.choice(len(game_en_train), size=sample_size, replace=False)\n", "# game_en_tmp = game_en_tmp[random_indices]\n", "\n", "# game_en_tmp1 = []\n", "# for i in tqdm(range(len(game_en_tmp))):\n", "# d = game_en_tmp[i]\n", "# if is_game_en_valid(d.split('|')[0], d.split('|')[1], log=True):\n", "# game_en_tmp1.append(d)\n", "\n", "# print(len(game_en_tmp1))" ] }, { "cell_type": "code", "execution_count": 39, "id": "7a1c7cc8-c93a-4dfc-b00a-651254a17fe7", "metadata": {}, "outputs": [], "source": [ "# # 随机查看过滤后的数据【看看是不是都想用来训练】,保证召回率\n", "# tmp = random.choice(game_en_tmp1)\n", "# print(tmp)\n", "# display(ipd.Audio(tmp.split('|')[0], rate=24000, normalize=False))" ] }, { "cell_type": "code", "execution_count": 40, "id": "2c3da5b7-3776-4597-a719-4ac51ebdd442", "metadata": {}, "outputs": [], "source": [ "# same_speaker_data = [d for d in game_en_train if d.split('|')[-1]==tmp.split('|')[-1]]\n", "# for d in same_speaker_data[:3]:\n", "# print(d)\n", "# display(ipd.Audio(d.split('|')[0], rate=24000, normalize=False))" ] }, { "cell_type": "code", "execution_count": 377, "id": "f2554191-419a-40fc-a2e8-87fffac30fcb", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 2798/2798 [21:25<00:00, 2.18it/s] s]\n", " 95%|█████████▌| 3189/3347 [22:40<01:23, 1.89it/s] " ] }, { "name": "stdout", "output_type": "stream", "text": [ "Failed to align segment (\"风流尽在山水间\"): no characters in this segment found in model dictionary, resorting to original...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 3347/3347 [23:41<00:00, 2.35it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "19338 2136 2674\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "# 过滤数据\n", "def purify_game(data):\n", " new_data = []\n", " for i in tqdm(range(len(data))):\n", " d = data[i]\n", " if is_game_en_valid(d.split('|')[0], d.split('|')[1], log=False):\n", " new_data.append(d)\n", " return new_data\n", "\n", "game_en_train = purify_game(game_en_train)\n", "game_en_val = purify_game(game_en_val)\n", "game_en_test = purify_game(game_en_test)\n", "\n", "print(len(game_en_train), len(game_en_val), len(game_en_test))" ] }, { "cell_type": "code", "execution_count": 386, "id": "59d2027f-a23c-448f-873a-6546504e62d8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1200 150 150\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 6893/6893 [00:00<00:00, 389635.42it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "375 75 75\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "# 过滤emotional tts数据,包括采样处理\n", "emo_start_id = 35000 # 避开游戏\n", "\n", "emo_id_num = -1\n", "emotion_data = []\n", "\n", "#esd\n", "esd_ids = []\n", "esd_data = {}\n", "for d in esd_trans:\n", " id, txt, emo = d[0], d[1], d[2]\n", " role_id = id.split('_')[0] + d[2]\n", " if role_id not in esd_ids:\n", " emo_id_num += 1\n", " esd_ids.append(role_id)\n", "\n", " wav_path = f'''/workspace/TTS/data/ESD/dataset/{id.split('_')[0]}/{emo}/{id}.wav'''\n", " if role_id not in esd_data:\n", " esd_data[role_id] = [[wav_path, txt, emo_start_id + emo_id_num]]\n", " else:\n", " esd_data[role_id].append([wav_path, txt, emo_start_id + emo_id_num])\n", "\n", "# 采样\n", "esd_train, esd_val, esd_test = [], [], []\n", "esd_max_num = 30\n", "for role_id in esd_data:\n", " d = esd_data[role_id]\n", " np.random.shuffle(d)\n", " d = d[:esd_max_num]\n", " if '0011' in role_id:\n", " esd_val += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", " elif '0017' in role_id:\n", " esd_test += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", " else:\n", " esd_train += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", "\n", "print(len(esd_train), len(esd_val), len(esd_test))\n", "\n", "# emov\n", "emov_data = {}\n", "for i in tqdm(range(len(emov))):\n", " d = emov[i]\n", " role_name = d.split('/')[5]\n", " emo = d.split('/')[6].split('_')[0]\n", " role_id = role_name + '_' + emo\n", "\n", " if 'neu' in role_id or 'ang' in role_id:\n", " if role_id not in emov_data:\n", " emo_id_num += 1\n", " emov_data[role_id] = [[d, wav2asr[d].strip(), emo_start_id + emo_id_num]]\n", " else:\n", " emov_data[role_id].append([d, wav2asr[d].strip(), emo_start_id + emo_id_num])\n", "\n", "emov_train, emov_val, emov_test = [], [], []\n", "emov_max_num = 75 #emo少\n", "for role_id in emov_data:\n", " d = emov_data[role_id]\n", " np.random.shuffle(d)\n", " d = d[:emov_max_num]\n", " if 'jenie_neutral' in role_id:\n", " emov_val += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", " elif 'jenie_anger' in role_id:\n", " emov_test += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", " else:\n", " emov_train += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", "\n", "print(len(emov_train), len(emov_val), len(emov_test))" ] }, { "cell_type": "code", "execution_count": 388, "id": "3dbcf791-840c-414f-a557-5d9713dd0407", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 2380/2380 [00:00<00:00, 200093.08it/s]\n", "100%|██████████| 40/40 [03:53<00:00, 5.84s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "340 20 40\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 1440/1440 [00:00<00:00, 156135.71it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "192 0 0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "# jl\n", "jl_data = {}\n", "for i in tqdm(range(len(jl))):\n", " d = jl[i]\n", " role_name = d[0].split('/')[-1].split('_')[0]\n", " emo = d[0].split('/')[-1].split('_')[1]\n", " text = d[1].strip()\n", " role_id = role_name + '_' + emo\n", " # role_id = role_name\n", "\n", " if role_id not in jl_data:\n", " emo_id_num += 1\n", " jl_data[role_id] = [[d[0], text, emo_start_id + emo_id_num]]\n", " else:\n", " jl_data[role_id].append([d[0], text, emo_start_id + emo_id_num])\n", "\n", "jl_train, jl_val, jl_test = [], [], []\n", "jl_max_num = 10 #emo多\n", "jl_ids = list(jl_data.keys())\n", "for i in tqdm(range(len(jl_ids))):\n", " role_id = jl_ids[i]\n", " d = jl_data[role_id]\n", " np.random.shuffle(d)\n", " d = d[:jl_max_num]\n", "\n", " # 去掉transcript没对齐的数据\n", " new_d = []\n", " for _d in d:\n", " wav_path = _d[0]\n", " text = _d[1]\n", " whisper_asr = asr(wav_path).strip()\n", "\n", " new_d.append([_d[0], whisper_asr, _d[2]])\n", " \n", " # edit_distance = editdistance.eval(remove_punctuation(text.lower().strip()).split(' '), remove_punctuation(whisper_asr.lower().strip()).split(' '))\n", " # if edit_distance == 0:\n", " # new_d.append(_d)\n", "\n", " d = new_d\n", " \n", " if 'female1_encouraging' in role_id or 'female2_happy' in role_id:\n", " jl_val += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", " elif 'male1_angry' in role_id or 'male2_anxious' in role_id:\n", " jl_test += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", " else:\n", " jl_train += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", "\n", "print(len(jl_train), len(jl_val), len(jl_test))\n", "\n", "# rav数据\n", "rav_data = {}\n", "for i in tqdm(range(len(rav))):\n", " d = rav[i]\n", " role_name = d.split('/')[5]\n", " profile = d.split('/')[-1].split('.')[0].split('-')\n", " emo = profile[2]\n", " emo_inten = profile[3]\n", " role_id = role_name + '_' + emo\n", "\n", " if role_id not in rav_data:\n", " emo_id_num += 1\n", " rav_data[role_id] = [[d, wav2asr[d].strip(), emo_start_id + emo_id_num]]\n", " else:\n", " rav_data[role_id].append([d, wav2asr[d].strip(), emo_start_id + emo_id_num])\n", "\n", "rav_train, rav_val, rav_test = [], [], []\n", "rav_max_num = 1 #emo少\n", "for role_id in rav_data:\n", " d = rav_data[role_id]\n", " np.random.shuffle(d)\n", " d = d[:rav_max_num]\n", " \n", " rav_train += [f'{_d[0]}|{_d[1]}|{_d[2]}' for _d in d]\n", "\n", "print(len(rav_train), len(rav_val), len(rav_test))\n" ] }, { "cell_type": "code", "execution_count": 389, "id": "c37279a3-d2a6-4695-99eb-f52c202cb806", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "340 20 40\n", "192 0 0\n" ] } ], "source": [ "print(len(jl_train), len(jl_val), len(jl_test))\n", "print(len(rav_train), len(rav_val), len(rav_test))" ] }, { "cell_type": "code", "execution_count": null, "id": "4bc3b899-aff4-4d73-b3eb-f79de84ac568", "metadata": {}, "outputs": [], "source": [ "# 随机查看数据\n", "# # esd\n", "# sample_data = esd_train + esd_val + esd_test\n", "# sample_data = [d for d in sample_data if 'Surprise' in d]\n", "# tmp = random.choice(sample_data)\n", "# print(tmp)\n", "# display(ipd.Audio(tmp.split('|')[0], rate=24000, normalize=False))\n", "\n", "# # emov\n", "# sample_data = [d for d in emov if 'neu' in d]\n", "# # sample_data = emov\n", "# tmp = random.choice(sample_data)\n", "# print(tmp, wav2asr[tmp])\n", "# wave, sr = sf.read(tmp)\n", "# display(ipd.Audio(wave, rate=sr, normalize=False))\n", "\n", "# # jl\n", "# sample_data = [d for d in jl if '' in d[0]]\n", "# tmp = random.choice(sample_data)\n", "# print(tmp)\n", "# display(ipd.Audio(tmp[0], rate=sr, normalize=False))\n", "\n", "# # rav\n", "# sample_data = [d for d in rav]\n", "# # sample_data = emov\n", "# tmp = random.choice(sample_data)\n", "# print(tmp, wav2asr[tmp])\n", "# display(ipd.Audio(tmp, rate=sr, normalize=False))" ] }, { "cell_type": "code", "execution_count": 391, "id": "95e602d2-2144-4795-b39a-feec48dd26f8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2107 245 265\n" ] } ], "source": [ "emo_train = esd_train + emov_train + jl_train + rav_train\n", "emo_val = esd_val + emov_val + jl_val + rav_val\n", "emo_test = esd_test + emov_test + jl_test + rav_test\n", "\n", "print(len(emo_train), len(emo_val), len(emo_test))" ] }, { "cell_type": "code", "execution_count": 459, "id": "65c347f5-bfc8-42a3-8dd7-f93c26b21916", "metadata": {}, "outputs": [], "source": [ "import re\n", "def process_text(text):\n", " new_text = re.sub(r'^\\.\\.\\.', '', text)\n", " return new_text" ] }, { "cell_type": "code", "execution_count": 470, "id": "5cbf0254-082c-4fcc-b173-005df662c231", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "libritts_val_data hours 3.830820000000008\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 2380/2380 [00:01<00:00, 1391.90it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "game_train hours 36.31573596808863\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 19338/19338 [00:15<00:00, 1235.76it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "game_val hours 3.908920038225938\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 2136/2136 [00:01<00:00, 1233.00it/s]\n", "100%|██████████| 2107/2107 [00:01<00:00, 1811.55it/s]\n", "100%|██████████| 245/245 [00:00<00:00, 1533.77it/s]\n" ] } ], "source": [ "# 进行phonemizer, mix data\n", "\n", "import random\n", "import numpy as np\n", "\n", "# 隐藏seed,可以更充分利用data\n", "random.seed(42)\n", "np.random.seed(42)\n", "\n", "train, val = [], []\n", "\n", "# np.random.shuffle(libritts_train_data)\n", "# libritts_train_data = libritts_train_data[:10000]\n", "# hours = sum([get_wav_duration(x.split('|')[0]) for x in libritts_train_data]) / 3600\n", "# print('libritts_train_data hours', hours)\n", "\n", "# for i in tqdm(range(len(libritts_train_data))):\n", "# line = libritts_train_data[i]\n", "# line = line.split('|')\n", "# wav, text, sid = line[0], line[1], line[2]\n", "# ps = preprocess(text, 'en-us')\n", "# train.append(f'{wav}|{ps}|{sid}')\n", "\n", "np.random.shuffle(libritts_val_data)\n", "sample_libritts_val_data = libritts_val_data[:2380]\n", "hours = sum([get_wav_duration(x.split('|')[0]) for x in sample_libritts_val_data]) / 3600\n", "print('libritts_val_data hours', hours)\n", "for i in tqdm(range(len(sample_libritts_val_data))):\n", " line = sample_libritts_val_data[i]\n", " line = line.split('|')\n", " wav, text, sid = line[0], line[1], line[2]\n", " ps = preprocess(process_text(text.strip()), 'en-us')\n", " val.append(f'{wav}|{ps}|{sid}')\n", "\n", "hours = sum([get_wav_duration(x.split('|')[0]) for x in game_en_train]) / 3600\n", "print('game_train hours', hours)\n", "\n", "for i in tqdm(range(len(game_en_train))):\n", " line = game_en_train[i]\n", " line = line.split('|')\n", " wav, text, sid = line[0], line[1], line[2]\n", " ps = preprocess(process_text(text.strip()), 'en-us')\n", " train.append(f'{wav}|{ps}|{sid}')\n", "\n", "hours = sum([get_wav_duration(x.split('|')[0]) for x in game_en_val]) / 3600\n", "print('game_val hours', hours)\n", "\n", "for i in tqdm(range(len(game_en_val))):\n", " line = game_en_val[i]\n", " line = line.split('|')\n", " wav, text, sid = line[0], line[1], line[2]\n", " ps = preprocess(process_text(text.strip()), 'en-us')\n", " val.append(f'{wav}|{ps}|{sid}')\n", "\n", "for i in tqdm(range(len(emo_train))):\n", " line = emo_train[i]\n", " line = line.split('|')\n", " wav, text, sid = line[0], line[1], line[2]\n", " second = get_wav_duration(wav)\n", " if second > 1:\n", " ps = preprocess(process_text(text.strip()), 'en-us')\n", " train.append(f'{wav}|{ps}|{sid}')\n", "\n", "for i in tqdm(range(len(emo_val))):\n", " line = emo_val[i]\n", " line = line.split('|')\n", " wav, text, sid = line[0], line[1], line[2]\n", " second = get_wav_duration(wav)\n", " if second > 1:\n", " ps = preprocess(process_text(text.strip()), 'en-us')\n", " val.append(f'{wav}|{ps}|{sid}')\n", "\n", "# np.random.shuffle(cml_train_data)\n", "# cml_train_data = cml_train_data[:25000]\n", "# hours = sum([get_wav_duration(x.split('|')[0]) for x in cml_train_data]) / 3600\n", "# print('cml_train_data hours', hours)\n", "# for i in tqdm(range(len(cml_train_data))):\n", "# line = cml_train_data[i]\n", "# line = line.split('|')\n", "# wav, text, sid = line[0], line[1], line[2]\n", "# ps = preprocess(text, 'es')\n", "# train.append(f'{wav}|{ps}|{sid}')\n", "\n", "# np.random.shuffle(cml_val_data)\n", "# cml_val_data = cml_val_data[:500]\n", "# hours = sum([get_wav_duration(x.split('|')[0]) for x in cml_val_data]) / 3600\n", "# print('cml_val_data hours', hours)\n", "# for i in tqdm(range(len(cml_val_data))):\n", "# line = cml_val_data[i]\n", "# line = line.split('|')\n", "# wav, text, sid = line[0], line[1], line[2]\n", "# ps = preprocess(text, 'es')\n", "# val.append(f'{wav}|{ps}|{sid}')\n" ] }, { "cell_type": "code", "execution_count": 471, "id": "fc762cfc-84c0-4280-b7d7-586c7bc293ca", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "21445\n", "38.01937940192716\n" ] } ], "source": [ "# data statistic\n", "hours = sum([get_wav_duration(x.split('|')[0]) for x in train])\n", "print(len(train))\n", "print(hours/3600)" ] }, { "cell_type": "code", "execution_count": 472, "id": "8d4f7db0-b01a-4f3e-9949-73d7d05e12d4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "4761\n", "7.950606687531487\n" ] } ], "source": [ "# data statistic\n", "hours = sum([get_wav_duration(x.split('|')[0]) for x in val])\n", "print(len(val))\n", "print(hours/3600)" ] }, { "cell_type": "code", "execution_count": 473, "id": "1e862965-6b93-40ad-bac6-1340325811e8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "21445\n", "4761\n", "21440\n", "4752\n" ] } ], "source": [ "# 保证长度可以整除batch\n", "# 去除太长的样本\n", "\n", "from meldataset import TextCleaner\n", "\n", "text_cleaner = TextCleaner() # 对齐training\n", "\n", "bs = 16\n", "maxlen = 512-2 # bert最大长度512,并且要考虑开头和结尾的pad\n", "\n", "print(len(train))\n", "print(len(val))\n", "\n", "train = [d for d in train if len(text_cleaner(d.split('|')[1])) < maxlen]\n", "val = [d for d in val if len(text_cleaner(d.split('|')[1])) < maxlen]\n", "\n", "train = train[:int(len(train)/bs) * bs]\n", "val = val[:int(len(val)/bs) * bs]\n", "\n", "print(len(train))\n", "print(len(val))\n", "\n", "np.random.shuffle(train)\n", "\n", "with open('/workspace/TTS/tts/StyleTTS2/Data/train_list_en_21k_0401.txt', 'w') as f:\n", " for d in train:\n", " f.write(d+'\\n')\n", "\n", "with open('/workspace/TTS/tts/StyleTTS2/Data/val_list_en_21k_0401.txt', 'w') as f:\n", " for d in val:\n", " f.write(d+'\\n')" ] }, { "cell_type": "code", "execution_count": 474, "id": "14de8383-89fd-4a44-a8e4-377b8bb77180", "metadata": {}, "outputs": [], "source": [ "# !pip install langdetect" ] }, { "cell_type": "code", "execution_count": 475, "id": "f96880cd-ce73-4e69-bc55-c54dd735a260", "metadata": {}, "outputs": [], "source": [ "# # 新增OOD数据\n", "# import json, re, random\n", "# from tqdm import tqdm\n", "# from langdetect import detect\n", "# from nltk.tokenize import sent_tokenize\n", "\n", "# ood_data = []\n", "\n", "# waifu_data = []\n", "# with open('/workspace/TTS/data/sft_test.jsonl', 'r') as f:\n", "# for line in f:\n", "# line = json.loads(line.strip())\n", "# if line['lang'] == 'es':\n", "# waifu_data.append(line)\n", "\n", "# with open('/workspace/TTS/data/sft_val.jsonl', 'r') as f:\n", "# for line in f:\n", "# line = json.loads(line.strip())\n", "# if line['lang'] == 'es':\n", "# waifu_data.append(line)\n", "\n", "# def get_ood_data_waifu(text):\n", "\n", "# sentences = \n", " \n", "# sentences = sent_tokenize(text) # 拆分出来,还是太长\n", "# print('sen1', sentences)\n", "\n", "# sentences = [preprocess(sen.strip(), 'es') for sen in sentences]\n", "\n", "# if len(sentences) > 0:\n", "# sentence = random.choice(sentences)\n", "# print('sen4', sentence)\n", "# return sentence\n", "# else:\n", "# return None\n", "\n", "# for i in tqdm(range(len(waifu_data[:10]))):\n", "# d = waifu_data[i]\n", "# if len(d['context']) < 1:\n", "# continue\n", " \n", "# text = d['context'][0]['translate_content']\n", "\n", "# # print(text)\n", " \n", "# try:\n", "# if isinstance(text, str) and len(text) > 10 and detect(text[:100]) == 'es':\n", "# # 去除非西班牙数据\n", "# print('sen', text)\n", "# sen = get_ood_data_waifu(text)\n", "# if sen is not None:\n", "# ood_data.append(sen)\n", "# except:\n", "# pass\n", "\n", "# # if len(d['context']) < 2:\n", "# # continue\n", " \n", "# # text = d['context'][1]['translate_content']\n", "\n", "# # try:\n", "# # if isinstance(text, str) and len(text) > 10 and detect(text[:100]) == 'es':\n", "# # # 去除非西班牙数据\n", "# # sen = get_ood_data_waifu(text)\n", "# # if sen is not None:\n", "# # ood_data.append(sen)\n", "# # except:\n", "# # pass\n" ] }, { "cell_type": "code", "execution_count": 476, "id": "6227d8c1-40c6-4c5e-98f1-1cd135074adc", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "2674" ] }, "execution_count": 476, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(game_en_test)" ] }, { "cell_type": "code", "execution_count": 477, "id": "0f76809d-120c-4083-8f36-d599badf678c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "265" ] }, "execution_count": 477, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(emo_test)" ] }, { "cell_type": "code", "execution_count": 480, "id": "f262faf8-d7eb-4cc3-bd08-64de847f3ca0", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "5938\n" ] } ], "source": [ "from meldataset import TextCleaner\n", "\n", "text_cleaner = TextCleaner() # 对齐training\n", "\n", "# es_ood_data = []\n", "# with open('/workspace/TTS/tts/StyleTTS2/Data/OOD_texts.txt', 'r') as f:\n", "# # waifu数据\n", "# for line in f:\n", "# line = line.strip().split('|')\n", "# text, sid = line[0], line[1]\n", "# es_ood_data.append(preprocess(text, 'es'))\n", "\n", "# # +其他西班牙语\n", "# for d in cml_test_data[:1500]:\n", "# es_ood_data.append(preprocess(d.split('|')[1], 'es'))\n", "\n", "# +英语,加入wav_path\n", "en_ood_data = []\n", "for d in libritts_test_data[:3000]:\n", " en_ood_data.append([d.split('|')[0], preprocess(d.split('|')[1], 'en-us')])\n", "\n", "for d in game_en_test:\n", " # en_ood_data.append([d.split('|')[0], d.split('|')[1]])\n", " en_ood_data.append([d.split('|')[0], preprocess(process_text(d.split('|')[1].strip()), 'en-us')])\n", "\n", "for d in emo_test:\n", " en_ood_data.append([d.split('|')[0], preprocess(process_text(d.split('|')[1].strip()), 'en-us')])\n", " \n", "# es_ood_data = list(set(es_ood_data))\n", "# en_ood_data = list(set(en_ood_data))\n", "\n", "# 去掉text cleaner之后超长的内容\n", "# es_ood_data = [d for d in es_ood_data if len(text_cleaner(d)) < 510]\n", "en_ood_data = [d for d in en_ood_data if len(text_cleaner(d[1])) < 510]\n", "\n", "# print(len(es_ood_data))\n", "print(len(en_ood_data))\n" ] }, { "cell_type": "code", "execution_count": 481, "id": "3be67931-2506-44f0-b394-56cb6ecb722f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "5938\n" ] } ], "source": [ "# es_ood_data = [f'{d}|0|es' for d in es_ood_data]\n", "en_ood_data = [f'{d[0]}|{d[1]}|en' for d in en_ood_data]\n", "ood_data = en_ood_data\n", "# ood_data = es_ood_data + en_ood_data\n", "np.random.shuffle(ood_data)\n", "print(len(ood_data))\n", "\n", "with open('/workspace/TTS/tts/StyleTTS2/Data/OOD_texts_en_6k_0401.txt', 'w') as f:\n", " for d in ood_data:\n", " f.write(d+'\\n')" ] }, { "cell_type": "code", "execution_count": null, "id": "2dda822d-24f7-4bb4-882f-7b9711c8e983", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "56ed7106-07d2-45aa-9fc1-1286a3a956bd", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "51992195-2507-4b56-8dd0-6a7876decf0e", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "56e4670b-598b-4c08-8137-5475f87e8aa2", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" } }, "nbformat": 4, "nbformat_minor": 5 }