Added Model
Browse files

- .gitattributes +1 -0
- README.md +168 -0
- config.yml +74 -0
- events.out.tfevents.1722578197.s44504-focus-slate.43856.0.v2 +3 -0
- lightspeech_quant.onnx +3 -0
- lightspeech_quant.ort +3 -0
- lightspeech_quant.tflite +3 -0
- model.h5 +3 -0
- processor.json +1 -0
.gitattributes
CHANGED
@@ -14,6 +14,7 @@
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
+*.ort filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
---
language: sw
license: apache-2.0
tags:
- tensorflowtts
- audio
- text-to-speech
- text-to-mel
inference: false
datasets:
- bookbot/sw-TZ-Victoria
- bookbot/sw-TZ-Victoria-syllables
- bookbot/sw-TZ-Victoria-v2
- bookbot/sw-TZ-VictoriaNeural
---

# LightSpeech MFA SW v4

LightSpeech MFA SW v4 is a text-to-mel-spectrogram model based on the [LightSpeech](https://arxiv.org/abs/2102.04040) architecture. This model was fine-tuned from [LightSpeech MFA SW v1](https://huggingface.co/bookbot/lightspeech-mfa-sw-v1) and trained on real and synthetic audio datasets. The list of speakers includes:

- sw-TZ-Victoria
- sw-TZ-Victoria-syllables
- sw-TZ-Victoria-v2
- sw-TZ-VictoriaNeural

We trained an acoustic Swahili model on our speech corpus using [Montreal Forced Aligner v3.0.0](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) and used it as the duration extractor. That model, and consequently our model, uses the IPA phone set for Swahili. We used [gruut](https://github.com/rhasspy/gruut) for phonemization, as sketched below, and followed these [steps](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/mfa_extraction) to perform duration extraction.
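
A minimal sketch of the phonemization step, assuming gruut is installed with Swahili support (`pip install gruut[sw]`); the sample sentence is illustrative:

```py
# Hedged sketch: convert Swahili text to IPA phonemes with gruut.
from gruut import sentences

text = "Habari ya asubuhi"
phonemes = []
for sentence in sentences(text, lang="sw"):
    for word in sentence:
        if word.phonemes:  # punctuation tokens carry no phonemes
            phonemes.extend(word.phonemes)
print(phonemes)
```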

This model was trained using the [TensorFlowTTS](https://github.com/TensorSpeech/TensorFlowTTS) framework. All training was done on an RTX 4090 GPU. All scripts used for training can be found in this [GitHub fork](https://github.com/bookbot-hive/TensorFlowTTS), and the [training metrics](https://huggingface.co/bookbot/lightspeech-mfa-sw-v4/tensorboard) were logged via TensorBoard.

## Model

| Model                   | Config                                                                            | SR (Hz) | Mel range (Hz) | FFT / Hop / Win (pt) | #steps |
| ----------------------- | --------------------------------------------------------------------------------- | ------- | -------------- | -------------------- | ------ |
| `lightspeech-mfa-sw-v4` | [Link](https://huggingface.co/bookbot/lightspeech-mfa-sw-v4/blob/main/config.yml) | 44.1K   | 20-11025       | 2048 / 512 / None    | 200K   |
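
TensorFlowTTS extracts features with its own preprocessing scripts; purely for illustration, the table's settings correspond to a mel extraction along these lines (librosa and `sample.wav` are assumptions, not part of the pipeline):

```py
# Hedged sketch: mel spectrogram with the table's settings
# (44.1 kHz audio, 80 mel bins, fmin=20, fmax=11025, FFT=2048, hop=512).
import librosa

y, sr = librosa.load("sample.wav", sr=44100)  # "sample.wav" is a placeholder
mel = librosa.feature.melspectrogram(
    y=y, sr=sr, n_fft=2048, hop_length=512,
    n_mels=80, fmin=20, fmax=11025,
)
print(mel.shape)  # (80, n_frames)
```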

## Training Procedure

<details>
<summary>Feature Extraction Setting</summary>

hop_size: 512 # Hop size.
format: "npy"

</details>

<details>
<summary>Network Architecture Setting</summary>

model_type: lightspeech
lightspeech_params:
  dataset: "swahiliipa"
  n_speakers: 1
  encoder_hidden_size: 256
  encoder_num_hidden_layers: 3
  encoder_num_attention_heads: 2
  encoder_attention_head_size: 16
  encoder_intermediate_size: 1024
  encoder_intermediate_kernel_size:
    - 5
    - 25
    - 13
    - 9
  encoder_hidden_act: "mish"
  decoder_hidden_size: 256
  decoder_num_hidden_layers: 3
  decoder_num_attention_heads: 2
  decoder_attention_head_size: 16
  decoder_intermediate_size: 1024
  decoder_intermediate_kernel_size:
    - 17
    - 21
    - 9
    - 13
  decoder_hidden_act: "mish"
  variant_prediction_num_conv_layers: 2
  variant_predictor_filter: 256
  variant_predictor_kernel_size: 3
  variant_predictor_dropout_rate: 0.5
  num_mels: 80
  hidden_dropout_prob: 0.2
  attention_probs_dropout_prob: 0.1
  max_position_embeddings: 2048
  initializer_range: 0.02
  output_attentions: False
  output_hidden_states: False

</details>

<details>
<summary>Data Loader Setting</summary>

batch_size: 16 # Batch size for each GPU, assuming gradient_accumulation_steps == 1.
eval_batch_size: 16
remove_short_samples: true # Whether to remove samples whose length is less than batch_max_steps.
allow_cache: true # Whether to cache the dataset; if true, it requires CPU memory.
mel_length_threshold: 32 # Remove all targets with mel_length <= 32.
is_shuffle: true # Shuffle the dataset after each epoch.

</details>

<details>
<summary>Optimizer & Scheduler Setting</summary>

optimizer_params:
  initial_learning_rate: 0.0001
  end_learning_rate: 0.00005
  decay_steps: 150000 # < train_max_steps is recommended.
  warmup_proportion: 0.02
  weight_decay: 0.001

gradient_accumulation_steps: 1
var_train_expr: null # Trainable variable expression (e.g. 'embeddings|encoder|decoder'),
                     # separated by |. If null, all variables are trained.

</details>
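
For reference, these values imply warmup over warmup_proportion × train_max_steps = 4,000 steps, followed by a polynomial decay from 1e-4 to 5e-5 over 150,000 steps. A minimal sketch of the decay portion using Keras (the exact schedule class TensorFlowTTS wires up internally may differ):

```py
# Hedged sketch: the decay portion of the implied learning-rate schedule.
# Warmup (0.02 * 200_000 = 4_000 steps) would be applied before this decay.
import tensorflow as tf

decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=1e-4,
    decay_steps=150_000,
    end_learning_rate=5e-5,
)
print(float(decay(0)), float(decay(150_000)))  # 1e-4 -> 5e-5
```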

<details>
<summary>Interval Setting</summary>

train_max_steps: 200000 # Number of training steps.
save_interval_steps: 5000 # Interval steps to save checkpoints.
eval_interval_steps: 5000 # Interval steps to evaluate the network.
log_interval_steps: 200 # Interval steps to record the training log.
delay_f0_energy_steps: 3 # 2 steps use LR outputs only, then 1 step uses LR + F0 + Energy.

</details>

<details>
<summary>Other Setting</summary>

num_save_intermediate_results: 1 # Number of batches to be saved as intermediate results.

</details>

## How to Use

```py
import tensorflow as tf
from tensorflow_tts.inference import TFAutoModel, AutoProcessor

lightspeech = TFAutoModel.from_pretrained("bookbot/lightspeech-mfa-sw-v4")
processor = AutoProcessor.from_pretrained("bookbot/lightspeech-mfa-sw-v4")

text, speaker_name = "Hello World", "sw-TZ-Victoria"
input_ids = processor.text_to_sequence(text)

mel, duration_outputs, _ = lightspeech.inference(
    input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
    speaker_ids=tf.convert_to_tensor(
        [processor.speakers_map[speaker_name]], dtype=tf.int32
    ),
    speed_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
    f0_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
    energy_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
)
```
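
Since this is a text-to-mel model, turning `mel` into audio requires a separate vocoder. A minimal sketch, assuming a TensorFlowTTS-compatible MelGAN-family vocoder; `"path/to/vocoder"` is a placeholder, not a published checkpoint:

```py
# Hedged sketch: vocode the mel spectrogram to a 44.1 kHz waveform.
import soundfile as sf

vocoder = TFAutoModel.from_pretrained("path/to/vocoder")  # placeholder checkpoint
audio = vocoder.inference(mel)[0, :, 0]                   # [batch, T, 1] -> [T]
sf.write("output.wav", audio.numpy(), 44100, "PCM_16")
```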
| 159 |
+
|
| 160 |
+
## Disclaimer
|
| 161 |
+
|
| 162 |
+
Do consider the biases which came from pre-training datasets that may be carried over into the results of this model.
|
| 163 |
+
|
| 164 |
+
## Authors
|
| 165 |
+
|
| 166 |
+
LightSpeech MFA SW v4 was trained and evaluated by [David Samuel Setiawan](https://davidsamuell.github.io/), [Wilson Wongso](https://wilsonwongso.dev/). All computation and development are done on local machines.
|
| 167 |
+
|
| 168 |
+
## Framework versions
|
| 169 |
+
|
| 170 |
+
- TensorFlowTTS 1.8
|
| 171 |
+
- TensorFlow 2.12.0
|
config.yml
ADDED
allow_cache: true
batch_size: 16
config: ./TensorFlowTTS/examples/lightspeech/conf/lightspeech_swahiliipa.ft.yaml
dataset_config: TensorFlowTTS/preprocess/swahiliipa_preprocess.yaml
dataset_mapping: dump/swahiliipa_mapper.json
dataset_stats: dump/stats.npy
delay_f0_energy_steps: 3
dev_dir: ./dump/valid/
energy_stat: ./dump/stats_energy.npy
eval_batch_size: 16
eval_interval_steps: 5000
f0_stat: ./dump/stats_f0.npy
format: npy
gradient_accumulation_steps: 1
hop_size: 512
is_shuffle: true
lightspeech_params:
  attention_probs_dropout_prob: 0.1
  dataset: swahiliipa
  decoder_attention_head_size: 16
  decoder_hidden_act: mish
  decoder_hidden_size: 256
  decoder_intermediate_kernel_size:
  - 17
  - 21
  - 9
  - 13
  decoder_intermediate_size: 1024
  decoder_num_attention_heads: 2
  decoder_num_hidden_layers: 3
  encoder_attention_head_size: 16
  encoder_hidden_act: mish
  encoder_hidden_size: 256
  encoder_intermediate_kernel_size:
  - 5
  - 25
  - 13
  - 9
  encoder_intermediate_size: 1024
  encoder_num_attention_heads: 2
  encoder_num_hidden_layers: 3
  hidden_dropout_prob: 0.2
  initializer_range: 0.02
  max_position_embeddings: 2048
  n_speakers: 1
  num_mels: 80
  output_attentions: false
  output_hidden_states: false
  variant_prediction_num_conv_layers: 2
  variant_predictor_dropout_rate: 0.5
  variant_predictor_filter: 256
  variant_predictor_kernel_size: 3
log_interval_steps: 200
mel_length_threshold: 32
mixed_precision: true
model_type: lightspeech
num_save_intermediate_results: 1
optimizer_params:
  decay_steps: 150000
  end_learning_rate: 5.0e-05
  initial_learning_rate: 0.0001
  warmup_proportion: 0.02
  weight_decay: 0.001
outdir: ./lightspeech-sw-tz-victoria-ft-vocab-exp-synth-v2
pretrained: lightspeech-mfa-sw-v1/model.h5
remove_short_samples: true
resume: ''
save_interval_steps: 5000
train_dir: ./dump/train/
train_max_steps: 200000
use_norm: true
var_train_expr: null
verbose: 1
version: '0.0'
events.out.tfevents.1722578197.s44504-focus-slate.43856.0.v2
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:195a34286ac43890f0fe17cef76ee993187504fda0468d2008f57441f5d41ee2
size 234746
lightspeech_quant.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:6f9878e0a686f5237d57364e89acda8126a3da7b231453eb4d419492653a366c
size 4663604
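
The repository also ships quantized ONNX/ORT/TFLite exports. A minimal sketch that loads the ONNX export with onnxruntime and inspects its expected inputs (the input names and shapes are not documented here, so inspect rather than assume):

```py
# Hedged sketch: inspect the quantized ONNX export's expected inputs.
import onnxruntime as ort

sess = ort.InferenceSession("lightspeech_quant.onnx")
for inp in sess.get_inputs():
    print(inp.name, inp.shape, inp.type)
```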
lightspeech_quant.ort
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:dc6a103fddc6e1cee984b1536d405be50bd0edadec686148affef986baa72907
size 4866192
lightspeech_quant.tflite
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:89cadea98831ad7ad1f933a3cb11ae6997a2ae77687cbeb2c432334f7ddc07ab
size 4719208
model.h5
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1e08dd1dd0454b3b450e5f1ae0812f8c2e3186230c8298044eed84fc88f8232b
size 19485104
processor.json
ADDED
{"symbol_to_id": {"@PAD": 0, "@f": 1, "@h": 2, "@i": 3, "@j": 4, "@k": 5, "@l": 6, "@m": 7, "@n": 8, "@p": 9, "@s": 10, "@t": 11, "@t\u0361\u0283": 12, "@u": 13, "@v": 14, "@w": 15, "@x": 16, "@z": 17, "@\u00f0": 18, "@\u014b": 19, "@\u0251": 20, "@\u0253": 21, "@\u0254": 22, "@\u0257": 23, "@\u025b": 24, "@\u0260": 25, "@\u0263": 26, "@\u027e": 27, "@\u0283": 28, "@\u0284": 29, "@\u03b8": 30, "@\u1d50\u0253": 31, "@\u1d51g": 32, "@\u1dacv": 33, "@\u207fz": 34, "@\u207f\u0257": 35, "@\u207f\u0257\u0361\u0292": 36, "!": 37, ",": 38, ".": 39, "?": 40, ";": 41, ":": 42, "@SIL": 43, "@EOS": 44}, "id_to_symbol": {"0": "@PAD", "1": "@f", "2": "@h", "3": "@i", "4": "@j", "5": "@k", "6": "@l", "7": "@m", "8": "@n", "9": "@p", "10": "@s", "11": "@t", "12": "@t\u0361\u0283", "13": "@u", "14": "@v", "15": "@w", "16": "@x", "17": "@z", "18": "@\u00f0", "19": "@\u014b", "20": "@\u0251", "21": "@\u0253", "22": "@\u0254", "23": "@\u0257", "24": "@\u025b", "25": "@\u0260", "26": "@\u0263", "27": "@\u027e", "28": "@\u0283", "29": "@\u0284", "30": "@\u03b8", "31": "@\u1d50\u0253", "32": "@\u1d51g", "33": "@\u1dacv", "34": "@\u207fz", "35": "@\u207f\u0257", "36": "@\u207f\u0257\u0361\u0292", "37": "!", "38": ",", "39": ".", "40": "?", "41": ";", "42": ":", "43": "@SIL", "44": "@EOS"}, "speakers_map": {"sw-TZ-Victoria": 0}, "processor_name": "SwahiliIPAProcessor"}
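
For reference, `AutoProcessor` applies this mapping automatically, but the symbol table above can also be used directly; a minimal sketch, with an illustrative (unverified) IPA sequence for "habari":

```py
# Hedged sketch: map an IPA phoneme sequence to model input IDs using
# processor.json; phones are prefixed with "@" in the symbol table.
import json

with open("processor.json") as f:
    proc = json.load(f)

symbol_to_id = proc["symbol_to_id"]
phonemes = ["h", "\u0251", "\u0253", "\u0251", "\u027e", "i"]  # illustrative phones
input_ids = [symbol_to_id["@" + p] for p in phonemes] + [symbol_to_id["@EOS"]]
print(input_ids)
```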