Upload folder using huggingface_hub
- LICENSE +201 -0
- README.md +68 -0
- audio/__init__.py +6 -0
- audio/player.py +159 -0
- audio/streaming.py +152 -0
- benchmark_rtf_warm.py +60 -0
- config.py +53 -0
- generation/__init__.py +6 -0
- generation/chunking.py +100 -0
- generation/vllm_generator.py +363 -0
- render_radio_play.py +198 -0
- requirements.txt +9 -0
- server.py +422 -0
- test_rtf.py +54 -0
LICENSE (ADDED)
@@ -0,0 +1,201 @@
```
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based upon (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to use, reproduce, modify, display, perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright notice and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Support. You may choose to offer, and to charge a fee for, warranty, support, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or support.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same page as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
README.md (ADDED)
@@ -0,0 +1,68 @@
# Bulgarian Ref-Audio KaniTTS Server Code

This repo contains only the server code, for local use and download.

It is not a Hugging Face Space for hosted inference. The idea is that a user can download it and run it on their own machine with a GPU.

## What's inside

- `server.py` - FastAPI endpoint `/v1/audio/speech`
- `config.py` - configuration and model repo id
- `audio/` - codec decoding and reference-audio encoding logic
- `generation/` - vLLM prompt/generation logic
- `render_radio_play.py` - orchestrator for long scripts and radio plays
- `test_rtf.py` - one-shot RTF benchmark
- `benchmark_rtf_warm.py` - warm benchmark in a single process

## Model

By default the code expects the published model:

- `beleata74/kani-tts-400m-bg-refaudio5s`

You can override this with an environment variable:

```bash
export KANITTS_MODEL_PATH=beleata74/kani-tts-400m-bg-refaudio5s
```

## Quick start

```bash
git clone https://huggingface.co/datasets/beleata74/kani-tts-bg-refaudio-server-code
cd kani-tts-bg-refaudio-server-code
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
python server.py
```

Then:

```bash
curl http://localhost:8000/health
```

Example request:

```bash
curl -X POST http://localhost:8000/v1/audio/speech \
  -H "Content-Type: application/json" \
  -d '{
    "input": "Това е тест на българския ref-audio сървър.",
    "reference_audio_path": "/path/to/reference.wav",
    "response_format": "wav"
  }' \
  --output speech.wav
```
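The same request from Python, as a minimal sketch; it assumes the `requests` package is installed and the server from the quick start is running:

```python
import requests

# Mirror of the curl example above; the payload fields come from the
# /v1/audio/speech endpoint in server.py. The reference path is a placeholder.
payload = {
    "input": "Това е тест на българския ref-audio сървър.",
    "reference_audio_path": "/path/to/reference.wav",
    "response_format": "wav",
}

resp = requests.post("http://localhost:8000/v1/audio/speech", json=payload, timeout=300)
resp.raise_for_status()

with open("speech.wav", "wb") as f:
    f.write(resp.content)
```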

## Requirements

- Linux
- NVIDIA GPU
- CUDA-compatible PyTorch/vLLM setup
- `ffmpeg`

## Note

There is also a separate Hugging Face Space repo with the same code, but this repo is uploaded specifically as a downloadable code package.
audio/__init__.py (ADDED)
@@ -0,0 +1,6 @@
```python
"""Audio processing modules for Kani TTS"""

from .player import LLMAudioPlayer
from .streaming import StreamingAudioWriter

__all__ = ['LLMAudioPlayer', 'StreamingAudioWriter']
```
audio/player.py (ADDED)
@@ -0,0 +1,159 @@
```python
"""Audio player for LLM-generated speech tokens"""

import io

import numpy as np
import soundfile as sf
import torch
from nemo.collections.tts.models import AudioCodecModel
from scipy.signal import resample_poly

from config import (
    TOKENIZER_LENGTH, START_OF_TEXT, END_OF_TEXT,
    START_OF_SPEECH, END_OF_SPEECH, START_OF_HUMAN, END_OF_HUMAN,
    START_OF_AI, END_OF_AI, PAD_TOKEN, AUDIO_TOKENS_START, CODEBOOK_SIZE,
    CODEC_MODEL_NAME, NUM_CODEBOOKS, SAMPLE_RATE, REF_AUDIO_SECONDS,
)


class LLMAudioPlayer:
    def __init__(self, tokenizer) -> None:
        self.nemo_codec_model = AudioCodecModel.from_pretrained(CODEC_MODEL_NAME).eval()

        if torch.cuda.is_available():
            self.device = 'cuda'
        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
            self.device = 'mps'
        else:
            self.device = 'cpu'

        self.nemo_codec_model.to(self.device)

        # NOTE: torch.compile disabled for the codec: it is called with varying
        # frame counts, and the changing input shapes trigger too many recompilations.

        self.tokenizer = tokenizer

        self.tokenizer_length = TOKENIZER_LENGTH
        self.start_of_text = START_OF_TEXT
        self.end_of_text = END_OF_TEXT
        self.start_of_speech = START_OF_SPEECH
        self.end_of_speech = END_OF_SPEECH
        self.start_of_human = START_OF_HUMAN
        self.end_of_human = END_OF_HUMAN
        self.start_of_ai = START_OF_AI
        self.end_of_ai = END_OF_AI
        self.pad_token = PAD_TOKEN
        self.audio_tokens_start = AUDIO_TOKENS_START
        self.codebook_size = CODEBOOK_SIZE
        self.num_codebooks = NUM_CODEBOOKS
        self.sample_rate = SAMPLE_RATE

    def output_validation(self, out_ids):
        start_of_speech_flag = self.start_of_speech in out_ids
        end_of_speech_flag = self.end_of_speech in out_ids
        if not (start_of_speech_flag and end_of_speech_flag):
            raise ValueError('Special speech tokens do not exist!')

    def get_nano_codes(self, out_ids):
        start_a_idx = (out_ids == self.start_of_speech).nonzero(as_tuple=True)[0].item()
        end_a_idx = (out_ids == self.end_of_speech).nonzero(as_tuple=True)[0].item()
        if start_a_idx >= end_a_idx:
            raise ValueError('Invalid audio codes sequence!')

        audio_codes = out_ids[start_a_idx+1 : end_a_idx]
        if len(audio_codes) % self.num_codebooks:
            raise ValueError(f'The length of the sequence must be a multiple of {self.num_codebooks}!')
        audio_codes = audio_codes.reshape(-1, self.num_codebooks)
        audio_codes = audio_codes - torch.tensor([self.codebook_size * i for i in range(self.num_codebooks)])
        audio_codes = audio_codes - self.audio_tokens_start
        if (audio_codes < 0).sum().item() > 0:
            raise ValueError('Invalid audio tokens!')

        audio_codes = audio_codes.T.unsqueeze(0)
        len_ = torch.tensor([audio_codes.shape[-1]])
        return audio_codes, len_

    def get_text(self, out_ids):
        try:
            start_t_idx = (out_ids == self.start_of_text).tolist().index(True)
            end_t_idx = (out_ids == self.end_of_text).tolist().index(True)
            txt_tokens = out_ids[start_t_idx : end_t_idx+1]
            text = self.tokenizer.decode(txt_tokens, skip_special_tokens=True)
            return text
        except ValueError:
            return None

    def get_waveform(self, out_ids):
        out_ids = out_ids.flatten()
        self.output_validation(out_ids)
        audio_codes, len_ = self.get_nano_codes(out_ids)
        audio_codes, len_ = audio_codes.to(self.device), len_.to(self.device)
        with torch.inference_mode():
            reconstructed_audio, _ = self.nemo_codec_model.decode(tokens=audio_codes, tokens_len=len_)
            output_audio = reconstructed_audio.cpu().detach().numpy().squeeze()

        text = self.get_text(out_ids)
        return output_audio, text

    def decode_audio_chunk(self, audio_codes):
        """Decode a chunk of audio codes (shape: [num_frames, num_codebooks])."""
        if len(audio_codes) == 0:
            return None

        # Process audio codes: subtract offsets for each codebook
        audio_codes = torch.tensor(audio_codes, device=self.device)
        audio_codes = audio_codes - torch.tensor([self.codebook_size * i for i in range(self.num_codebooks)], device=self.device)
        audio_codes = audio_codes - self.audio_tokens_start

        if (audio_codes < 0).sum().item() > 0:
            return None  # Invalid tokens, skip

        # Shape: (1, 4, num_frames) - batch_size=1, num_codebooks=4, num_frames
        audio_codes = audio_codes.T.unsqueeze(0)
        len_ = torch.tensor([audio_codes.shape[-1]], device=self.device)

        with torch.inference_mode():
            reconstructed_audio, _ = self.nemo_codec_model.decode(tokens=audio_codes, tokens_len=len_)
            output_audio = reconstructed_audio.cpu().detach().numpy().squeeze()

        return output_audio

    def _normalize_reference_audio(self, audio, sample_rate, ref_seconds):
        if audio.ndim == 2:
            audio = audio.mean(axis=1)
        if sample_rate != self.sample_rate:
            audio = resample_poly(audio, self.sample_rate, sample_rate)
        max_samples = int(round(self.sample_rate * ref_seconds))
        return np.asarray(audio[:max_samples], dtype=np.float32)

    def _flatten_reference_codes(self, ref_tokens):
        codes = ref_tokens[0].detach().cpu().numpy().T
        offsets = np.array([self.codebook_size * i for i in range(self.num_codebooks)])
        codes = codes + offsets

        if len(codes) > 1:
            frame_changed = np.any(codes[1:] != codes[:-1], axis=1)
            keep = np.insert(frame_changed, 0, True)
            codes = codes[keep]

        return (codes + self.audio_tokens_start).flatten().tolist()

    def prepare_reference_audio_tokens(self, reference_audio_path=None, reference_audio_bytes=None, ref_seconds=REF_AUDIO_SECONDS):
        if bool(reference_audio_path) == bool(reference_audio_bytes):
            raise ValueError("Provide exactly one of reference_audio_path or reference_audio_bytes")

        if reference_audio_path:
            audio, sample_rate = sf.read(reference_audio_path)
        else:
            audio, sample_rate = sf.read(io.BytesIO(reference_audio_bytes))

        audio = self._normalize_reference_audio(audio, sample_rate, ref_seconds)
        audio_tensor = torch.tensor(audio, dtype=torch.float32, device=self.device).unsqueeze(0)
        audio_len = torch.tensor([audio_tensor.shape[1]], dtype=torch.long, device=self.device)

        with torch.inference_mode():
            ref_tokens, ref_tokens_len = self.nemo_codec_model.encode(audio=audio_tensor, audio_len=audio_len)

        return self._flatten_reference_codes(ref_tokens), ref_tokens_len.tolist()
```
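To illustrate the codebook-offset arithmetic shared by `decode_audio_chunk` and `_flatten_reference_codes`, a small sketch with the constants from `config.py` and one hypothetical frame of generated token IDs:

```python
import numpy as np

# Constants from config.py: AUDIO_TOKENS_START = 64400 + 10, CODEBOOK_SIZE = 4032.
AUDIO_TOKENS_START = 64410
CODEBOOK_SIZE = 4032
NUM_CODEBOOKS = 4

# One frame is NUM_CODEBOOKS consecutive token IDs; these values are hypothetical.
frame_tokens = np.array([64510, 68642, 72674, 76706])

# Undo the per-codebook block offset and the global audio-token offset,
# exactly as decode_audio_chunk does before calling the codec.
offsets = np.array([CODEBOOK_SIZE * i for i in range(NUM_CODEBOOKS)])
codes = frame_tokens - offsets - AUDIO_TOKENS_START
print(codes)  # [100 200 200 200] - every entry must land in [0, CODEBOOK_SIZE)
```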
audio/streaming.py (ADDED)
@@ -0,0 +1,152 @@
```python
"""Streaming audio writer with sliding window decoder"""

import threading
import queue
import numpy as np
from scipy.io.wavfile import write

from config import SAMPLE_RATE, CHUNK_SIZE, LOOKBACK_FRAMES


class StreamingAudioWriter:
    def __init__(self, player, output_file, sample_rate=SAMPLE_RATE,
                 chunk_size=CHUNK_SIZE, lookback_frames=LOOKBACK_FRAMES):
        """
        Sliding window decoder with lookback context.

        Args:
            player: LLMAudioPlayer instance
            output_file: Output WAV file path
            sample_rate: Audio sample rate (22050 Hz for nanocodec)
            chunk_size: Number of NEW frames to output per iteration
            lookback_frames: Number of frames to include from previous context for continuity
        """
        self.player = player
        self.output_file = output_file
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.lookback_frames = lookback_frames
        self.token_queue = queue.Queue()
        self.audio_chunks = []
        self.running = True
        self.inside_speech = False
        self.audio_token_buffer = []
        self.all_tokens = []      # Store all audio tokens for sliding window decoding
        self.frames_decoded = 0   # Track how many frames we've already output

    def decoder_worker(self):
        """Background thread that decodes audio chunks as they arrive"""
        speech_ended = False

        while self.running or not self.token_queue.empty():
            try:
                token_id = self.token_queue.get(timeout=0.1)

                # Check for start/end of speech markers
                if token_id == self.player.start_of_speech:
                    self.inside_speech = True
                    speech_ended = False
                    self.audio_token_buffer = []
                    continue

                if token_id == self.player.end_of_speech:
                    # Decode any remaining frames with sliding window
                    total_frames = len(self.all_tokens) // 4
                    remaining_frames = total_frames - self.frames_decoded

                    if remaining_frames >= 1:
                        # Decode from lookback point to end
                        start_frame = max(0, self.frames_decoded - self.lookback_frames)
                        start_token = start_frame * 4

                        tokens_to_decode = self.all_tokens[start_token:]
                        num_frames = len(tokens_to_decode) // 4

                        if num_frames > 0:
                            codes = np.array(tokens_to_decode[:num_frames * 4]).reshape(-1, 4)
                            audio_chunk = self.player.decode_audio_chunk(codes)

                            if audio_chunk is not None:
                                samples_per_frame = len(audio_chunk) // num_frames

                                # Skip lookback portion, only save new frames
                                lookback_skip = min(self.frames_decoded, self.lookback_frames)
                                skip_samples = lookback_skip * samples_per_frame
                                new_audio = audio_chunk[skip_samples:]

                                self.audio_chunks.append(new_audio)

                    self.inside_speech = False
                    speech_ended = True
                    self.audio_token_buffer = []
                    continue

                # Accumulate audio tokens (only if speech hasn't ended)
                if self.inside_speech and not speech_ended:
                    self.audio_token_buffer.append(token_id)
                    self.all_tokens.append(token_id)  # Keep all tokens for sliding window

                    # Decode when we have enough NEW frames to process
                    total_frames = len(self.all_tokens) // 4
                    new_frames = total_frames - self.frames_decoded

                    if new_frames >= self.chunk_size:
                        # Calculate sliding window: include lookback_frames from previous context
                        start_frame = max(0, self.frames_decoded - self.lookback_frames)
                        start_token = start_frame * 4

                        # Decode from start_frame to current end
                        tokens_to_decode = self.all_tokens[start_token:]
                        num_frames = len(tokens_to_decode) // 4

                        codes = np.array(tokens_to_decode[:num_frames * 4]).reshape(-1, 4)
                        audio_chunk = self.player.decode_audio_chunk(codes)

                        if audio_chunk is not None:
                            samples_per_frame = len(audio_chunk) // num_frames

                            # Skip the lookback portion - only save the NEW frames
                            lookback_skip = min(self.frames_decoded, self.lookback_frames)
                            skip_samples = lookback_skip * samples_per_frame

                            # Extract only the new chunk_size frames worth of audio
                            new_samples = self.chunk_size * samples_per_frame
                            new_audio = audio_chunk[skip_samples:skip_samples + new_samples]

                            self.audio_chunks.append(new_audio)
                            self.frames_decoded += self.chunk_size

                        # Clear buffer (we've stored everything in all_tokens)
                        self.audio_token_buffer = []

            except queue.Empty:
                continue

    def add_token(self, token_id):
        """Add a token to the processing queue"""
        self.token_queue.put(token_id)

    def finalize(self):
        """Stop the decoder thread and write final audio file"""
        self.running = False
        self.decoder_thread.join()

        if self.audio_chunks:
            # Concatenate all audio chunks
            full_audio = np.concatenate(self.audio_chunks)

            # Calculate actual audio duration
            actual_duration = len(full_audio) / self.sample_rate

            # Only write to file if output_file is specified
            if self.output_file:
                write(self.output_file, self.sample_rate, full_audio)

            return full_audio
        return None

    def start(self):
        """Start the decoder thread"""
        self.decoder_thread = threading.Thread(target=self.decoder_worker)
        self.decoder_thread.start()
```
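A worked example of the window bookkeeping in `decoder_worker`, using the defaults from `config.py` (CHUNK_SIZE=25, LOOKBACK_FRAMES=15):

```python
# Suppose 50 frames have already been emitted and 25 new frames just arrived.
CHUNK_SIZE = 25
LOOKBACK_FRAMES = 15
frames_decoded = 50

# The decode window starts LOOKBACK_FRAMES before the first new frame,
# so the codec sees prior context and chunk boundaries stay smooth.
start_frame = max(0, frames_decoded - LOOKBACK_FRAMES)  # 35
start_token = start_frame * 4                           # 140 (4 tokens per frame)

# After decoding, the lookback portion is discarded and only the
# CHUNK_SIZE new frames' worth of samples is appended to the output.
lookback_skip = min(frames_decoded, LOOKBACK_FRAMES)    # 15
print(start_frame, start_token, lookback_skip)          # 35 140 15
```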
benchmark_rtf_warm.py (ADDED)
@@ -0,0 +1,60 @@
```python
"""Warm RTF benchmark for the Bulgarian ref-audio vLLM setup."""

import asyncio
import os

from audio import LLMAudioPlayer, StreamingAudioWriter
from config import CHUNK_SIZE, LOOKBACK_FRAMES, REF_AUDIO_SECONDS, GPU_MEMORY_UTILIZATION, MAX_MODEL_LEN
from generation.vllm_generator import VLLMTTSGenerator


PROMPTS = [
    "Това е кратък тест за real-time factor на ref-audio сървъра.",
    "Рано сутрин реката е спокойна, а гласът трябва да звучи ясно, плавно и естествено.",
]


async def run_once(generator, player, prompt, reference_audio_tokens):
    audio_writer = StreamingAudioWriter(
        player,
        output_file=None,
        chunk_size=CHUNK_SIZE,
        lookback_frames=LOOKBACK_FRAMES,
    )
    audio_writer.start()
    result = await generator._generate_async(
        prompt,
        audio_writer,
        reference_audio_tokens=reference_audio_tokens,
    )
    audio_writer.finalize()
    return result


async def main():
    generator = VLLMTTSGenerator(
        tensor_parallel_size=1,
        gpu_memory_utilization=GPU_MEMORY_UTILIZATION,
        max_model_len=MAX_MODEL_LEN,
    )
    await generator.initialize_engine()

    player = LLMAudioPlayer(generator.tokenizer)
    reference_audio_path = os.environ.get("KANITTS_TEST_REF_AUDIO", "/home/nasko/besttts/REF/woman.wav")
    reference_audio_tokens, reference_frames = player.prepare_reference_audio_tokens(
        reference_audio_path=reference_audio_path,
        ref_seconds=REF_AUDIO_SECONDS,
    )

    print(f"Reference frames: {reference_frames}")
    for index, prompt in enumerate(PROMPTS, start=1):
        result = await run_once(generator, player, prompt, reference_audio_tokens)
        print(
            f"RUN {index}: tokens={len(result['all_token_ids'])} "
            f"dur={result['audio_duration']:.2f}s gen={result['generation_time']:.2f}s "
            f"RTF={result['rtf']:.3f}"
        )


if __name__ == "__main__":
    asyncio.run(main())
```
config.py (ADDED)
@@ -0,0 +1,53 @@
```python
"""Configuration and constants for ref-audio Kani TTS serving."""

import os
from pathlib import Path

# Tokenizer configuration
TOKENIZER_LENGTH = 64400
NUM_CODEBOOKS = 4

# Special tokens
START_OF_TEXT = 1
END_OF_TEXT = 2
START_OF_SPEECH = TOKENIZER_LENGTH + 1
END_OF_SPEECH = TOKENIZER_LENGTH + 2
START_OF_HUMAN = TOKENIZER_LENGTH + 3
END_OF_HUMAN = TOKENIZER_LENGTH + 4
START_OF_AI = TOKENIZER_LENGTH + 5
END_OF_AI = TOKENIZER_LENGTH + 6
PAD_TOKEN = TOKENIZER_LENGTH + 7
AUDIO_TOKENS_START = TOKENIZER_LENGTH + 10

# Audio configuration
CODEBOOK_SIZE = 4032
SAMPLE_RATE = 22050
REF_AUDIO_SECONDS = 5.0

# Streaming configuration
CHUNK_SIZE = 25       # Number of new frames to output per iteration
LOOKBACK_FRAMES = 15  # Number of frames to include from previous context

# Generation configuration
TEMPERATURE = 0.6
TOP_P = 0.95
REPETITION_PENALTY = 1.1
REPETITION_CONTEXT_SIZE = 20
MAX_TOKENS = 1200

# Long-form generation configuration
LONG_FORM_THRESHOLD_SECONDS = 15.0  # Auto-enable chunking for texts estimated >15s
LONG_FORM_CHUNK_DURATION = 12.0     # Target duration per chunk (stay within 5-15s training distribution)
LONG_FORM_SILENCE_DURATION = 0.2    # Silence between chunks in seconds


# Model paths
_ROOT = Path(__file__).resolve().parent
TOKENIZER_NAME = os.environ.get("KANITTS_TOKENIZER_NAME", "nineninesix/kani-tts-400m-0.3-pt")
MODEL_NAME = os.environ.get(
    "KANITTS_MODEL_PATH",
    "beleata74/kani-tts-400m-bg-refaudio5s",
)
CODEC_MODEL_NAME = "nvidia/nemo-nano-codec-22khz-0.6kbps-12.5fps"
GPU_MEMORY_UTILIZATION = float(os.environ.get("KANITTS_GPU_MEMORY_UTILIZATION", "0.5"))
MAX_MODEL_LEN = int(os.environ.get("KANITTS_MAX_MODEL_LEN", "1536"))
```
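A quick sanity-check sketch (not part of the file) of how these ID ranges fit together:

```python
from config import (
    TOKENIZER_LENGTH, START_OF_SPEECH, AUDIO_TOKENS_START,
    CODEBOOK_SIZE, NUM_CODEBOOKS,
)

# Control tokens sit just past the text vocabulary...
assert START_OF_SPEECH == TOKENIZER_LENGTH + 1       # 64401
assert AUDIO_TOKENS_START == TOKENIZER_LENGTH + 10   # 64410

# ...and audio tokens occupy NUM_CODEBOOKS contiguous blocks of CODEBOOK_SIZE IDs.
last_audio_token = AUDIO_TOKENS_START + CODEBOOK_SIZE * NUM_CODEBOOKS - 1
print(last_audio_token)  # 80537
```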
generation/__init__.py (ADDED)
@@ -0,0 +1,6 @@
```python
"""Text-to-speech generation modules"""

from .vllm_generator import VLLMTTSGenerator
from .chunking import split_into_sentences, estimate_duration

__all__ = ['VLLMTTSGenerator', 'split_into_sentences', 'estimate_duration']
```
generation/chunking.py (ADDED)
@@ -0,0 +1,100 @@
```python
"""Text chunking utilities for long-form speech generation"""

import re
from typing import List


def split_into_sentences(text: str, max_duration_seconds: float = 12.0) -> List[str]:
    """Split text into sentences suitable for TTS generation

    The chunking strategy ensures each chunk is within the model's training
    distribution (5-15 seconds of speech) for optimal quality.

    Args:
        text: Input text to split
        max_duration_seconds: Maximum target duration per chunk (default 12s)

    Returns:
        List of text chunks, each representing ~max_duration_seconds of speech

    Notes:
        - Uses heuristic of ~15 characters per second of speech
        - Splits on sentence boundaries (., !, ?)
        - Keeps sentences together when possible
        - Fallback to word-level splitting for very long sentences
    """
    # Heuristic: ~15 characters per second of speech (adjustable based on your model)
    max_chars = int(max_duration_seconds * 15)

    # Split into sentences using common punctuation
    # This regex keeps the punctuation with the sentence
    sentence_pattern = r'([.!?]+[\s\n]+|[.!?]+$)'
    parts = re.split(sentence_pattern, text)

    # Reconstruct sentences (combine text + punctuation)
    sentences = []
    for i in range(0, len(parts) - 1, 2):
        sentence = parts[i]
        if i + 1 < len(parts):
            sentence += parts[i + 1]
        sentences.append(sentence.strip())

    # Handle last part if no punctuation at end
    if len(parts) % 2 == 1 and parts[-1].strip():
        sentences.append(parts[-1].strip())

    # Filter empty sentences
    sentences = [s for s in sentences if s]

    # Group sentences into chunks
    chunks = []
    current_chunk = ""

    for sentence in sentences:
        # If single sentence exceeds max, split it by words
        if len(sentence) > max_chars:
            # Save current chunk if any
            if current_chunk:
                chunks.append(current_chunk.strip())
                current_chunk = ""

            # Split long sentence into word-based chunks
            words = sentence.split()
            word_chunk = ""
            for word in words:
                if len(word_chunk) + len(word) + 1 <= max_chars:
                    word_chunk += word + " "
                else:
                    chunks.append(word_chunk.strip())
                    word_chunk = word + " "

            if word_chunk.strip():
                current_chunk = word_chunk.strip()

        # Check if adding this sentence would exceed max
        elif len(current_chunk) + len(sentence) + 1 <= max_chars:
            current_chunk += " " + sentence if current_chunk else sentence
        else:
            # Save current chunk and start new one
            if current_chunk:
                chunks.append(current_chunk.strip())
            current_chunk = sentence

    # Add final chunk
    if current_chunk:
        chunks.append(current_chunk.strip())

    return chunks


def estimate_duration(text: str, chars_per_second: float = 15.0) -> float:
    """Estimate speech duration for given text

    Args:
        text: Input text
        chars_per_second: Average characters spoken per second

    Returns:
        Estimated duration in seconds
    """
    return len(text) / chars_per_second
```
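Example usage of the two helpers (illustrative text; the grouping follows the ~15 chars/s heuristic):

```python
from generation.chunking import split_into_sentences, estimate_duration

text = (
    "First sentence of a long narration. Second sentence continues the story. "
    "A third sentence pushes the passage past the single-chunk budget."
)

print(f"estimated: {estimate_duration(text):.1f}s")  # len(text) / 15
# With a 6 s budget (90 chars), the first two sentences fit in one chunk
# and the third starts a new one.
for chunk in split_into_sentences(text, max_duration_seconds=6.0):
    print(len(chunk), "chars:", chunk)
```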
generation/vllm_generator.py (ADDED)
@@ -0,0 +1,363 @@
| 1 |
+
"""VLLM-based text-to-speech generation logic with async streaming"""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import time
|
| 5 |
+
import torch
|
| 6 |
+
import numpy as np
|
| 7 |
+
from vllm import AsyncEngineArgs, AsyncLLMEngine, SamplingParams
|
| 8 |
+
from transformers import AutoTokenizer
|
| 9 |
+
|
| 10 |
+
from config import (
|
| 11 |
+
MODEL_NAME, TOKENIZER_NAME, START_OF_HUMAN, END_OF_TEXT, END_OF_HUMAN, END_OF_AI,
|
| 12 |
+
START_OF_SPEECH, END_OF_SPEECH, TEMPERATURE, TOP_P, REPETITION_PENALTY,
|
| 13 |
+
MAX_TOKENS, SAMPLE_RATE
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class VLLMTTSGenerator:
|
| 18 |
+
def __init__(self, tensor_parallel_size=1, gpu_memory_utilization=0.9, max_model_len=2048):
|
| 19 |
+
"""Initialize VLLM-based TTS generator with async streaming support
|
| 20 |
+
|
| 21 |
+
Args:
|
| 22 |
+
tensor_parallel_size: Number of GPUs to use for tensor parallelism
|
| 23 |
+
gpu_memory_utilization: Fraction of GPU memory to use (0.0 to 1.0)
|
| 24 |
+
max_model_len: Maximum sequence length
|
| 25 |
+
"""
|
| 26 |
+
print(f"Loading VLLM AsyncLLMEngine model: {MODEL_NAME}")
|
| 27 |
+
|
| 28 |
+
# Configure engine arguments
|
| 29 |
+
engine_args = AsyncEngineArgs(
|
| 30 |
+
model=MODEL_NAME,
|
| 31 |
+
tokenizer=TOKENIZER_NAME,
|
| 32 |
+
tensor_parallel_size=tensor_parallel_size,
|
| 33 |
+
max_model_len=max_model_len,
|
| 34 |
+
gpu_memory_utilization=gpu_memory_utilization,
|
| 35 |
+
enforce_eager=False, # Allow CUDA graphs (reduces kernel launch overhead)
|
| 36 |
+
max_num_seqs=1, # Single sequence for TTS - enables better CUDA graph optimization
|
| 37 |
+
dtype="bfloat16", # BF16 for faster inference on RTX 5090
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
# Create async engine
|
| 41 |
+
self.engine = None # Will be initialized in async context
|
| 42 |
+
self.engine_args = engine_args
|
| 43 |
+
|
| 44 |
+
self.tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_NAME)
|
| 45 |
+
|
| 46 |
+
# Pre-configure sampling parameters
|
| 47 |
+
self.sampling_params = SamplingParams(
|
| 48 |
+
temperature=TEMPERATURE,
|
| 49 |
+
top_p=TOP_P,
|
| 50 |
+
max_tokens=MAX_TOKENS,
|
| 51 |
+
repetition_penalty=REPETITION_PENALTY,
|
| 52 |
+
stop_token_ids=[END_OF_AI],
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
async def initialize_engine(self):
|
| 56 |
+
"""Initialize the async engine - call this during startup to avoid lazy loading"""
|
| 57 |
+
if self.engine is None:
|
| 58 |
+
print("Initializing VLLM AsyncLLMEngine...")
|
| 59 |
+
self.engine = AsyncLLMEngine.from_engine_args(self.engine_args)
|
| 60 |
+
print("VLLM AsyncLLMEngine initialized and ready!")
|
| 61 |
+
|
| 62 |
+
def prepare_input(self, prompt_text, reference_audio_tokens=None):
|
| 63 |
+
"""Build custom input_ids with ref-audio conditioning tokens."""
|
| 64 |
+
input_ids = self.tokenizer(prompt_text, return_tensors="pt").input_ids
|
| 65 |
+
reference_audio_tokens = reference_audio_tokens or []
|
| 66 |
+
|
| 67 |
+
start_token = torch.tensor([[START_OF_HUMAN]], dtype=torch.int64)
|
| 68 |
+
ref_tokens = torch.tensor(
|
| 69 |
+
[[START_OF_SPEECH] + reference_audio_tokens + [END_OF_SPEECH]],
|
| 70 |
+
dtype=torch.int64,
|
| 71 |
+
)
|
| 72 |
+
end_tokens = torch.tensor([[END_OF_TEXT, END_OF_HUMAN]], dtype=torch.int64)
|
| 73 |
+
modified_input_ids = torch.cat([start_token, ref_tokens, input_ids, end_tokens], dim=1)
|
| 74 |
+
|
| 75 |
+
# Convert to list for VLLM
|
| 76 |
+
return modified_input_ids[0].tolist()
|
| 77 |
+
|
| 78 |
+
async def _generate_async(self, prompt, audio_writer, max_tokens=MAX_TOKENS, reference_audio_tokens=None):
|
| 79 |
+
"""Async generator that streams tokens as they are generated
|
| 80 |
+
|
| 81 |
+
Args:
|
| 82 |
+
prompt: Text prompt to convert to speech
|
| 83 |
+
audio_writer: StreamingAudioWriter instance to receive tokens
|
| 84 |
+
max_tokens: Maximum number of tokens to generate
|
| 85 |
+
|
| 86 |
+
Returns:
|
| 87 |
+
Dictionary with generation metrics and results
|
| 88 |
+
"""
|
| 89 |
+
# Initialize engine if needed
|
| 90 |
+
if self.engine is None:
|
| 91 |
+
self.engine = AsyncLLMEngine.from_engine_args(self.engine_args)
|
| 92 |
+
|
| 93 |
+
# Prepare input_ids with special tokens
|
| 94 |
+
input_ids = self.prepare_input(prompt, reference_audio_tokens=reference_audio_tokens)
|
| 95 |
+
|
| 96 |
+
point_1 = time.time()
|
| 97 |
+
|
| 98 |
+
# Override max_tokens if different from default
|
| 99 |
+
if max_tokens != MAX_TOKENS:
|
| 100 |
+
sampling_params = SamplingParams(
|
| 101 |
+
temperature=TEMPERATURE,
|
| 102 |
+
top_p=TOP_P,
|
| 103 |
+
max_tokens=max_tokens,
|
| 104 |
+
repetition_penalty=REPETITION_PENALTY,
|
| 105 |
+
stop_token_ids=[END_OF_AI],
|
| 106 |
+
)
|
| 107 |
+
else:
|
| 108 |
+
sampling_params = self.sampling_params
|
| 109 |
+
|
| 110 |
+
# Generate unique request ID
|
| 111 |
+
request_id = f"tts-{id(prompt)}-{time.time()}"
|
| 112 |
+
|
| 113 |
+
# Stream tokens as they are generated
|
| 114 |
+
all_token_ids = []
|
| 115 |
+
audio_token_count = 0
|
| 116 |
+
inside_speech = False
|
| 117 |
+
|
| 118 |
+
# Add request to engine with TokensPrompt
|
| 119 |
+
results_generator = self.engine.generate(
|
| 120 |
+
{"prompt_token_ids": input_ids},
|
| 121 |
+
sampling_params,
|
| 122 |
+
request_id=request_id
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
async for request_output in results_generator:
|
| 126 |
+
# Get newly generated tokens
|
| 127 |
+
new_token_ids = request_output.outputs[0].token_ids
|
| 128 |
+
|
| 129 |
+
# Find which tokens are new since last iteration
|
| 130 |
+
num_new_tokens = len(new_token_ids) - len(all_token_ids)
|
| 131 |
+
if num_new_tokens > 0:
|
| 132 |
+
new_tokens = new_token_ids[-num_new_tokens:]
|
| 133 |
+
all_token_ids.extend(new_tokens)
|
| 134 |
+
|
| 135 |
+
# Stream each new token to audio_writer and count audio tokens
|
| 136 |
+
for token_id in new_tokens:
|
| 137 |
+
# print(f"[VLLM] Token {len(all_token_ids)}: {token_id}")
|
| 138 |
+
audio_writer.add_token(token_id)
|
| 139 |
+
|
| 140 |
+
# Track audio tokens efficiently during streaming
|
| 141 |
+
if token_id == audio_writer.player.start_of_speech:
|
| 142 |
+
inside_speech = True
|
| 143 |
+
elif token_id == audio_writer.player.end_of_speech:
|
| 144 |
+
inside_speech = False
|
| 145 |
+
elif inside_speech:
|
| 146 |
+
audio_token_count += 1
|
| 147 |
+
|
| 148 |
+
point_2 = time.time()
|
| 149 |
+
generation_time = point_2 - point_1
|
| 150 |
+
|
| 151 |
+
# Calculate Real Time Factor (RTF)
|
| 152 |
+
# Audio codec runs at 12.5 fps, audio tokens come in groups of 4 per frame
|
| 153 |
+
FRAMES_PER_SECOND = 12.5
|
| 154 |
+
TOKENS_PER_FRAME = 4
|
| 155 |
+
|
| 156 |
+
# Calculate audio duration: tokens / 4 = frames, frames / 12.5 = seconds
|
| 157 |
+
num_frames = audio_token_count // TOKENS_PER_FRAME
|
| 158 |
+
audio_duration = num_frames / FRAMES_PER_SECOND
|
| 159 |
+
rtf = generation_time / audio_duration if audio_duration > 0 else 0
|
| 160 |
+
|
| 161 |
+
# Calculate token counts
|
| 162 |
+
prompt_tokens = len(input_ids)
|
| 163 |
+
generated_tokens = len(all_token_ids)
|
| 164 |
+
total_tokens = prompt_tokens + generated_tokens
|
| 165 |
+
|
| 166 |
+
print(f"\n[VLLM] Generation complete. Prompt tokens: {prompt_tokens}, Generated tokens: {generated_tokens}, Total: {total_tokens}")
|
| 167 |
+
print(f" Audio tokens: {audio_token_count}, Frames: {num_frames}, Audio duration: {audio_duration:.2f}s")
|
| 168 |
+
print(f" Generation time: {generation_time:.2f}s, RTF: {rtf:.3f}")
|
| 169 |
+
|
| 170 |
+
# OPTIMIZATION: Skip text decoding - it's slow and not needed for TTS
|
| 171 |
+
|
| 172 |
+
return {
|
| 173 |
+
'all_token_ids': all_token_ids,
|
| 174 |
+
'generation_time': generation_time,
|
| 175 |
+
'audio_duration': audio_duration,
|
| 176 |
+
'rtf': rtf,
|
| 177 |
+
'point_1': point_1,
|
| 178 |
+
'point_2': point_2
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
def generate(self, prompt, audio_writer, max_tokens=MAX_TOKENS, reference_audio_tokens=None):
|
| 182 |
+
"""Generate speech tokens from text prompt with streaming
|
| 183 |
+
|
| 184 |
+
This is a synchronous wrapper around the async streaming implementation.
|
| 185 |
+
|
| 186 |
+
Args:
|
| 187 |
+
prompt: Text prompt to convert to speech
|
| 188 |
+
audio_writer: StreamingAudioWriter instance to receive tokens
|
| 189 |
+
max_tokens: Maximum number of tokens to generate
|
| 190 |
+
|
| 191 |
+
Returns:
|
| 192 |
+
Dictionary with generation metrics and results
|
| 193 |
+
"""
|
| 194 |
+
# Try to get the current event loop, or create a new one if needed
|
| 195 |
+
try:
|
| 196 |
+
loop = asyncio.get_running_loop()
|
| 197 |
+
except RuntimeError:
|
| 198 |
+
# No event loop running, create one
|
| 199 |
+
return asyncio.run(self._generate_async(prompt, audio_writer, max_tokens, reference_audio_tokens=reference_audio_tokens))
|
| 200 |
+
else:
|
| 201 |
+
# Event loop is running, we need to run in a thread pool
|
| 202 |
+
import concurrent.futures
|
| 203 |
+
import threading
|
| 204 |
+
|
| 205 |
+
result = None
|
| 206 |
+
exception = None
|
| 207 |
+
|
| 208 |
+
def run_in_new_loop():
|
| 209 |
+
nonlocal result, exception
|
| 210 |
+
try:
|
| 211 |
+
new_loop = asyncio.new_event_loop()
|
| 212 |
+
asyncio.set_event_loop(new_loop)
|
| 213 |
+
result = new_loop.run_until_complete(
|
| 214 |
+
self._generate_async(
|
| 215 |
+
prompt,
|
| 216 |
+
audio_writer,
|
| 217 |
+
max_tokens,
|
| 218 |
+
reference_audio_tokens=reference_audio_tokens,
|
| 219 |
+
)
|
| 220 |
+
)
|
| 221 |
+
new_loop.close()
|
| 222 |
+
except Exception as e:
|
| 223 |
+
exception = e
|
| 224 |
+
|
| 225 |
+
thread = threading.Thread(target=run_in_new_loop)
|
| 226 |
+
thread.start()
|
| 227 |
+
thread.join()
|
| 228 |
+
|
| 229 |
+
if exception:
|
| 230 |
+
raise exception
|
| 231 |
+
|
| 232 |
+
return result
|
| 233 |
+
|
    async def generate_long_form_async(self, text, reference_audio_tokens, player, max_chunk_duration=12.0,
                                       silence_duration=0.2, max_tokens=MAX_TOKENS):
        """Generate speech for long text by splitting into chunks with shared reference audio.

        This method handles texts longer than the model's training distribution (5-15s)
        by splitting them into sentence-based chunks and generating each with the same voice.

        Args:
            text: Input text (can be any length)
            reference_audio_tokens: Flattened reference audio token sequence reused for each chunk
            player: LLMAudioPlayer instance for decoding audio
            max_chunk_duration: Target duration per chunk in seconds (default 12s)
            silence_duration: Duration of silence between chunks in seconds (default 0.2s)
            max_tokens: Maximum tokens per generation

        Returns:
            Dictionary with:
            - audio: Concatenated audio as numpy array
            - chunks_info: List of info dicts for each chunk
            - total_duration: Total audio duration in seconds
            - total_generation_time: Total time spent generating
        """
        from generation.chunking import split_into_sentences, estimate_duration
        from audio.streaming import StreamingAudioWriter

        # Estimate whether the text needs chunking
        estimated_duration = estimate_duration(text)
        print(f"\n[Long-form] Estimated duration: {estimated_duration:.1f}s for text length: {len(text)} chars")

        # Split into chunks
        chunks = split_into_sentences(text, max_duration_seconds=max_chunk_duration)
        print(f"[Long-form] Split into {len(chunks)} chunks")

        if len(chunks) == 1:
            print("[Long-form] Single chunk - using standard generation")

        # Generate each chunk with the same reference audio tokens for voice consistency
        audio_segments = []
        chunks_info = []
        total_generation_time = 0.0

        for i, chunk in enumerate(chunks):
            print(f"\n[Long-form] Generating chunk {i+1}/{len(chunks)}: '{chunk[:50]}...'")

            prompt = chunk

            # Create an audio writer for this chunk
            audio_writer = StreamingAudioWriter(
                player,
                output_file=None,  # Don't write to file
                chunk_size=25,  # Use default chunk size
                lookback_frames=15  # Use default lookback
            )
            audio_writer.start()

            # Generate this chunk
            result = await self._generate_async(
                prompt,
                audio_writer,
                max_tokens=max_tokens,
                reference_audio_tokens=reference_audio_tokens,
            )

            # Finalize and collect the audio
            audio = audio_writer.finalize()

            if audio is not None and len(audio) > 0:
                audio_segments.append(audio)
                chunks_info.append({
                    'chunk_index': i,
                    'text': chunk,
                    'duration': result['audio_duration'],
                    'generation_time': result['generation_time'],
                    'rtf': result['rtf']
                })
                total_generation_time += result['generation_time']
            else:
                print(f"[Long-form] Warning: No audio generated for chunk {i+1}")

        # Concatenate audio segments with silence
        if len(audio_segments) == 0:
            raise ValueError("No audio was generated")

        if len(audio_segments) == 1:
            final_audio = audio_segments[0]
        else:
            final_audio = self._concatenate_with_silence(
                audio_segments,
                silence_duration=silence_duration
            )

        total_duration = len(final_audio) / SAMPLE_RATE

        print(f"\n[Long-form] Complete!")
        print(f"  Total chunks: {len(chunks)}")
        print(f"  Total duration: {total_duration:.2f}s")
        print(f"  Total generation time: {total_generation_time:.2f}s")
        print(f"  Overall RTF: {total_generation_time / total_duration:.3f}")

        return {
            'audio': final_audio,
            'chunks_info': chunks_info,
            'total_duration': total_duration,
            'total_generation_time': total_generation_time,
            'num_chunks': len(chunks)
        }

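    # A hedged sketch of driving the long-form path directly (not part of the
    # original file). It assumes generator, player, and ref_tokens come from
    # earlier setup, and that soundfile is available:
    #
    #     import asyncio
    #     import soundfile as sf
    #
    #     async def render_long(text):
    #         result = await generator.generate_long_form_async(
    #             text, reference_audio_tokens=ref_tokens, player=player)
    #         sf.write("long_form.wav", result['audio'], 22050)  # SAMPLE_RATE assumed 22050
    #         return result
    #
    #     asyncio.run(render_long(very_long_text))
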
    def _concatenate_with_silence(self, audio_segments, silence_duration=0.2):
        """Concatenate audio segments with a short silence between them.

        Args:
            audio_segments: List of numpy audio arrays
            silence_duration: Duration of silence in seconds

        Returns:
            Concatenated audio as numpy array
        """
        if len(audio_segments) == 1:
            return audio_segments[0]

        # Create the silence buffer (zeros)
        silence_samples = int(silence_duration * SAMPLE_RATE)
        silence = np.zeros(silence_samples, dtype=audio_segments[0].dtype)

        # Interleave silence between segments and concatenate once
        # (a single np.concatenate avoids quadratic copying)
        parts = [audio_segments[0]]
        for next_segment in audio_segments[1:]:
            parts.extend([silence, next_segment])
        return np.concatenate(parts)
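For orientation, the default 0.2 s gap at the 22050 Hz sample rate amounts to int(0.2 * 22050) = 4410 zero samples. A self-contained sanity check of the concatenation arithmetic (plain numpy, independent of the class above):

import numpy as np

a = np.ones(100, dtype=np.float32)
b = np.ones(50, dtype=np.float32)
silence = np.zeros(int(0.2 * 22050), dtype=np.float32)  # 4410 samples of silence
joined = np.concatenate([a, silence, b])
assert len(joined) == 100 + 4410 + 50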
render_radio_play.py
ADDED
@@ -0,0 +1,198 @@
import argparse
import json
import re
import time
from dataclasses import dataclass
from pathlib import Path

import numpy as np
import requests
import soundfile as sf


SAMPLE_RATE = 22050
PUNCTUATION = ("!", "?", ".", ",")
LINE_PATTERN = re.compile(r"^\[(?P<speaker>[^\]]+)\]:\s*(?P<text>.*)$")


@dataclass
class ScriptLine:
    line_number: int
    speaker: str
    text: str


@dataclass
class Chunk:
    segment_id: int
    chunk_id: int
    line_number: int
    speaker: str
    text: str
    reference_audio_path: Path


def parse_script(script_path: Path) -> list[ScriptLine]:
    items: list[ScriptLine] = []
    for line_number, raw_line in enumerate(script_path.read_text(encoding="utf-8").splitlines(), start=1):
        stripped = raw_line.strip()
        if not stripped:
            continue
        match = LINE_PATTERN.match(stripped)
        if not match:
            raise ValueError(f"Invalid script line {line_number}: {raw_line}")
        text = re.sub(r"\s+", " ", match.group("text")).strip()
        if not text:
            continue
        items.append(ScriptLine(line_number=line_number, speaker=match.group("speaker"), text=text))
    return items


def split_text(text: str, max_chars: int) -> list[str]:
    remaining = text.strip()
    parts: list[str] = []
    while len(remaining) > max_chars:
        window = remaining[:max_chars]
        split_at = max(window.rfind(mark) for mark in PUNCTUATION)
        if split_at <= 0:
            split_at = window.rfind(" ")
            if split_at <= 0:
                split_at = max_chars
        else:
            split_at += 1

        part = remaining[:split_at].strip()
        if part:
            parts.append(part)
        remaining = remaining[split_at:].lstrip()

    if remaining:
        parts.append(remaining)
    return parts


def build_chunks(lines: list[ScriptLine], ref_dir: Path, max_chars: int) -> list[Chunk]:
    chunks: list[Chunk] = []
    for segment_id, line in enumerate(lines):
        ref_path = ref_dir / f"{line.speaker}.wav"
        if not ref_path.exists():
            raise FileNotFoundError(f"Missing reference audio for speaker '{line.speaker}': {ref_path}")
        for chunk_id, part in enumerate(split_text(line.text, max_chars), start=1):
            chunks.append(
                Chunk(
                    segment_id=segment_id,
                    chunk_id=chunk_id,
                    line_number=line.line_number,
                    speaker=line.speaker,
                    text=part,
                    reference_audio_path=ref_path,
                )
            )
    return chunks


def synthesize_chunk(session: requests.Session, server_url: str, chunk: Chunk) -> tuple[np.ndarray, dict]:
    payload = {
        "input": chunk.text,
        "model": "tts-1",
        "reference_audio_path": str(chunk.reference_audio_path),
        "response_format": "pcm",
        "enable_long_form": False,
    }
    started = time.perf_counter()
    response = session.post(server_url, json=payload, timeout=300)
    elapsed = time.perf_counter() - started
    response.raise_for_status()

    pcm = np.frombuffer(response.content, dtype=np.int16).astype(np.float32) / 32768.0
    duration = len(pcm) / SAMPLE_RATE
    return pcm, {
        "segment_id": chunk.segment_id,
        "chunk_id": chunk.chunk_id,
        "line_number": chunk.line_number,
        "speaker": chunk.speaker,
        "text": chunk.text,
        "reference_audio_path": str(chunk.reference_audio_path),
        "latency_seconds": elapsed,
        "audio_duration_seconds": duration,
        "rtf": elapsed / duration if duration else None,
        "pcm_bytes": len(response.content),
    }


def main() -> None:
    parser = argparse.ArgumentParser(description="Render a radio play script through the local KaniTTS FastAPI server.")
    parser.add_argument("--script", type=Path, required=True)
    parser.add_argument("--ref-dir", type=Path, required=True)
    parser.add_argument("--output-dir", type=Path, required=True)
    parser.add_argument("--server-url", default="http://127.0.0.1:8010/v1/audio/speech")
    parser.add_argument("--max-chars", type=int, default=180)
    parser.add_argument("--same-line-pause-ms", type=float, default=60.0)
    parser.add_argument("--line-pause-ms", type=float, default=180.0)
    args = parser.parse_args()

    lines = parse_script(args.script)
    chunks = build_chunks(lines, args.ref_dir, args.max_chars)

    args.output_dir.mkdir(parents=True, exist_ok=True)
    output_wav = args.output_dir / f"{args.script.stem}_radio_play.wav"
    output_json = args.output_dir / f"{args.script.stem}_report.json"

    same_line_pause = np.zeros(int(SAMPLE_RATE * args.same_line_pause_ms / 1000.0), dtype=np.float32)
    line_pause = np.zeros(int(SAMPLE_RATE * args.line_pause_ms / 1000.0), dtype=np.float32)

    rendered_audio: list[np.ndarray] = []
    report_chunks: list[dict] = []
    total_started = time.perf_counter()

    with requests.Session() as session:
        for index, chunk in enumerate(chunks, start=1):
            audio, chunk_report = synthesize_chunk(session, args.server_url, chunk)
            rendered_audio.append(audio)
            report_chunks.append(chunk_report)

            next_chunk = chunks[index] if index < len(chunks) else None
            if next_chunk is not None:
                if next_chunk.segment_id == chunk.segment_id:
                    rendered_audio.append(same_line_pause)
                else:
                    rendered_audio.append(line_pause)

            # Guard against a None RTF (zero-length audio) so the progress line never crashes
            rtf_display = f"{chunk_report['rtf']:.3f}" if chunk_report["rtf"] is not None else "n/a"
            print(
                f"[{index}/{len(chunks)}] {chunk.speaker} line {chunk.line_number} part {chunk.chunk_id}: "
                f"{chunk_report['audio_duration_seconds']:.2f}s audio in {chunk_report['latency_seconds']:.2f}s "
                f"RTF={rtf_display}"
            )

    total_elapsed = time.perf_counter() - total_started
    full_audio = np.concatenate(rendered_audio) if rendered_audio else np.zeros(0, dtype=np.float32)
    sf.write(output_wav, full_audio, SAMPLE_RATE)

    total_audio_duration = len(full_audio) / SAMPLE_RATE
    overall_rtf = total_elapsed / total_audio_duration if total_audio_duration else None
    report = {
        "script": str(args.script),
        "ref_dir": str(args.ref_dir),
        "server_url": args.server_url,
        "max_chars": args.max_chars,
        "source_lines": len(lines),
        "rendered_chunks": len(chunks),
        "total_latency_seconds": total_elapsed,
        "total_audio_duration_seconds": total_audio_duration,
        "overall_rtf": overall_rtf,
        "output_wav": str(output_wav),
        "chunks": report_chunks,
    }
    output_json.write_text(json.dumps(report, ensure_ascii=False, indent=2), encoding="utf-8")

    print("\nRender complete")
    print(f"Output WAV: {output_wav}")
    print(f"Report JSON: {output_json}")
    print(f"Source lines: {len(lines)}")
    print(f"Rendered chunks: {len(chunks)}")
    print(f"Total audio duration: {total_audio_duration:.2f}s")
    print(f"Total latency: {total_elapsed:.2f}s")
    print(f"Overall RTF: {overall_rtf:.3f}" if overall_rtf is not None else "Overall RTF: n/a")


if __name__ == "__main__":
    main()
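For orientation, a hedged end-to-end example for the renderer (file names and speaker names below are hypothetical): every script line must match LINE_PATTERN, i.e. `[speaker]: text`, and each speaker needs a matching `<speaker>.wav` inside --ref-dir.

from pathlib import Path

# Hypothetical two-speaker script; the speaker names must match your reference WAVs.
Path("script.txt").write_text(
    "[alice]: Good evening, and welcome back to the midnight broadcast.\n"
    "[bob]: Thanks, Alice. Tonight we have quite a story to tell.\n",
    encoding="utf-8",
)
# With refs/alice.wav and refs/bob.wav in place, render with:
#   python render_radio_play.py --script script.txt --ref-dir refs --output-dir out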
requirements.txt
ADDED
@@ -0,0 +1,9 @@
fastapi
uvicorn
vllm
nemo-toolkit[tts]==2.4.0
transformers==4.57.1
torch
numpy
scipy
soundfile
server.py
ADDED
@@ -0,0 +1,422 @@
"""FastAPI server for Kani TTS with streaming support"""

import binascii
import io
import os
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, Response
from pydantic import BaseModel, Field
from typing import Optional, Literal
import numpy as np
from scipy.io.wavfile import write as wav_write
import base64
import json

from audio import LLMAudioPlayer, StreamingAudioWriter
from generation.vllm_generator import VLLMTTSGenerator
from config import (
    CHUNK_SIZE,
    LOOKBACK_FRAMES,
    TEMPERATURE,
    TOP_P,
    MAX_TOKENS,
    LONG_FORM_THRESHOLD_SECONDS,
    LONG_FORM_SILENCE_DURATION,
    LONG_FORM_CHUNK_DURATION,
    REF_AUDIO_SECONDS,
    GPU_MEMORY_UTILIZATION,
    MAX_MODEL_LEN,
    MODEL_NAME,
)

from nemo.utils.nemo_logging import Logger

nemo_logger = Logger()
nemo_logger.remove_stream_handlers()


app = FastAPI(title="Kani TTS API", version="1.0.0")

# Add CORS middleware to allow client.html to connect
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, specify your frontend domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global instances (initialized on startup)
generator = None
player = None


class TTSRequest(BaseModel):
    text: str
    temperature: Optional[float] = TEMPERATURE
    max_tokens: Optional[int] = MAX_TOKENS
    top_p: Optional[float] = TOP_P
    chunk_size: Optional[int] = CHUNK_SIZE
    lookback_frames: Optional[int] = LOOKBACK_FRAMES


class OpenAISpeechRequest(BaseModel):
    """OpenAI-compatible speech request model"""
    input: str = Field(..., description="Text to convert to speech")
    model: Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"] = Field(default="tts-1", description="TTS model to use")
    voice: Optional[str] = Field(default=None, description="Deprecated for this ref-audio server")
    reference_audio_path: Optional[str] = Field(default=None, description="Server-side path to a WAV file used as voice reference")
    reference_audio_base64: Optional[str] = Field(default=None, description="Base64-encoded WAV bytes used as voice reference")
    ref_seconds: Optional[float] = Field(default=REF_AUDIO_SECONDS, description="How many seconds to take from the reference audio")
    response_format: Literal["wav", "pcm"] = Field(default="wav", description="Audio format: wav or pcm")
    stream_format: Optional[Literal["sse", "audio"]] = Field(default=None, description="Use 'sse' for Server-Sent Events streaming")
    # Long-form generation parameters
    enable_long_form: Optional[bool] = Field(default=True, description="Auto-detect and use long-form generation for texts >15s")
    max_chunk_duration: Optional[float] = Field(default=12.0, description="Max duration per chunk in long-form mode (seconds)")
    silence_duration: Optional[float] = Field(default=0.2, description="Silence between chunks in long-form mode (seconds)")


@app.on_event("startup")
async def startup_event():
    """Initialize models on startup"""
    global generator, player
    print("🚀 Initializing VLLM TTS models...")

    # Use VLLM for faster inference
    generator = VLLMTTSGenerator(
        tensor_parallel_size=1,  # Increase for multi-GPU
        gpu_memory_utilization=GPU_MEMORY_UTILIZATION,
        max_model_len=MAX_MODEL_LEN,
    )

    # Initialize the async engine during startup to avoid lazy loading on first request
    await generator.initialize_engine()

    player = LLMAudioPlayer(generator.tokenizer)
    print("✅ VLLM TTS models initialized successfully!")


@app.get("/health")
async def health_check():
    """Check if server is ready"""
    return {
        "status": "healthy",
        "tts_initialized": generator is not None and player is not None,
        "model_path": MODEL_NAME,
    }


def _resolve_reference_tokens(request: OpenAISpeechRequest):
    if not request.reference_audio_path and not request.reference_audio_base64:
        raise HTTPException(status_code=400, detail="reference_audio_path or reference_audio_base64 is required")
    if request.reference_audio_path and request.reference_audio_base64:
        raise HTTPException(status_code=400, detail="Provide only one of reference_audio_path or reference_audio_base64")

    try:
        if request.reference_audio_path:
            if not os.path.exists(request.reference_audio_path):
                raise HTTPException(status_code=400, detail=f"Reference audio not found: {request.reference_audio_path}")
            return player.prepare_reference_audio_tokens(
                reference_audio_path=request.reference_audio_path,
                ref_seconds=request.ref_seconds or REF_AUDIO_SECONDS,
            )

        try:
            reference_audio_bytes = base64.b64decode(request.reference_audio_base64)
        except (binascii.Error, ValueError) as exc:
            raise HTTPException(status_code=400, detail=f"Invalid reference_audio_base64: {exc}") from exc

        return player.prepare_reference_audio_tokens(
            reference_audio_bytes=reference_audio_bytes,
            ref_seconds=request.ref_seconds or REF_AUDIO_SECONDS,
        )
    except HTTPException:
        raise
    except Exception as exc:
        raise HTTPException(status_code=400, detail=f"Failed to encode reference audio: {exc}") from exc


@app.post("/v1/audio/speech")
async def openai_speech(request: OpenAISpeechRequest):
    """OpenAI-compatible speech generation endpoint

    Supports both streaming (SSE) and non-streaming modes:
    - Without stream_format: Returns complete audio file (WAV or PCM)
    - With stream_format="sse": Returns Server-Sent Events with audio chunks
    """
    if not generator or not player:
        raise HTTPException(status_code=503, detail="TTS models not initialized")

    prompt_text = request.input
    reference_audio_tokens, reference_frames = _resolve_reference_tokens(request)

    # Streaming mode (SSE)
    if request.stream_format == "sse":
        async def sse_generator():
            """Generate Server-Sent Events with audio chunks"""
            import asyncio
            import queue as thread_queue
            from generation.chunking import estimate_duration, split_into_sentences

            chunk_queue = thread_queue.Queue()

            # Estimate duration to decide whether long-form generation is needed
            estimated_duration = estimate_duration(request.input)
            use_long_form = estimated_duration > LONG_FORM_THRESHOLD_SECONDS

            # Track token counts for usage reporting
            input_token_count = 0
            output_token_count = 0

            if use_long_form:
                # Long-form streaming: stream each sentence chunk as it's generated
                print(f"[Server] Using long-form SSE streaming (estimated {estimated_duration:.1f}s)")

                async def generate_async_long_form():
                    nonlocal input_token_count, output_token_count
                    try:
                        # Split into chunks
                        chunks = split_into_sentences(request.input, max_duration_seconds=request.max_chunk_duration or LONG_FORM_CHUNK_DURATION)
                        total_chunks = len(chunks)

                        for i, text_chunk in enumerate(chunks):
                            # Custom list wrapper that pushes chunks to the queue as they arrive
                            class ChunkList(list):
                                def append(self, chunk):
                                    super().append(chunk)
                                    chunk_queue.put(("chunk", chunk))

                            audio_writer = StreamingAudioWriter(
                                player,
                                output_file=None,
                                chunk_size=CHUNK_SIZE,
                                lookback_frames=LOOKBACK_FRAMES
                            )
                            audio_writer.audio_chunks = ChunkList()
                            audio_writer.start()

                            result = await generator._generate_async(
                                text_chunk,
                                audio_writer,
                                max_tokens=MAX_TOKENS,
                                reference_audio_tokens=reference_audio_tokens,
                            )
                            audio_writer.finalize()

                            # Track tokens
                            input_token_count += len(generator.prepare_input(text_chunk, reference_audio_tokens=reference_audio_tokens))
                            output_token_count += len(result.get('all_token_ids', []))

                            # Add silence between chunks (except after the last chunk)
                            if i < total_chunks - 1:
                                silence_samples = int((request.silence_duration or LONG_FORM_SILENCE_DURATION) * 22050)
                                silence = np.zeros(silence_samples, dtype=np.float32)
                                chunk_queue.put(("chunk", silence))

                        chunk_queue.put(("done", {"input": input_token_count, "output": output_token_count}))
                    except Exception as e:
                        print(f"Generation error: {e}")
                        import traceback
                        traceback.print_exc()
                        chunk_queue.put(("error", str(e)))

                gen_task = asyncio.create_task(generate_async_long_form())
            else:
                # Standard streaming for short texts
                print(f"[Server] Using standard SSE streaming (estimated {estimated_duration:.1f}s)")

                # Custom list wrapper that pushes chunks to the queue as they arrive
                class ChunkList(list):
                    def append(self, chunk):
                        super().append(chunk)
                        chunk_queue.put(("chunk", chunk))

                audio_writer = StreamingAudioWriter(
                    player,
                    output_file=None,
                    chunk_size=CHUNK_SIZE,
                    lookback_frames=LOOKBACK_FRAMES
                )
                audio_writer.audio_chunks = ChunkList()

                # Start generation in a background task
                async def generate_async():
                    nonlocal input_token_count, output_token_count
                    try:
                        audio_writer.start()
                        result = await generator._generate_async(
                            prompt_text,
                            audio_writer,
                            max_tokens=MAX_TOKENS,
                            reference_audio_tokens=reference_audio_tokens,
                        )
                        audio_writer.finalize()

                        # Extract token counts from the result
                        input_token_count = len(generator.prepare_input(prompt_text, reference_audio_tokens=reference_audio_tokens))
                        output_token_count = len(result.get('all_token_ids', []))

                        chunk_queue.put(("done", {"input": input_token_count, "output": output_token_count}))
                    except Exception as e:
                        print(f"Generation error: {e}")
                        import traceback
                        traceback.print_exc()
                        chunk_queue.put(("error", str(e)))

                # Start generation as an async task
                gen_task = asyncio.create_task(generate_async())

            # Stream chunks as they arrive
            try:
                while True:
                    msg_type, data = await asyncio.get_event_loop().run_in_executor(
                        None, lambda: chunk_queue.get(timeout=30)
                    )

                    if msg_type == "chunk":
                        # Convert numpy array to int16 PCM
                        pcm_data = (data * 32767).astype(np.int16)

                        # Encode as base64
                        audio_base64 = base64.b64encode(pcm_data.tobytes()).decode('utf-8')

                        # Send SSE event: speech.audio.delta
                        event_data = {
                            "type": "speech.audio.delta",
                            "audio": audio_base64
                        }
                        yield f"data: {json.dumps(event_data)}\n\n"

                    elif msg_type == "done":
                        # Send SSE event: speech.audio.done with usage stats
                        token_counts = data
                        event_data = {
                            "type": "speech.audio.done",
                            "usage": {
                                "input_tokens": token_counts["input"],
                                "output_tokens": token_counts["output"],
                                "total_tokens": token_counts["input"] + token_counts["output"]
                            }
                        }
                        yield f"data: {json.dumps(event_data)}\n\n"
                        break

                    elif msg_type == "error":
                        # Send error event
                        error_data = {
                            "type": "error",
                            "error": data
                        }
                        yield f"data: {json.dumps(error_data)}\n\n"
                        break

            finally:
                await gen_task

        return StreamingResponse(
            sse_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no"
            }
        )

    # Non-streaming mode (complete audio file)
    else:
        try:
            # Estimate duration to decide whether long-form generation is needed
            from generation.chunking import estimate_duration
            estimated_duration = estimate_duration(request.input)

            # Use long-form generation for longer texts
            use_long_form = estimated_duration > 15.0

            if use_long_form:
                print(f"[Server] Using long-form generation (estimated {estimated_duration:.1f}s)")
                result = await generator.generate_long_form_async(
                    text=request.input,
                    reference_audio_tokens=reference_audio_tokens,
                    player=player,
                    max_chunk_duration=request.max_chunk_duration or LONG_FORM_CHUNK_DURATION,
                    silence_duration=request.silence_duration or LONG_FORM_SILENCE_DURATION,
                    max_tokens=MAX_TOKENS
                )
                full_audio = result['audio']
            else:
                # Standard generation for short texts
                print(f"[Server] Using standard generation (estimated {estimated_duration:.1f}s)")
                audio_writer = StreamingAudioWriter(
                    player,
                    output_file=None,
                    chunk_size=CHUNK_SIZE,
                    lookback_frames=LOOKBACK_FRAMES
                )
                audio_writer.start()

                # Generate speech
                result = await generator._generate_async(
                    prompt_text,
                    audio_writer,
                    max_tokens=MAX_TOKENS,
                    reference_audio_tokens=reference_audio_tokens,
                )

                # Finalize and collect audio
                audio_writer.finalize()

                if not audio_writer.audio_chunks:
                    raise HTTPException(status_code=500, detail="No audio generated")

                # Concatenate all chunks
                full_audio = np.concatenate(audio_writer.audio_chunks)

            # Return based on response_format
            if request.response_format == "pcm":
                # Return raw PCM (int16)
                pcm_data = (full_audio * 32767).astype(np.int16)
                return Response(
                    content=pcm_data.tobytes(),
                    media_type="application/octet-stream",
                    headers={
                        "Content-Type": "application/octet-stream",
                        "X-Sample-Rate": "22050",
                        "X-Channels": "1",
                        "X-Bit-Depth": "16"
                    }
                )
            else:  # wav
                # Convert to WAV bytes
                wav_buffer = io.BytesIO()
                wav_write(wav_buffer, 22050, full_audio)
                wav_buffer.seek(0)

                return Response(
                    content=wav_buffer.read(),
                    media_type="audio/wav"
                )

        except HTTPException:
            # Preserve deliberate HTTP errors instead of wrapping them in a generic 500
            raise
        except Exception as e:
            print(e)
            raise HTTPException(status_code=500, detail=str(e))


@app.get("/")
async def root():
    """Root endpoint with API info"""
    return {
        "name": "Kani TTS API",
        "version": "1.0.0",
        "endpoints": {
            "/v1/audio/speech": "POST - ref-audio speech generation",
            "/health": "GET - Health check"
        }
    }


if __name__ == "__main__":
    import uvicorn
    print("🎤 Starting Kani TTS Server...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
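A hedged client-side sketch against a running instance of this server (port 8000 per the __main__ block above; the reference WAV path below is a placeholder for a file that exists on the server machine):

import requests

resp = requests.post(
    "http://127.0.0.1:8000/v1/audio/speech",
    json={
        "input": "Hello from Kani TTS!",
        "model": "tts-1",
        "reference_audio_path": "/path/to/voice.wav",  # placeholder server-side path
        "response_format": "wav",
    },
    timeout=300,
)
resp.raise_for_status()
with open("speech.wav", "wb") as f:
    f.write(resp.content)  # complete WAV file (non-streaming mode)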
test_rtf.py
ADDED
@@ -0,0 +1,54 @@
"""Quick test to verify RTF output"""

import asyncio
import os
from generation.vllm_generator import VLLMTTSGenerator
from audio import LLMAudioPlayer, StreamingAudioWriter
from config import CHUNK_SIZE, LOOKBACK_FRAMES, REF_AUDIO_SECONDS, GPU_MEMORY_UTILIZATION, MAX_MODEL_LEN

async def main():
    print("Initializing VLLM generator...")
    generator = VLLMTTSGenerator(
        tensor_parallel_size=1,
        gpu_memory_utilization=GPU_MEMORY_UTILIZATION,
        max_model_len=MAX_MODEL_LEN,
    )

    # Initialize engine
    await generator.initialize_engine()

    player = LLMAudioPlayer(generator.tokenizer)

    # Bulgarian test prompt: "This is a short test of the real-time factor of the ref-audio server."
    prompt = "Това е кратък тест за real-time factor на ref-audio сървъра."
    reference_audio_path = os.environ.get("KANITTS_TEST_REF_AUDIO", "/home/nasko/besttts/REF/woman.wav")
    reference_audio_tokens, reference_frames = player.prepare_reference_audio_tokens(
        reference_audio_path=reference_audio_path,
        ref_seconds=REF_AUDIO_SECONDS,
    )
    print(f"Reference frames: {reference_frames}")

    audio_writer = StreamingAudioWriter(
        player,
        output_file=None,
        chunk_size=CHUNK_SIZE,
        lookback_frames=LOOKBACK_FRAMES
    )
    audio_writer.start()

    # Generate
    result = await generator._generate_async(
        prompt,
        audio_writer,
        reference_audio_tokens=reference_audio_tokens,
    )
    audio_writer.finalize()

    # Print results
    print("\nResults:")
    print(f"  Tokens: {len(result['all_token_ids'])}")
    print(f"  Audio duration: {result['audio_duration']:.2f}s")
    print(f"  Generation time: {result['generation_time']:.2f}s")
    print(f"  RTF: {result['rtf']:.3f}")

if __name__ == "__main__":
    asyncio.run(main())
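As a reading aid: RTF here is generation_time / audio_duration, so values below 1.0 mean faster-than-real-time synthesis. A trivial worked check (not part of the repo):

# RTF sanity check: 4.0s of audio generated in 1.0s -> RTF 0.25 (4x real time)
generation_time, audio_duration = 1.0, 4.0
rtf = generation_time / audio_duration
assert rtf == 0.25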