diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..0ba2cd03177b4efe63ee594fa8b998367c241eef 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+outputs/02-25-2026/final_21-53-52_s1.wav filter=lfs diff=lfs merge=lfs -text
+outputs/02-25-2026/final_21-57-01_s1.wav filter=lfs diff=lfs merge=lfs -text
+outputs/02-25-2026/final_22-01-33_s1.wav filter=lfs diff=lfs merge=lfs -text
+outputs/02-25-2026/final_22-05-17_s1.wav filter=lfs diff=lfs merge=lfs -text
diff --git a/bark/__init__.py b/bark/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0b17c8b44869c554931c723446c65d3903821a9
--- /dev/null
+++ b/bark/__init__.py
@@ -0,0 +1,2 @@
+from .api import generate_audio, text_to_semantic, semantic_to_waveform, save_as_prompt  # re-export the public high-level API
+from .generation import SAMPLE_RATE, preload_models  # output sample rate constant and model pre-loading helper
diff --git a/bark/__main__.py b/bark/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cafccbafc79cf668801fe36c883a1a624f98913
--- /dev/null
+++ b/bark/__main__.py
@@ -0,0 +1,3 @@
+from .cli import cli
+
+cli()  # entry point for `python -m bark`; parses argv and runs generation
diff --git a/bark/__pycache__/__init__.cpython-313.pyc b/bark/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed468dcbabe6fb08d66f658aa7ac5d8d74b83540
Binary files /dev/null and b/bark/__pycache__/__init__.cpython-313.pyc differ
diff --git a/bark/__pycache__/api.cpython-313.pyc b/bark/__pycache__/api.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2139f4634c1980a888668f402407872b10bd482
Binary files /dev/null and b/bark/__pycache__/api.cpython-313.pyc differ
diff --git a/bark/__pycache__/generation.cpython-313.pyc b/bark/__pycache__/generation.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2104fe9553f134806664fcdce893a1c6cbc592ea
Binary files /dev/null and b/bark/__pycache__/generation.cpython-313.pyc differ
diff --git a/bark/__pycache__/model.cpython-313.pyc b/bark/__pycache__/model.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc074f35d7cfab885be648cbb476e7d12d5baec0
Binary files /dev/null and b/bark/__pycache__/model.cpython-313.pyc differ
diff --git a/bark/__pycache__/model_fine.cpython-313.pyc b/bark/__pycache__/model_fine.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b2c24bdabdacb232dc7c03b73747e99690b1be1
Binary files /dev/null and b/bark/__pycache__/model_fine.cpython-313.pyc differ
diff --git a/bark/__pycache__/settings.cpython-313.pyc b/bark/__pycache__/settings.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e625afa3cae44a4ed0136b5581e3a7a42cb5ccab
Binary files /dev/null and b/bark/__pycache__/settings.cpython-313.pyc differ
diff --git a/bark/api.py b/bark/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a4319ceaa13798912637290f8e9e88c50d5420a
--- /dev/null
+++ b/bark/api.py
@@ -0,0 +1,158 @@
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+from .generation import codec_decode, generate_coarse, generate_fine, generate_text_semantic
+
+
+def generate_with_settings(text_prompt, semantic_temp=0.6, eos_p=0.2, coarse_temp=0.7, fine_temp=0.5, voice_name=None, output_full=False):  # full text->audio pipeline with a separate temperature per stage
+
+    # generation with more control
+    x_semantic = generate_text_semantic(  # stage 1: text -> semantic tokens
+        text_prompt,
+        history_prompt=voice_name,  # optional voice/history prompt for cloning
+        temp=semantic_temp,
+        min_eos_p = eos_p,  # NOTE(review): non-PEP8 spacing kept byte-identical; eos_p sets the min end-of-sequence probability
+        use_kv_caching=True
+    )
+
+    x_coarse_gen = generate_coarse(  # stage 2: semantic -> coarse codec tokens
+        x_semantic,
+        history_prompt=voice_name,
+        temp=coarse_temp,
+        use_kv_caching=True
+    )
+    x_fine_gen = generate_fine(  # stage 3: coarse -> fine codec tokens
+        x_coarse_gen,
+        history_prompt=voice_name,
+        temp=fine_temp,
+    )
+
+    if output_full:
+        full_generation = {  # token bundle reusable as a history prompt (see save_as_prompt)
+            'semantic_prompt': x_semantic,
+            'coarse_prompt': x_coarse_gen,
+            'fine_prompt': x_fine_gen
+        }
+        return full_generation, codec_decode(x_fine_gen)  # (tokens, audio array) pair
+    return codec_decode(x_fine_gen)  # decode fine tokens to a numpy audio array
+
+
+def text_to_semantic(
+    text: str,
+    history_prompt: Optional[Union[Dict, str]] = None,
+    temp: float = 0.7,
+    silent: bool = False,
+):
+    """Generate semantic array from text.
+
+    Args:
+        text: text to be turned into audio
+        history_prompt: history choice for audio cloning
+        temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+        silent: disable progress bar
+
+    Returns:
+        numpy semantic array to be fed into `semantic_to_waveform`
+    """
+    x_semantic = generate_text_semantic(
+        text,
+        history_prompt=history_prompt,
+        temp=temp,
+        silent=silent,
+        use_kv_caching=True  # KV caching is always on for this wrapper
+    )
+    return x_semantic
+
+
+def semantic_to_waveform(
+    semantic_tokens: np.ndarray,
+    history_prompt: Optional[Union[Dict, str]] = None,
+    temp: float = 0.7,
+    silent: bool = False,
+    output_full: bool = False,
+):
+    """Generate audio array from semantic input.
+
+    Args:
+        semantic_tokens: semantic token output from `text_to_semantic`
+        history_prompt: history choice for audio cloning
+        temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+        silent: disable progress bar
+        output_full: return full generation to be used as a history prompt
+
+    Returns:
+        numpy audio array at sample frequency 24khz
+    """
+    coarse_tokens = generate_coarse(  # coarse stage uses the caller-supplied temp
+        semantic_tokens,
+        history_prompt=history_prompt,
+        temp=temp,
+        silent=silent,
+        use_kv_caching=True
+    )
+    fine_tokens = generate_fine(
+        coarse_tokens,
+        history_prompt=history_prompt,
+        temp=0.5,  # NOTE(review): fine-stage temp is hard-coded; `temp` only affects the coarse stage
+    )
+    audio_arr = codec_decode(fine_tokens)
+    if output_full:
+        full_generation = {  # same key layout save_as_prompt expects
+            "semantic_prompt": semantic_tokens,
+            "coarse_prompt": coarse_tokens,
+            "fine_prompt": fine_tokens,
+        }
+        return full_generation, audio_arr
+    return audio_arr
+
+
+def save_as_prompt(filepath, full_generation):  # persist a full generation dict as a reusable .npz voice prompt
+    assert(filepath.endswith(".npz"))  # NOTE(review): asserts are stripped under `python -O`; explicit raises would be safer
+    assert(isinstance(full_generation, dict))
+    assert("semantic_prompt" in full_generation)  # keys must match what generate_audio(output_full=True) returns
+    assert("coarse_prompt" in full_generation)
+    assert("fine_prompt" in full_generation)
+    np.savez(filepath, **full_generation)  # each dict key becomes an array in the archive
+
+
+def generate_audio(
+    text: str,
+    history_prompt: Optional[Union[Dict, str]] = None,
+    text_temp: float = 0.7,
+    waveform_temp: float = 0.7,
+    silent: bool = False,
+    output_full: bool = False,
+):
+    """Generate audio array from input text.
+
+    Args:
+        text: text to be turned into audio
+        history_prompt: history choice for audio cloning
+        text_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+        waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+        silent: disable progress bar
+        output_full: return full generation to be used as a history prompt
+
+    Returns:
+        numpy audio array at sample frequency 24khz
+    """
+    semantic_tokens = text_to_semantic(  # stage 1: text -> semantic tokens
+        text,
+        history_prompt=history_prompt,
+        temp=text_temp,
+        silent=silent,
+    )
+    out = semantic_to_waveform(  # stages 2-3: semantic -> coarse -> fine -> audio
+        semantic_tokens,
+        history_prompt=history_prompt,
+        temp=waveform_temp,
+        silent=silent,
+        output_full=output_full,
+    )
+    if output_full:
+        full_generation, audio_arr = out  # out is a (tokens dict, audio) pair in this mode
+        return full_generation, audio_arr
+    else:
+        audio_arr = out
+        return audio_arr
diff --git a/bark/assets/prompts/announcer.npz b/bark/assets/prompts/announcer.npz
new file mode 100644
index 0000000000000000000000000000000000000000..28e92eb5d6361c9322119ccc9acdc5c4d9183561
--- /dev/null
+++ b/bark/assets/prompts/announcer.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26f2d1a9e3b6fe453cf5fc8191de26cbfae6276c5b0f7c376c6a0f3c35867f83
+size 16794
diff --git a/bark/assets/prompts/custom/evil.npz b/bark/assets/prompts/custom/evil.npz
new file mode 100644
index 0000000000000000000000000000000000000000..161df3e93112369cf526a7752a5c9a1643750cec
--- /dev/null
+++ b/bark/assets/prompts/custom/evil.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0474629d1ca65c36ff054a335dc4c790766985c76f27e20d264167753c6726e0
+size 1379260
diff --git a/bark/assets/prompts/custom/readme.md b/bark/assets/prompts/custom/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..f472a4d3df625f6c8145dd8d15b5b67f93f43cde
--- /dev/null
+++ b/bark/assets/prompts/custom/readme.md
@@ -0,0 +1 @@
+For convenience, place your custom prompts here...
\ No newline at end of file
diff --git a/bark/assets/prompts/de_speaker_0.npz b/bark/assets/prompts/de_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5b2eb4d730924729d53b33ecdd393bfeec76f90e
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:008d7f3d0a52305a80c1abce26ccf4120181554a24055a0581894819b14f998d
+size 31940
diff --git a/bark/assets/prompts/de_speaker_1.npz b/bark/assets/prompts/de_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..cb6e76ac746434bfcc7826e55ff8fefb46f30d21
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5bb2ac34fa466f5d6804f48f51658d7b7d8d91ce7139d34c717c917578858fb
+size 31940
diff --git a/bark/assets/prompts/de_speaker_2.npz b/bark/assets/prompts/de_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d0184c01c7ab4dfe5b5c10fae329933840e5d6d2
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dedc8ab1949653480223f0c0cf3ebd20406d39b52e19908d32275eb8cfaf4b9
+size 23516
diff --git a/bark/assets/prompts/de_speaker_3.npz b/bark/assets/prompts/de_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c3fccd1ebf0a489a12d99ab14178f491806f66ec
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5abe325e6306a7f96725fcc6186c0eb147d2f068ce14b863e086cbf52b1986e
+size 29060
diff --git a/bark/assets/prompts/de_speaker_4.npz b/bark/assets/prompts/de_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..823611fd0f09582ab20b5b73a2e476f146586208
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91d102ad045aabc996f487d0d4f0b3fd289ef2da200d1df289cf5da298d23796
+size 20316
diff --git a/bark/assets/prompts/de_speaker_5.npz b/bark/assets/prompts/de_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8fd40cba82a9c85dafacc83f96a4f5c9a583239d
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aa116b450c74c60ef43d1fd141fe961e23ebeafdcb57991b22ae4a08c62cf44
+size 35084
diff --git a/bark/assets/prompts/de_speaker_6.npz b/bark/assets/prompts/de_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d232d37532510268e4d261751234ded7d3775870
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f95bd28bc7382b7294c0bb187b18873aa9c050b3fe5793166c547200c8e2da9
+size 31724
diff --git a/bark/assets/prompts/de_speaker_7.npz b/bark/assets/prompts/de_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..13173efc4c4ab9c45ab8aaba01784e50c112385d
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:332c5aee851c0544e1ad587fbc477b8d4eb28e852192fcd969d97c894b028a2b
+size 59348
diff --git a/bark/assets/prompts/de_speaker_8.npz b/bark/assets/prompts/de_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bdebbf4ea7e47f8dbed6893c9c05c0fb65d778c1
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0eefea2a0d702177f44df4b218b950119726c041cb505e1df36ab0fc0651018
+size 25116
diff --git a/bark/assets/prompts/de_speaker_9.npz b/bark/assets/prompts/de_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2a9e6f86eb1980e1d328b4aa692134333a03773a
--- /dev/null
+++ b/bark/assets/prompts/de_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:189e941a182411853351c56e422d51a4a8fad20f1f8b8f396042bb2ada3cceb2
+size 22180
diff --git a/bark/assets/prompts/en_speaker_0.npz b/bark/assets/prompts/en_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..6253a25757a8c7107b448bb788bffd27401d07f4
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb130b14872cc53381bdb867cee71c26a6d116af81dbf2542f3f44d11b8aaf3f
+size 22396
diff --git a/bark/assets/prompts/en_speaker_1.npz b/bark/assets/prompts/en_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8593e2a7f2d792ebace1d2d14e9826f0feb74779
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cdc113954acb3839e9112437a029d482925236bce91294803a42e3f1f493aea
+size 18396
diff --git a/bark/assets/prompts/en_speaker_2.npz b/bark/assets/prompts/en_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..af149463949d88744498c1003d1721c5316020da
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c27653e7db430ba4518cb5306c62a228329f928bfa566f68334545f0949b5eea
+size 33860
diff --git a/bark/assets/prompts/en_speaker_3.npz b/bark/assets/prompts/en_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..55514b9f1abe3ec035bd973b38fa016066f7722e
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22de48d9414836a5337e483b256ed916d51ece916c36669371d9e92b1323047b
+size 38124
diff --git a/bark/assets/prompts/en_speaker_4.npz b/bark/assets/prompts/en_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5ca5182e6526ae713cc033be26d49396db4404de
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3481fe27c9ffc73b68783ebe122934e0430a888c199ade914e97433df73038c1
+size 21220
diff --git a/bark/assets/prompts/en_speaker_5.npz b/bark/assets/prompts/en_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4655dfd3a4a72968f727ef06ad31e2d1babfcbe9
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b661d1573ab2df0d89b4b51e79d727dd5bfccfe8d740a84594de4028e1a23057
+size 15516
diff --git a/bark/assets/prompts/en_speaker_6.npz b/bark/assets/prompts/en_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4ffaca98c9bf76ee62c7693f4b6939d5f1fe4aab
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d8f92a1ea0383453614d1c20c8cfbeaf9ad28d9f5778f718bf0e54eb18c0245
+size 13436
diff --git a/bark/assets/prompts/en_speaker_7.npz b/bark/assets/prompts/en_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..11835338c58dd52dd2c5875e5ce6344f94ae17d7
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fdbb2c04efb4e81d179369b614678adba1cac9da8cc76fe6c40396da681b3a3
+size 35084
diff --git a/bark/assets/prompts/en_speaker_8.npz b/bark/assets/prompts/en_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..359f5227b7cfffd7103805ea7dc3feb01b2eae3b
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4233571cfc24030c9c2ed823f6393d8f3c99e26fef20d744a2e5ff59b93f086
+size 18980
diff --git a/bark/assets/prompts/en_speaker_9.npz b/bark/assets/prompts/en_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..853a75dbeafe384b7adc30aa83eccd26657abc87
--- /dev/null
+++ b/bark/assets/prompts/en_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb86c2ec884fcc906cb0d7342a9d84657f6d9abeac3c88c7b1bbfd1207ec09ca
+size 35940
diff --git a/bark/assets/prompts/es_speaker_0.npz b/bark/assets/prompts/es_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f623f750873cf95a954083e6af9d60cb9c3e0ece
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a4849970528104040e0ed6a96f9c705b58c72b5eee538baed1fa2283873b331
+size 27620
diff --git a/bark/assets/prompts/es_speaker_1.npz b/bark/assets/prompts/es_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8be6db7d61f6fe70b2def1ab10d8614c420ac3f4
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c41ca11134138c1cb6108f643c686f0d0c72f376a13576cd9490721a0916d07a
+size 25436
diff --git a/bark/assets/prompts/es_speaker_2.npz b/bark/assets/prompts/es_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1f5ce31fc85f4fea950bb0a4f4bce1387b131ddd
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9a6406ce99291a80f81bef895e1fd3d13b5204143d656cf0aa30c013f2974bd
+size 27620
diff --git a/bark/assets/prompts/es_speaker_3.npz b/bark/assets/prompts/es_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bd4b5b9a0e18381e275938a8ab5c3cf12c4168f8
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9e43586d2a185df543444fe3f7e604bfe56c9f1364f59c9671be75e88b14d02
+size 26500
diff --git a/bark/assets/prompts/es_speaker_4.npz b/bark/assets/prompts/es_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..214ea50a1737c87a0585be790c3fbbcf34bdb888
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52b4c89d19199265d9347ff83550ceeb5bead49c2552df776ef292f851d3de33
+size 24420
diff --git a/bark/assets/prompts/es_speaker_5.npz b/bark/assets/prompts/es_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1cb83ba796ef80847844b435c4a9098c36ce2fba
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c57dddcdf54e8e97813e887dc2e066efde628d17e10fad2a9824b552af485b2
+size 24900
diff --git a/bark/assets/prompts/es_speaker_6.npz b/bark/assets/prompts/es_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2f87cf761f8d118f367ea7f17fa7c87b05d0587d
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22b2dc4980a17c3dcd5f2833cc0eaab5dec06e7233520885fa792f618606dc68
+size 34820
diff --git a/bark/assets/prompts/es_speaker_7.npz b/bark/assets/prompts/es_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d5e98054d1d72e8d4a9d307457f43d1765626f61
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c848b3561977abaed30f38fcda853283ae04c11457483347c8baaa2d5a5f94d3
+size 21596
diff --git a/bark/assets/prompts/es_speaker_8.npz b/bark/assets/prompts/es_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..e69d6c91831e5edfd06f6c074ba7452550b8c7f1
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:691b4a12bbfd8f0e04df1ed793de2a4ada97ae04a7546e3bee12aaa094b7e156
+size 18660
diff --git a/bark/assets/prompts/es_speaker_9.npz b/bark/assets/prompts/es_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5a823149f919a9b6131b7c4a9a149cb200572a56
--- /dev/null
+++ b/bark/assets/prompts/es_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dda9f490517edf9447e2f02de3bec3877515a086e9668d7f0abb0d800d82ab6
+size 22660
diff --git a/bark/assets/prompts/fr_speaker_0.npz b/bark/assets/prompts/fr_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4fb5c14e5de259906f040f48996954dc06fda09f
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f483b271820be529ffc95968a1b7cd5e5f63137c30649192b1e10a935a8b846c
+size 30604
diff --git a/bark/assets/prompts/fr_speaker_1.npz b/bark/assets/prompts/fr_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..211ac75f015f11bc4aca7d713442b4e7fb880438
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba3805ef05a285f8501762900b1919631b2fd4274ee8d7cf4b4c432afd6a7635
+size 29324
diff --git a/bark/assets/prompts/fr_speaker_2.npz b/bark/assets/prompts/fr_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3fd8b3a2114a63e20ba1bf683b6a900cf6f3481d
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3e7654e74d80a7068745838b1640c72d3616fbb2fa8f88de997d252139f7b74
+size 51084
diff --git a/bark/assets/prompts/fr_speaker_3.npz b/bark/assets/prompts/fr_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4c2a4885f4f41a55996c587db29a96a30b9ced5a
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e67de23fa486d091eaea3d276dcf640ed0d34079fc5e78ae9e4ab0f758341af2
+size 31460
diff --git a/bark/assets/prompts/fr_speaker_4.npz b/bark/assets/prompts/fr_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f45f488d6f7edf2ff5ff5e9161c14050be5db5bb
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0e02e7b5f98b834968a47b1dbbb7acb18b681152461ae08e16c4b5ee93cbbcd
+size 36364
diff --git a/bark/assets/prompts/fr_speaker_5.npz b/bark/assets/prompts/fr_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d80e28998de5668d712b03b3474f7e592e01b108
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f771bcf5db66f2865a8023874291a6d706154853c9c9bdecd0ab0aeae3bd0a59
+size 44044
diff --git a/bark/assets/prompts/fr_speaker_6.npz b/bark/assets/prompts/fr_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..585152d9d23da138343c515d2c55b7276dd6755c
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21906f0c2dc2578662cdc6359a03a96e02aa296c02d0cd3c50cb9dca4379ae9a
+size 43564
diff --git a/bark/assets/prompts/fr_speaker_7.npz b/bark/assets/prompts/fr_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b2192d2e117b7e1d19878d7b7fa2a99ab7d5f0bb
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51b48089d9a29cc2dc8db21393fb67558cfa75a6aa46d1b495d483d13fffa04d
+size 53908
diff --git a/bark/assets/prompts/fr_speaker_8.npz b/bark/assets/prompts/fr_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0ccf49d404284cf135084dd7d7c048c7c04f8201
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e949256eecd733f22eed7b27e61bcf9331108f88849b39882723a68dac9d8cf
+size 33060
diff --git a/bark/assets/prompts/fr_speaker_9.npz b/bark/assets/prompts/fr_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..50488e936dd37e2d5de8613424acbe18839b1693
--- /dev/null
+++ b/bark/assets/prompts/fr_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:064d376376a3986e9c576851c61679c26d82de9023cfd2bb5b4b58b49c89940f
+size 31244
diff --git a/bark/assets/prompts/hi_speaker_0.npz b/bark/assets/prompts/hi_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..570622903b1918a1c6a128fbfbbb6530186834f5
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e17c25e0974142d03a3e1faa9ad69d8c737e1a0ed69b190ccd6a6ede69f99665
+size 32580
diff --git a/bark/assets/prompts/hi_speaker_1.npz b/bark/assets/prompts/hi_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9a1d363af7637994128de495fef17c4adf0a768f
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a426fc71fc502ac801f171ea1aad7d5e2b1466a2f959033fa6a6397ffb24aae2
+size 23036
diff --git a/bark/assets/prompts/hi_speaker_2.npz b/bark/assets/prompts/hi_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bf4d9ad393306579daee5d3caaf1b7501c59d0d2
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6c58938653d80a56381b63b0befd0b1efba59c304ccaa76cd3a3626f81a3207
+size 26820
diff --git a/bark/assets/prompts/hi_speaker_3.npz b/bark/assets/prompts/hi_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..501e53adc0729fbf4f81a295e2f28ac2429d8952
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:388ed364e507437d42947f9a2d44b5e59d975bf100fcdb1d32801a2607955046
+size 28684
diff --git a/bark/assets/prompts/hi_speaker_4.npz b/bark/assets/prompts/hi_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..317b52f0b0984fbda8910ee790c999f263c00801
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2995c7d84e750aedbed9a781b22227d630d8a29fd26d658815561f818e955d08
+size 24476
diff --git a/bark/assets/prompts/hi_speaker_5.npz b/bark/assets/prompts/hi_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..aa15bc55722df5844e67cd49d3e8a6754f16367d
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80d8f948811ef8229e96c57bbc1450def49e2c8517a05ed15292419963df30ca
+size 33004
diff --git a/bark/assets/prompts/hi_speaker_6.npz b/bark/assets/prompts/hi_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2cd59c258d988f3c643c383ed5896e24dca91668
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4209c9fce350e20df2729820ca2539d60a19b618cb4c23bc3ab1391840a4a6e9
+size 24900
diff --git a/bark/assets/prompts/hi_speaker_7.npz b/bark/assets/prompts/hi_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..dd12b21b026f63cf23b289e93ca1854188794003
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fe296c0a623a12e0e93f6d0ccc1b7662f0988fdc56797be3494cea4e8dcf7e0
+size 30020
diff --git a/bark/assets/prompts/hi_speaker_8.npz b/bark/assets/prompts/hi_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c0c17228d69e7dbe9c18bb383addd529d849c0e5
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0136ce35d5848e253738d7a8cbdb19c4ceb471b54c0a2886fec22192c48a5d
+size 24956
diff --git a/bark/assets/prompts/hi_speaker_9.npz b/bark/assets/prompts/hi_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..186e5f787263c3532fded47a2f710dc8920e234e
--- /dev/null
+++ b/bark/assets/prompts/hi_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41d245ab7ebe601366e085138dddc72b8c06a23faf9f4763466d7413fce88995
+size 30180
diff --git a/bark/assets/prompts/it_speaker_0.npz b/bark/assets/prompts/it_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..7c602fa064907956d84ef72a50c2ba6c98edf19d
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f9c87011c846349276873815c9c95bb2cde9d80b781f8b349f87a186b12039f
+size 46604
diff --git a/bark/assets/prompts/it_speaker_1.npz b/bark/assets/prompts/it_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..85a0aae447ce2a4d135561dac7a9e012b5e88859
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6bc095f18f987bee31e85a2aceed8ef298bc093828b70c325b54e198f3463cc
+size 24900
diff --git a/bark/assets/prompts/it_speaker_2.npz b/bark/assets/prompts/it_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..68fe40537e5cfd1d1f38a349c0703ffc2be453ba
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2d1ed1c5a9937595f30c6955cfdebc8828db9d7ee40a86ae2c7409bbfc58839
+size 45268
diff --git a/bark/assets/prompts/it_speaker_3.npz b/bark/assets/prompts/it_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c83d9757533d0cd8070c77be1566567632b8a8da
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f519930f3aad2d5b9a826bb2cb33370f98d9630fa4c781419fbd8ad2faa979
+size 52684
diff --git a/bark/assets/prompts/it_speaker_4.npz b/bark/assets/prompts/it_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..fd54abc8a7095becf62ab344393f4b97ddfd91b6
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86586c07ac2139c17bc17487e240f75d834818e1df67e9cf6b855ee54bdb6f12
+size 22396
diff --git a/bark/assets/prompts/it_speaker_5.npz b/bark/assets/prompts/it_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0cd1b38e7cda0b0635f5ccfda8987f9d2063706b
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a64ae582db6a03be5044d3091f1ee9b21f58446dacabd308bb556ceb60c54001
+size 42764
diff --git a/bark/assets/prompts/it_speaker_6.npz b/bark/assets/prompts/it_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..e45025d2e14570cfe0e8357442cfd0444fc1a15e
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a93e98ac9f725263ae677af7a9dc1693c9a318e1091bdead6e1028e9c92e683
+size 34180
diff --git a/bark/assets/prompts/it_speaker_7.npz b/bark/assets/prompts/it_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d2abfa5fcac70f3eb522093589db6745e8f6c79b
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eff33fcbf105889bc524a777fab002ec74e12de78396b312b002c4617789bcdc
+size 41268
diff --git a/bark/assets/prompts/it_speaker_8.npz b/bark/assets/prompts/it_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a56572c43ec413c89bf10dbf17ea095f8b502874
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5e076ba04d147bfec35248e304ddfae522915d9781f8a02d3e1e67a9a40ea72
+size 29964
diff --git a/bark/assets/prompts/it_speaker_9.npz b/bark/assets/prompts/it_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c577a115449c7d723271a562c73d14f5653523f8
--- /dev/null
+++ b/bark/assets/prompts/it_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb8bbe06a881e48a7f7a6a0ef93dee65fa35b37a0b295d054f4cbf5df040f0a8
+size 35940
diff --git a/bark/assets/prompts/ja_speaker_0.npz b/bark/assets/prompts/ja_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..deef4d6749928f6fd55697c9b0133f946fbf7391
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d9b793498fb04c1aef4355dac1ccf8dae0b8365f84d539d2d4374bfb4882267
+size 24900
diff --git a/bark/assets/prompts/ja_speaker_1.npz b/bark/assets/prompts/ja_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..da9dc373d9f28adf4d4cdcfad64bf4fdfbc1619b
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7b55962d62c708e3446cd654962b558de803081b84dd9a074602fa224e66203
+size 25220
diff --git a/bark/assets/prompts/ja_speaker_2.npz b/bark/assets/prompts/ja_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0d8ab3b1ac8a4956f6f850255d44515048d0133e
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6865c99d9cbf70ac3c75e3c9f083074cc5a1247771e4989bc0029e5b8265c3f4
+size 44148
diff --git a/bark/assets/prompts/ja_speaker_3.npz b/bark/assets/prompts/ja_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..ddac82e2abad3e958e002ba76ef33284d9725c57
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e1af7de8c66f05fa54d3aa5f8968d4f4a97857da8b3f9653b9e42c0d22d5e9f
+size 24796
diff --git a/bark/assets/prompts/ja_speaker_4.npz b/bark/assets/prompts/ja_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d6f78caff3e5cd1d6141562b8afa95b4317df669
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3baff37c42adc35f7e3a81e518afd9c02d9c45792a350b10ff72c3c28672857a
+size 37964
diff --git a/bark/assets/prompts/ja_speaker_5.npz b/bark/assets/prompts/ja_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3d5488e6c83dd2fcac741a97e33ba32a494f5820
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39a1d7e383a57fa5325bd604f71ad51db42308e0461ff23f14e97b4d0c08c5a9
+size 22716
diff --git a/bark/assets/prompts/ja_speaker_6.npz b/bark/assets/prompts/ja_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3e898497cd5b849c8aa1087b4e23126573532e42
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:259578cdb7eb4628b3d514a4bd7737ff3e76010f8a6f906241f24a4574e78b8c
+size 24580
diff --git a/bark/assets/prompts/ja_speaker_7.npz b/bark/assets/prompts/ja_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..97153367c600e5b4aa143dff679c2a973125ee30
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de23f5343a3e7d4fb807ec5e20ba37ea3eec925554924f03043f17550eaf9237
+size 33380
diff --git a/bark/assets/prompts/ja_speaker_8.npz b/bark/assets/prompts/ja_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8cc42d950eaa5d4c87fae6f63d0e2b82ebc0cfd6
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc2c1f34894cdd6ea6fc42686e7886da0b4f0782256afd811927ecc373715c63
+size 50548
diff --git a/bark/assets/prompts/ja_speaker_9.npz b/bark/assets/prompts/ja_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5a25762ae7fad586283bdc3aa280fc8486878721
--- /dev/null
+++ b/bark/assets/prompts/ja_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:210a0a10d67480cebc8b4b5140d04f9c9e39ad85d1c1aec74cf59edbee4c0721
+size 29540
diff --git a/bark/assets/prompts/ko_speaker_0.npz b/bark/assets/prompts/ko_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..aa55581a2a38c245aec4aaee63eb68326baceef0
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b15efeee85aff5768722dcec6fb5fb440afabc91ecfc605c9fd02eddc4c4133d
+size 24156
diff --git a/bark/assets/prompts/ko_speaker_1.npz b/bark/assets/prompts/ko_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9d8e19308d4c2e4df2db1588da32fb21061e753e
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b1d1323b815260483e2c3da6a4b7c39091b7aa71d09e955351edf6f21ffe218
+size 26396
diff --git a/bark/assets/prompts/ko_speaker_2.npz b/bark/assets/prompts/ko_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4d0e7a49bca4ccdd72c9c152ca6ae45745828171
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9081b57e01e7717284f530d66dfe0ef2e8e3a0f0e8b2064db7cb8afc04f04954
+size 31940
diff --git a/bark/assets/prompts/ko_speaker_3.npz b/bark/assets/prompts/ko_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b953a2461fff793ad18779fe04ee2f09015a7692
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49c9472b53b209e5d435b05a8b842908ce10865ac0553d284f9b434330043a7f
+size 56628
diff --git a/bark/assets/prompts/ko_speaker_4.npz b/bark/assets/prompts/ko_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2d1281dd33f24a8c8fd6d52226a7e88215faead1
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25a9840ebf6b57af5ac0e3f4a8ce734f8926e0571eaeaf0dfd7dbbcfc5745626
+size 23356
diff --git a/bark/assets/prompts/ko_speaker_5.npz b/bark/assets/prompts/ko_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9b1d783811226098e71d753257d9ed4c09852382
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dc503a2f074f9df41cb407265c4a089179c7849d1dcd7774bc2439b616f25e8
+size 29004
diff --git a/bark/assets/prompts/ko_speaker_6.npz b/bark/assets/prompts/ko_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3bb297dc00a4586b03b5923e43b620ef0bba0093
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:620d16320908c68ea72dd06ffcdcf61ba77f67a41a02cc4e537ff365b03fb519
+size 30500
diff --git a/bark/assets/prompts/ko_speaker_7.npz b/bark/assets/prompts/ko_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..82c3b45b4d8b87c1a1ea7782ef98cd19783fef74
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d8fbf756cbe523a21ca7400e514fea8c3a3afb3537eec49a2d0e21112a275b0
+size 22180
diff --git a/bark/assets/prompts/ko_speaker_8.npz b/bark/assets/prompts/ko_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9beb955d8682c4b0b77894c7ded8e8b6624d5d5b
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c883e76824b9b9a3ae1da4d7cdd6f5a23e868a1c73842ffd11739db067f9d5d2
+size 24476
diff --git a/bark/assets/prompts/ko_speaker_9.npz b/bark/assets/prompts/ko_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..efb35be697bced1c219ce1cd48b862b6d7a5d574
--- /dev/null
+++ b/bark/assets/prompts/ko_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36efef182c0f3e11aaf62ea69dbf006d75a7e23ff5e2a28086c9e21d06c9948a
+size 21916
diff --git a/bark/assets/prompts/pl_speaker_0.npz b/bark/assets/prompts/pl_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0fe56845b647bfa44c20d7f61148c923fe4af5d2
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fab5442ed0f4c23cd17613ca5bf321d2d2908e94cded6a529554e3e695f33eb5
+size 39780
diff --git a/bark/assets/prompts/pl_speaker_1.npz b/bark/assets/prompts/pl_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8f6e4e4297abf87be21a2c4dc7f84c1961c09a16
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c454b61f7762c08d6f1d8f2519b295c230533967aa41ca7376582160705434b6
+size 26500
diff --git a/bark/assets/prompts/pl_speaker_2.npz b/bark/assets/prompts/pl_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9ed3c7e2edaba9c1b130228b80666c316f975add
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1485325fbaddbb5840e6f4f2232ae65a79740496824f5bbea5e6b06538577749
+size 43084
diff --git a/bark/assets/prompts/pl_speaker_3.npz b/bark/assets/prompts/pl_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f5d41103a0a6316291e067dc5c149b9535c16752
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b01909eb0b9d18e9558ab54ec4b3a89ee74fe9001cb48b425fe996969ec84129
+size 42284
diff --git a/bark/assets/prompts/pl_speaker_4.npz b/bark/assets/prompts/pl_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f2a0666e42263c97fd5ddcf10d9c6057afd13248
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbfb1f4acd490e7621fc37492c9bba7f108d8285e6210ab1108dd9cb8326f831
+size 42548
diff --git a/bark/assets/prompts/pl_speaker_5.npz b/bark/assets/prompts/pl_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5a458f59471a11c07e85fa9025ba5539c0934312
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:543862aff2a35d6bf6592535369dfff8cae30e4cbafbaf2753e8041bab782d78
+size 34020
diff --git a/bark/assets/prompts/pl_speaker_6.npz b/bark/assets/prompts/pl_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bd8d79c6293c7308a31cba38f721faf38ac8b780
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0648515f19e66cc1da1ae08144a0df14c7b3e0df2ad05b6ff869b0b4a4619573
+size 45324
diff --git a/bark/assets/prompts/pl_speaker_7.npz b/bark/assets/prompts/pl_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..27a05601ecf8c73af506c91d4d4be7b419013949
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bff704fa071311dbc4b808729303b81b822b8d7c1293d10ad8d7398796350fe
+size 37380
diff --git a/bark/assets/prompts/pl_speaker_8.npz b/bark/assets/prompts/pl_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..319123cfcd8a389597d37c5f221a4c1bda06aadd
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f064caee991852fe1a1cec68fbf26dfef3fd988666ccee78b9ef61e7ebe84d5b
+size 33380
diff --git a/bark/assets/prompts/pl_speaker_9.npz b/bark/assets/prompts/pl_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..6f5d04b4d425c6960786ad3ad52c036230a7ae20
--- /dev/null
+++ b/bark/assets/prompts/pl_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d75c2d6e465bbdeeba5c6dccf83589f7eceb3b834d1f32b17b210049fa535df5
+size 36364
diff --git a/bark/assets/prompts/pt_speaker_0.npz b/bark/assets/prompts/pt_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..aa7bcaa2e66cfca6068b3d709d218fa53ddea914
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5002193665e3baccde6ae281e156af974080ad86e535380c736f5bcc72b2435
+size 32420
diff --git a/bark/assets/prompts/pt_speaker_1.npz b/bark/assets/prompts/pt_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4811ebf7cb4acf9c21650b7404b0ecfa227bffbd
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b32252732efe10a7373d17f7272fe68c7009a129fed5359e75675d5cbb62930e
+size 58492
diff --git a/bark/assets/prompts/pt_speaker_2.npz b/bark/assets/prompts/pt_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..422d9de9c76d3005cb4fa3f2cc384970260c9148
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6ec426f1d138a57579c126cd7e69d4123bbe8d1c1587b9f83db7cb3b3cf963d
+size 21596
diff --git a/bark/assets/prompts/pt_speaker_3.npz b/bark/assets/prompts/pt_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..45cfcaa85992e1f0e1ca5ae6042cf778a185827a
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e41a05a94c01fd7810fa0b3986acd83cfebb55b76ac42d31e801113a9f3c599a
+size 35300
diff --git a/bark/assets/prompts/pt_speaker_4.npz b/bark/assets/prompts/pt_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0fdcbc0286b5bc99886fa13f11e20c9b615ca009
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8df852db686b39f17eeec6e00c794eb0d28254c2f3bd5b7659f84238df09d642
+size 49004
diff --git a/bark/assets/prompts/pt_speaker_5.npz b/bark/assets/prompts/pt_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..aa65f95d218ab1e39b7864ec8409a42ea12ca557
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f4b46e6a8483c8cacc9afaff6ef91f3be5b2059275853ff990911179f0f2112
+size 34444
diff --git a/bark/assets/prompts/pt_speaker_6.npz b/bark/assets/prompts/pt_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..76d3d0b304674e7b27751a578cc751127ad84862
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb3b9bc612d61196ac456145e7a571c4304f7643cef3b004934a0513117ed5c7
+size 56628
diff --git a/bark/assets/prompts/pt_speaker_7.npz b/bark/assets/prompts/pt_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..672e65b14dc5ca3473bb26546ca986070e3f9b2f
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd9c8fab73e4d2d1d6170c23a78340c4e4ad4ddb9ed6864127474c38ca2907e1
+size 34020
diff --git a/bark/assets/prompts/pt_speaker_8.npz b/bark/assets/prompts/pt_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a9c89b45b9fbffb02dd4a9038bb0f0e560a6976f
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:584d7eb0ffd6552fd749718ccdd2422cc23cc04c4aab38725756335142914aff
+size 30284
diff --git a/bark/assets/prompts/pt_speaker_9.npz b/bark/assets/prompts/pt_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4303251b30bed8a332217d1009b14c4a324b5192
--- /dev/null
+++ b/bark/assets/prompts/pt_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21724f6ac25a3aef785875a0cbd98c6d72fabd9e60aa982a8afa6608b59388ae
+size 58652
diff --git a/bark/assets/prompts/readme.md b/bark/assets/prompts/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..b01ae915d015f80c164253ac79e5e97e9b6e04b5
--- /dev/null
+++ b/bark/assets/prompts/readme.md
@@ -0,0 +1,30 @@
+# Example Prompts Data
+
+## Version Two
+The `v2` prompts are better engineered to follow text with a consistent voice.
+To use them, simply include `v2` in the `history_prompt` argument. For example:
+```python
+from bark import generate_audio
+text_prompt = "madam I'm adam"
+audio_array = generate_audio(text_prompt, history_prompt="v2/en_speaker_1")
+```
+
+## Prompt Format
+The provided data is in the `.npz` format, a NumPy file format for storing multiple arrays in a single file. Each file contains three arrays: semantic_prompt, coarse_prompt, and fine_prompt.
+
+```semantic_prompt```
+
+The semantic_prompt array contains a sequence of token IDs generated by the BERT tokenizer from Hugging Face. These tokens encode the text input and are used as an input to generate the audio output. The shape of this array is (n,), where n is the number of tokens in the input text.
+
+```coarse_prompt```
+
+The coarse_prompt array is an intermediate output of the text-to-speech pipeline, and contains token IDs generated by the first two codebooks of the EnCodec Codec from Facebook. This step converts the semantic tokens into a different representation that is better suited for the subsequent step. The shape of this array is (2, m), where m is the number of tokens after conversion by the EnCodec Codec.
+
+```fine_prompt```
+
+The fine_prompt array is a further processed output of the pipeline, and contains 8 codebooks from the EnCodec Codec. These codebooks represent the final stage of tokenization, and the resulting tokens are used to generate the audio output. The shape of this array is (8, p), where p is the number of tokens after further processing by the EnCodec Codec.
+
+Overall, these arrays represent different stages of a text-to-speech pipeline that converts text input into synthesized audio output. The semantic_prompt array represents the input text, while coarse_prompt and fine_prompt represent intermediate and final stages of tokenization, respectively.
+
+
+
diff --git a/bark/assets/prompts/ru_speaker_0.npz b/bark/assets/prompts/ru_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4d7fbd1b61390cb294e4f211f8dd9445936d14fa
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f832edfe62de54ab56cd09862af428927f8e82ddbc371365c6a19db3b4fc1ab6
+size 57852
diff --git a/bark/assets/prompts/ru_speaker_1.npz b/bark/assets/prompts/ru_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4dee2ba3ce87f3ad5903e364d7c11ccee58cc7e4
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c72519f5060c8896d8131671e047e347614f555471a5f30da76fbc77acb5e8ee
+size 24260
diff --git a/bark/assets/prompts/ru_speaker_2.npz b/bark/assets/prompts/ru_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c52be3b31ea18c3817f715f8199fdd9f5c51abc9
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87db3f55be72596b53afc7d2166ae38a1b6e5ba04880f4c392ff662ff14e41f4
+size 51668
diff --git a/bark/assets/prompts/ru_speaker_3.npz b/bark/assets/prompts/ru_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d6c1d1e8362ec8ac94bc3149d3705090c117b0ad
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b68d63b8ae2d46b68a67be76ef4cb7823c7640f2e855da05648bdea9a7c0871b
+size 29164
diff --git a/bark/assets/prompts/ru_speaker_4.npz b/bark/assets/prompts/ru_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..776cc437ca9a34349a6079a6ed1c76ba1c2766c3
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c15a3c0cb477b01ab4baecadc9781a398c9e82e1db6cc00f98c78d165af0e6b
+size 27940
diff --git a/bark/assets/prompts/ru_speaker_5.npz b/bark/assets/prompts/ru_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..12fa85e005492c85bc28589a43551999e03c7c17
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bf201c1ea1ea44c77c0264f33dfeeee99d27d02498e77f23d63a56de4ebdeeb
+size 23356
diff --git a/bark/assets/prompts/ru_speaker_6.npz b/bark/assets/prompts/ru_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0046bb3967ea771370fff4da6fe8fe7d3060bd55
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a048c4676d46fbc86492813145e018ecf8790f00153e69bf080926f2a5ba594e
+size 45748
diff --git a/bark/assets/prompts/ru_speaker_7.npz b/bark/assets/prompts/ru_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..da5c1e4d7e3db00dead674973bc520a7b3025f5b
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16078f0e920479b090000cba9fe6cd47be53f8ced6441ad0452267dd5b170870
+size 25380
diff --git a/bark/assets/prompts/ru_speaker_8.npz b/bark/assets/prompts/ru_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..549034bbcc1aaec1c9bcda7082f75d7a028e808b
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4c8abbf2a202ccbce4f569233f13adad49cbec45dc9f5029c1e357882c4dbc7
+size 42924
diff --git a/bark/assets/prompts/ru_speaker_9.npz b/bark/assets/prompts/ru_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..af16ace1e06603ffa3e317d8dbb80a19bee69b65
--- /dev/null
+++ b/bark/assets/prompts/ru_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:756000ceb9eea65fa8a257cdee25ba7ec03e2c653c3d5913e0082540811f791d
+size 38500
diff --git a/bark/assets/prompts/speaker_0.npz b/bark/assets/prompts/speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4c531fb26cfafea44a9a1e90b4efe0ee4a79dc4e
--- /dev/null
+++ b/bark/assets/prompts/speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55bc30061b5c5928454e4c7a1d6206e359a25ca38fec3ca96de0a625fa96c572
+size 19620
diff --git a/bark/assets/prompts/speaker_1.npz b/bark/assets/prompts/speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d858600f97683c44cd72ccfa8badfa3b189f0467
--- /dev/null
+++ b/bark/assets/prompts/speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d5d5531998bd91684806eb64a2ac659d8c242f4112d6216697d3cae0b99b978
+size 21380
diff --git a/bark/assets/prompts/speaker_2.npz b/bark/assets/prompts/speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2452ed7bcab190bedde76dc7d7d3fe4d82643278
--- /dev/null
+++ b/bark/assets/prompts/speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3001ff8a04e64e0687b0ad145c92684c8758ce7af68fb330dcfee4739fd896b
+size 19460
diff --git a/bark/assets/prompts/speaker_3.npz b/bark/assets/prompts/speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..29d23b0a1795126c86f735f3e5f8af17de9184b5
--- /dev/null
+++ b/bark/assets/prompts/speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08b20f307ff4a1e5a947f4394ce2f2c3c5e0e6a9f78e0fd77604fb08359ab90d
+size 32740
diff --git a/bark/assets/prompts/speaker_4.npz b/bark/assets/prompts/speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d12adb15a7bc72de351c53b046a6edbb46713cd4
--- /dev/null
+++ b/bark/assets/prompts/speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b6acddfa41ce84e558e09e91fae5fbb01704bc1cef0f000bcc7f30d05e51afc
+size 19676
diff --git a/bark/assets/prompts/speaker_5.npz b/bark/assets/prompts/speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1662063711535dffe2ec4c0711e940ca0bd78a7b
--- /dev/null
+++ b/bark/assets/prompts/speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048c7362b237c43ceb0c3a4986b5c42c21ef013cadaf7c77b6348419f801dc93
+size 54548
diff --git a/bark/assets/prompts/speaker_6.npz b/bark/assets/prompts/speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9877675833fe910b4fd15b6938e35a8bf1434073
--- /dev/null
+++ b/bark/assets/prompts/speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d7359be4a984930a81103043409b695e383d493f4edd6d4786537b1730a95c0
+size 23516
diff --git a/bark/assets/prompts/speaker_7.npz b/bark/assets/prompts/speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f83e4af9176bc23fb0dbafaeadd0c3f24dcb14e4
--- /dev/null
+++ b/bark/assets/prompts/speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:560ccbd20b16a2313cdc44ed578c8fb4dcbe51c2d1c57756dc242d185a6b88d3
+size 22556
diff --git a/bark/assets/prompts/speaker_8.npz b/bark/assets/prompts/speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..dff9d012159fd857ba4070c99fb96a66a8c8de41
--- /dev/null
+++ b/bark/assets/prompts/speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26eb3e2589f21f88aa963f052cc5134c6510b1cdb0033be277733bc7dc77157c
+size 20580
diff --git a/bark/assets/prompts/speaker_9.npz b/bark/assets/prompts/speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..98fc91445386fe8ea4aabe7a9172d10e4298b557
--- /dev/null
+++ b/bark/assets/prompts/speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15ab7bbb47bf326e454cc1d299f4069d0fa9ea8e934273dbed4cbf1116404322
+size 18396
diff --git a/bark/assets/prompts/tr_speaker_0.npz b/bark/assets/prompts/tr_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8d037093d14edd7fb770878a545573d11648fd56
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21c8f2c4e8b31b0a11c1565ba5ee104e11db4e3f83c6d8b44d52385692322d3b
+size 26020
diff --git a/bark/assets/prompts/tr_speaker_1.npz b/bark/assets/prompts/tr_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0eb88054072445f64d62370aabf7256505453acf
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:578ca20688ff603c6365a9f53076300cd17dec784532b4bb2e75de8a25f4781c
+size 24156
diff --git a/bark/assets/prompts/tr_speaker_2.npz b/bark/assets/prompts/tr_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..324a432c1456a37f490bd3358e440a5f8e4b07d2
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97013a34b28feb95881e5bcd4bea53e81acfcb5c4c896a6733e2a5e351242e6c
+size 32740
diff --git a/bark/assets/prompts/tr_speaker_3.npz b/bark/assets/prompts/tr_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..741c83a19f8867f98151203b043c9af46885de4e
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:117cd3cf2367f009d86849c75f85709cd227628b6b26ce7074b6196c2bb12132
+size 20100
diff --git a/bark/assets/prompts/tr_speaker_4.npz b/bark/assets/prompts/tr_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f06c02cf18b9a686aaa6a1c24b8dd402f8a1a4c2
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:102a94642852e99a171875a53a3f219196407d75bbec62191dcd3bd542aa9c64
+size 16100
diff --git a/bark/assets/prompts/tr_speaker_5.npz b/bark/assets/prompts/tr_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4395256a8e99798ed3c0733299ef3166c109365c
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c61bf6adc04f81f1a5fbcbac1c0257fd76769f122ab083f16b3e29e2a7eeae7a
+size 29220
diff --git a/bark/assets/prompts/tr_speaker_6.npz b/bark/assets/prompts/tr_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..34723602614bf90e80ab0cfd986e57af7513cafe
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6250cb26f5b4563e9be8e5ae24f3c3af386c5b52ea21ad99237edc08296e3b6d
+size 21596
diff --git a/bark/assets/prompts/tr_speaker_7.npz b/bark/assets/prompts/tr_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b240fa58e2a72cd390f0afa813b55409c473a1d4
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1eeca993d97dd24a1115494872c062c35297f462270ed4062f3158b0f8af08ac
+size 21276
diff --git a/bark/assets/prompts/tr_speaker_8.npz b/bark/assets/prompts/tr_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..ebd583be48327212ea90e34e3aa5b5f1493333fe
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cba4b642845725653e6d18b55b892208b33878e3914daeb1bd86e9c2d6383e33
+size 35724
diff --git a/bark/assets/prompts/tr_speaker_9.npz b/bark/assets/prompts/tr_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..45b093169b0c0338303052a3c82a584710a26658
--- /dev/null
+++ b/bark/assets/prompts/tr_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d493f6328ba149b7680e55cd6a9b7419b88df871981040a0cc4a51493b210b6
+size 19460
diff --git a/bark/assets/prompts/v2/de_speaker_0.npz b/bark/assets/prompts/v2/de_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..430a12b87d7d962ab47a562e8a93952f01c8ed5b
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82c8d443f71a46bca90e9323e0fd14c8beaaa55dbc690eb14b75b6b14497005a
+size 39620
diff --git a/bark/assets/prompts/v2/de_speaker_1.npz b/bark/assets/prompts/v2/de_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..edce987cc081817c674a0f0e98c8159e1d2982d6
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed1c4324e3f989d484d7ed433efa2082f87c845f8688e18417624210e979335d
+size 27460
diff --git a/bark/assets/prompts/v2/de_speaker_2.npz b/bark/assets/prompts/v2/de_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..643d1533ad5bacce08a9c42a013b28aa53c38faa
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90f08869a1377c86ec525f0ad7aed10b4dcf7d75717b47a34d4b677d7e33e921
+size 24740
diff --git a/bark/assets/prompts/v2/de_speaker_3.npz b/bark/assets/prompts/v2/de_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..809646482f87622cf2bd2cc07989c4b6a48d04c8
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7da1e9457f8a5e082988652f202a5cc5320ac362f81ecfce5b1ce6edce2342d1
+size 31300
diff --git a/bark/assets/prompts/v2/de_speaker_4.npz b/bark/assets/prompts/v2/de_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a806a4c65d5543a95e0f25744267a7eb7ac7594b
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c53c565cedaa683bc2bf5577c0ad70c4d435b66055641234857dff5e743b2b5a
+size 30660
diff --git a/bark/assets/prompts/v2/de_speaker_5.npz b/bark/assets/prompts/v2/de_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..e5d6f6bd57fac5a6c30edf4849eab83f70451310
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a798e48483c89702c316478336939a5c5a579cb0dd9e76943eca1ece914e3bdc
+size 31300
diff --git a/bark/assets/prompts/v2/de_speaker_6.npz b/bark/assets/prompts/v2/de_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c11dbdad84dec46ffdb302b699474875ca404ce2
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d668bf7735343ca059cfc35c0d796a422cb05ae6172a244dfd7320958943304
+size 23196
diff --git a/bark/assets/prompts/v2/de_speaker_7.npz b/bark/assets/prompts/v2/de_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f442fb9e896dc1f7ee40672bfc742e4a158a05c5
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7aec16132be2de475b9d8889ff281cce60efa06f7910f8d2701aac75d119d9b4
+size 40100
diff --git a/bark/assets/prompts/v2/de_speaker_8.npz b/bark/assets/prompts/v2/de_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9f7efdeb0be1be9e287f3094b335ff55b9fa6e1a
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a81d5f6c95d347cc269679bc41bf5dc50fe644e01b472985f6dd46c9b578937
+size 28524
diff --git a/bark/assets/prompts/v2/de_speaker_9.npz b/bark/assets/prompts/v2/de_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..afeab53e486cbe349a9b4924077404eda3b53960
--- /dev/null
+++ b/bark/assets/prompts/v2/de_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73667c2a678d5264d583085772297aa7451c20d48286dba57ebc43d78767de38
+size 51084
diff --git a/bark/assets/prompts/v2/en_speaker_0.npz b/bark/assets/prompts/v2/en_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2ccc5a8a08be9765800958b93858b5720b594665
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:932f40d879ba8659f1ca26319ba64ea3b0647b2050fe24313bf42b0dff1fe241
+size 28100
diff --git a/bark/assets/prompts/v2/en_speaker_1.npz b/bark/assets/prompts/v2/en_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..773451dd1073938fccf73895ec049042c9609bc0
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e7f18015e1ab9b6302ded1e28a971af5306a72f193bb6c411f1948a083c8578
+size 25220
diff --git a/bark/assets/prompts/v2/en_speaker_2.npz b/bark/assets/prompts/v2/en_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8a2f9e4366031f67781097371e08a36342635ff4
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d218990680ece5f2d4fc18ea4783b016b3ae353ec413eaee2058f2d57263c9b3
+size 26236
diff --git a/bark/assets/prompts/v2/en_speaker_3.npz b/bark/assets/prompts/v2/en_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..103cfb362b1ede1b67145d4c2384c7797e8d5ea4
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92c2e2a29145c83738e9b63f082fd1c873d9422468a155463cb27f814aeaea66
+size 34980
diff --git a/bark/assets/prompts/v2/en_speaker_4.npz b/bark/assets/prompts/v2/en_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..123777ca72c8bbd4d4548b48d6e0cae91b13ab0d
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:992f91991a9a5359d72f00b09a11a550e71bb8ebfc0cfd877e39d7d41f98b714
+size 23780
diff --git a/bark/assets/prompts/v2/en_speaker_5.npz b/bark/assets/prompts/v2/en_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..dcf05979f75c24b11888ab53da02ddb118c91459
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18831c3f6014e4a2ff60ad5169b1fae06e28ed07f43f8a3616aafb84515091bf
+size 24740
diff --git a/bark/assets/prompts/v2/en_speaker_6.npz b/bark/assets/prompts/v2/en_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..090f03f886a4eba3105a0d28e7b739fb600c2cd8
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fab38dc6b6bc9226bcc414f4c5a9524bc1b2441865a586153fb620127a8faa4e
+size 25540
diff --git a/bark/assets/prompts/v2/en_speaker_7.npz b/bark/assets/prompts/v2/en_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d5d9068bff806b7c6e1025720c5a2c1636ba8b36
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f4c4eb33f5994be8de5cfd1744ebce13da1618a6da3a7d244514178c61ef7db
+size 22716
diff --git a/bark/assets/prompts/v2/en_speaker_8.npz b/bark/assets/prompts/v2/en_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..99bdf0061c5d3377aa1aebe5759faa3f41aa27e1
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fc9f11b539588f51bbf78150a73e0365c49b2306bd72e5a22b28ef09c4fb15d
+size 23300
diff --git a/bark/assets/prompts/v2/en_speaker_9.npz b/bark/assets/prompts/v2/en_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2439d40fb6cf3a754c4ce305d3c95e8c463690d1
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78b3ba32eb9aeb9ed34556856c40633ecc8332d1c3ae3c81e6f5015ac3eefbd5
+size 30180
diff --git a/bark/assets/prompts/v2/es_speaker_0.npz b/bark/assets/prompts/v2/es_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1dc7eeefbfc994e558c051b68ea4ff054890732f
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:753ccfffe8b5f1a8dbb44bf6fb7bb66f39d11adedb37204256f194f6c8bf0205
+size 22020
diff --git a/bark/assets/prompts/v2/es_speaker_1.npz b/bark/assets/prompts/v2/es_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..778130eb7e527c3724a952bdfcc4fc5cb33c2c0d
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68574669097c67e420bc587c2043310d5c1de3f8a65280207f9c08bd577a5906
+size 25116
diff --git a/bark/assets/prompts/v2/es_speaker_2.npz b/bark/assets/prompts/v2/es_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9fafdaaf3d1bf1de2fb685c79c7eafa7f06843cf
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d1c4292dc08db834668d3f93b98d7b2589242408f8027c344c29081d7304da6
+size 26236
diff --git a/bark/assets/prompts/v2/es_speaker_3.npz b/bark/assets/prompts/v2/es_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2757fb847e0f1755bf2012f5a5ac854ff4b2c92d
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57ae62de759e42f45b3e46095a023f5a242f028b856ac6d8bfdb9f61edaf2089
+size 23780
diff --git a/bark/assets/prompts/v2/es_speaker_4.npz b/bark/assets/prompts/v2/es_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..cd59ad463c97266762660b716bcc2575a7d42c30
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61c863e864973d7c4b30b97af1bfcfdf90a037a576f58c88890e8ff603f2c157
+size 23356
diff --git a/bark/assets/prompts/v2/es_speaker_5.npz b/bark/assets/prompts/v2/es_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..04bf8f6ac5521d653c8d71a315b7a240fa14379d
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71566bd17e787b472c6705da4b09b5017b3d9e68b274b593bc620bbe7beed6bc
+size 25700
diff --git a/bark/assets/prompts/v2/es_speaker_6.npz b/bark/assets/prompts/v2/es_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..39a0e5d20674ed69a3ca9e8bdbc4f296661bbbc8
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b866852d37c40d526b2f0f19549038cd000d5d33895b4a4ef391494efeb681b2
+size 20580
diff --git a/bark/assets/prompts/v2/es_speaker_7.npz b/bark/assets/prompts/v2/es_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bf00317a9b7303962d9bbc5da126fe507c752a6a
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d02d4d65e70b4469152ee0ae359ca83d7c8c9824c666047f84fa20889f261cb4
+size 22020
diff --git a/bark/assets/prompts/v2/es_speaker_8.npz b/bark/assets/prompts/v2/es_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0d38cec3b9626904404edcb43f97ffe1927dc47f
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c097c1ee632612aa9c6c839dc0c885832b040a0d8d2bd8749b891429bb609a0f
+size 25436
diff --git a/bark/assets/prompts/v2/es_speaker_9.npz b/bark/assets/prompts/v2/es_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1c4359d68f2d2cd2d443feaed47d77292c8067fd
--- /dev/null
+++ b/bark/assets/prompts/v2/es_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4b77f52b0c18619e948d711e70b76fa57597126d2c22e6bbe409055d4abdec0
+size 19940
diff --git a/bark/assets/prompts/v2/fr_speaker_0.npz b/bark/assets/prompts/v2/fr_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a0675f8dc5b0408e84517ffc1b77950327226c72
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68240e79c5b11d6c1a3d6c0cc47de60fd92040229ffe12b2770a0ded35a62161
+size 45804
diff --git a/bark/assets/prompts/v2/fr_speaker_1.npz b/bark/assets/prompts/v2/fr_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c33f7e13b693439c32acf3cbeef0c143f90370d4
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d87cc3e81a3c166138a80f23614b11ba16f03c662a7974e9f9f3e419203a4228
+size 25700
diff --git a/bark/assets/prompts/v2/fr_speaker_2.npz b/bark/assets/prompts/v2/fr_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..259e17217b278da528a49fff0037171687f4db21
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:265004e0a094d846d655f9f6b8802caee76e1cd4721a53b2c5201f99c9b87edf
+size 52204
diff --git a/bark/assets/prompts/v2/fr_speaker_3.npz b/bark/assets/prompts/v2/fr_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b48aeda87afa3f1252f04f9512937817993576da
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f5438a83c768b722b75798d3a2696950aa5f86628d522075c7ca13047a0a166
+size 50764
diff --git a/bark/assets/prompts/v2/fr_speaker_4.npz b/bark/assets/prompts/v2/fr_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..ddb90bbcb3a8e21835e9d39c522286d6a8d2ab18
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e07317223ef83c23b380029043a3897487baef441351dc6982d01f6fd0079b9
+size 49908
diff --git a/bark/assets/prompts/v2/fr_speaker_5.npz b/bark/assets/prompts/v2/fr_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0b07103e6488b2d8daeec71a6b4cc82bf408bd12
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acf899fe1f544a49d5d2acb6d7d770a89f7efc068355e272a8204a343f89b5ce
+size 45108
diff --git a/bark/assets/prompts/v2/fr_speaker_6.npz b/bark/assets/prompts/v2/fr_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b0bd5dea4b081188a8056f922e4877df559611db
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:224a9b8eeda606850388911d3a82630ba7fa32a276cbfb8cd06399712c7f7ca8
+size 55932
diff --git a/bark/assets/prompts/v2/fr_speaker_7.npz b/bark/assets/prompts/v2/fr_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..05718606139d0b5c8bd9df1e92d2a3cc34343420
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:422d55e1d59cd761cc7386c71b832bfb6a770fe51352d7313785612460c475e8
+size 32524
diff --git a/bark/assets/prompts/v2/fr_speaker_8.npz b/bark/assets/prompts/v2/fr_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..14f6f4863ca68e491fb438f808e37378b493541b
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:929a8e2303508c7602e8f65942844f453a719ede39f5d7357bbee859214ea145
+size 43244
diff --git a/bark/assets/prompts/v2/fr_speaker_9.npz b/bark/assets/prompts/v2/fr_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..625fceeff84119d1c464a992864249f493bb7988
--- /dev/null
+++ b/bark/assets/prompts/v2/fr_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09cc3b4d0cb1d7fc86caa65a36f6cee615d947c367adfe226f947917ec7b08b2
+size 32100
diff --git a/bark/assets/prompts/v2/hi_speaker_0.npz b/bark/assets/prompts/v2/hi_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1a481c40ff390cbfa7c61df81d966ff6df7a28bb
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12a32c0f2ade5948d850b06069f31dea88c70be841dc2dfbc048af3024d0fd87
+size 32580
diff --git a/bark/assets/prompts/v2/hi_speaker_1.npz b/bark/assets/prompts/v2/hi_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9a22578290b9a23d0b9b107d7417ced7bf0cfd52
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fea5c5f9555e37c782b88e87db73fa3a1adabf287625cf72c289aeef139a938
+size 25860
diff --git a/bark/assets/prompts/v2/hi_speaker_2.npz b/bark/assets/prompts/v2/hi_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1b65cf9976f0ffeb4ec011f03e650d155b88f076
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5904ab83736410ce52588efa27f61fff59df21e186508aabbe015b3db78d4d40
+size 27780
diff --git a/bark/assets/prompts/v2/hi_speaker_3.npz b/bark/assets/prompts/v2/hi_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..01a2bab3858d106b0cef25a04020d5afd6cdcc53
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d40a8eda9ed64d9294b4bb85922d2dde48d8be5e5bddfcfc400bc5f6442b5178
+size 29804
diff --git a/bark/assets/prompts/v2/hi_speaker_4.npz b/bark/assets/prompts/v2/hi_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..427927394b79e6b9586146b655ea8b35ae381277
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09dbe02c981d79ea9b4794553fb54d6a05d573a5e8f2bace8a78f7ebee65878a
+size 25380
diff --git a/bark/assets/prompts/v2/hi_speaker_5.npz b/bark/assets/prompts/v2/hi_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..6b6197a9494cd656a8072766bb2344039019b88d
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cba5c6f62757ddf29502e5cdb7ad184edc0bb7b6b05ec6020ee3fb2c404d7642
+size 51404
diff --git a/bark/assets/prompts/v2/hi_speaker_6.npz b/bark/assets/prompts/v2/hi_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..62d8127e826c11c2710f9ebe8138a40360980512
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f098561145b8f2755089b94753db97525e1427efd3b5a9daf923990abd04828
+size 26396
diff --git a/bark/assets/prompts/v2/hi_speaker_7.npz b/bark/assets/prompts/v2/hi_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..aaaae9306427e67be00c4b1d0b0f34a455344b3e
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b92ed8ca28d71a3fc720a99386467447fc8753e7bc8b4d8ff7cd765c835b467
+size 29380
diff --git a/bark/assets/prompts/v2/hi_speaker_8.npz b/bark/assets/prompts/v2/hi_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..7d4d8d7c3f24de34791a9ad98a76f75ebf2db131
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc8e094f5d6b3109b6a8a5f040db0f2e3f214f90ebd7708e19c6db88aabdaeca
+size 39404
diff --git a/bark/assets/prompts/v2/hi_speaker_9.npz b/bark/assets/prompts/v2/hi_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0af99a71f8a65d4c2dc0df15f820d59be8d59db3
--- /dev/null
+++ b/bark/assets/prompts/v2/hi_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dbf6c82c5eac9d412c50ae76a42525d28c64c08900d191e1838cbeca4b133a1
+size 23516
diff --git a/bark/assets/prompts/v2/it_speaker_0.npz b/bark/assets/prompts/v2/it_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..debaa79a85e3ec8592da7e8464867bdb6331ac4e
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad1d1194448c69b8b6e5e6a56cbbd7ebf1d2f654e89a0b773702124b8dcec799
+size 28740
diff --git a/bark/assets/prompts/v2/it_speaker_1.npz b/bark/assets/prompts/v2/it_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..72e1d748940bdc3c11818af62f28bfd6e59b0fab
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:838b990eb7e86d463ebbec70926aad5bea178c902065f2a48818c86c9a2056be
+size 33804
diff --git a/bark/assets/prompts/v2/it_speaker_2.npz b/bark/assets/prompts/v2/it_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..88ce1d8ec7773380a5bf03ed9387cffdf0077129
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26cefcec88aab6adc0d159e0ca34e1e97fec8a1f240af11fa6f3f321f789e787
+size 40788
diff --git a/bark/assets/prompts/v2/it_speaker_3.npz b/bark/assets/prompts/v2/it_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..83100eda7b2af732dab73f160f1266e8671b3051
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc35eea4020470fbddfea5c3948115fdc7eb122d4f88ea20f641bde5f71a1133
+size 30764
diff --git a/bark/assets/prompts/v2/it_speaker_4.npz b/bark/assets/prompts/v2/it_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1621505e856d21a72b5d56535cdd355de4052694
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fe9d9de363305e7dbe8d2aa5311c552260e42c4a94a4b5a40c78a1b47d44689
+size 28740
diff --git a/bark/assets/prompts/v2/it_speaker_5.npz b/bark/assets/prompts/v2/it_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a5306af173c1319f1de9f4e615ee525c2a29f6b0
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24314dc0e83d78278eccb55cdb8fab18b80019fcf8b9e819a21916dbe20e61cd
+size 30444
diff --git a/bark/assets/prompts/v2/it_speaker_6.npz b/bark/assets/prompts/v2/it_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..94e1949c29255fc401a107af97d6a9bb54268b32
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:955873c0b1f5736d5b5597d70a9520bc61002c472c4714a2821714c5f4ef3b70
+size 29644
diff --git a/bark/assets/prompts/v2/it_speaker_7.npz b/bark/assets/prompts/v2/it_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..ea271eaf5d6c3471585b66ed41fe3cde358f3c9a
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0809a63254b1f8ffa4f948d400e8be7c8cd8e728da5fc25e8add8120f2af5533
+size 43724
diff --git a/bark/assets/prompts/v2/it_speaker_8.npz b/bark/assets/prompts/v2/it_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..84a45070bca2f3e20935a14374fb8559f469b465
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0346a9e3f1772be3dbd656fda5fc0768eb1e9623e2afaf7e37acc0f88c5c266b
+size 42708
diff --git a/bark/assets/prompts/v2/it_speaker_9.npz b/bark/assets/prompts/v2/it_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0711fa95af43a4f8f71fcd529b36e37cbb251b5e
--- /dev/null
+++ b/bark/assets/prompts/v2/it_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55cd34677e80f3ee87ae089e6a6c5378c5440039ca9a898f631dd62d21192b09
+size 37644
diff --git a/bark/assets/prompts/v2/ja_speaker_0.npz b/bark/assets/prompts/v2/ja_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3efa1e84712165e47f9f566a60abdafd5698b0ae
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fed62585fb3e36b8f9bc390c7e897f7abe2dd4b13308ef37bd8f83b4fd13c4a
+size 24420
diff --git a/bark/assets/prompts/v2/ja_speaker_1.npz b/bark/assets/prompts/v2/ja_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a7846bdedcda1157f741882dcb7a77f5028715fe
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fb54f924f2a14de55b616d466ff795cef7b18709bc9091cb2c2dd10ec5060d3
+size 31244
diff --git a/bark/assets/prompts/v2/ja_speaker_2.npz b/bark/assets/prompts/v2/ja_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..030b2a7b3b3443f1a73ee3acbba63e3bd3dadc6d
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52dac33a71598d8c0e5044233f589f97cf7d93b09d46974b4351e0bfaf425d73
+size 24100
diff --git a/bark/assets/prompts/v2/ja_speaker_3.npz b/bark/assets/prompts/v2/ja_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..31b217397f2079dfeab46dab70c3d6202a4a1bb7
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:615194f254981be6d55130a0e557092c6850a80ed36ca8cbcf835e73bfaf8036
+size 24476
diff --git a/bark/assets/prompts/v2/ja_speaker_4.npz b/bark/assets/prompts/v2/ja_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2f0c9d95e052d04adc545efb4ecc4093acfbcbf8
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13dacf5605b1a94bf6a739523e3d5f771d793d401c699cce259e5b7560fb1986
+size 26716
diff --git a/bark/assets/prompts/v2/ja_speaker_5.npz b/bark/assets/prompts/v2/ja_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a6a8032f4876fa7801ff875ce8d604cb92fb3968
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42d80ce210df3f10373652ef35a554e14038ee09d8d7367b4aebbf2fbc45ef95
+size 24956
diff --git a/bark/assets/prompts/v2/ja_speaker_6.npz b/bark/assets/prompts/v2/ja_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..fb29cbc5e082a1e0acd5e13bbe843bebf703d65c
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8654c0f4a6331a40586b3b265b7ad3224ed9884362b96b3ce8d16fc009ea56f4
+size 40788
diff --git a/bark/assets/prompts/v2/ja_speaker_7.npz b/bark/assets/prompts/v2/ja_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..e56891982dc0963f35ba811628e66e37c20aea03
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d892d981d73e5a7b388d388bb68266ca7aca158ab5182e82108f454f1e0e7d07
+size 25060
diff --git a/bark/assets/prompts/v2/ja_speaker_8.npz b/bark/assets/prompts/v2/ja_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a8cad1b14001c773b92a4022a245236718030bbd
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03a17b4f638f01d3bb4c224041c243e747d2c4bf10ac8d03f6daff5475a1a99c
+size 20260
diff --git a/bark/assets/prompts/v2/ja_speaker_9.npz b/bark/assets/prompts/v2/ja_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bb13eca0c7c51749477afd0221cc4daf89d64958
--- /dev/null
+++ b/bark/assets/prompts/v2/ja_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c7547365d4d3a27572843ed3899ea28a43fb05239c1bce9d005db6da143cd8a
+size 31140
diff --git a/bark/assets/prompts/v2/ko_speaker_0.npz b/bark/assets/prompts/v2/ko_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5404666dbd515dbf23df66450d352757733eb536
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe31a6881986dcc0d75d3142825aa98b5f642ec3d3ae4385bfa7accbf20c4a26
+size 26556
diff --git a/bark/assets/prompts/v2/ko_speaker_1.npz b/bark/assets/prompts/v2/ko_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..379c1f9c4135d05913b7a797256d647bd1e6bebe
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f987c09b64025759da9af1a4443fb5b6c9675a7b0dd50f5de4231e57ed9e81e
+size 26340
diff --git a/bark/assets/prompts/v2/ko_speaker_2.npz b/bark/assets/prompts/v2/ko_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1df0bcf6b8283ce260470ee3810f24b1ab2837b9
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e3aac1bf04c2c8aae49dbd4dd4f2ccac346e6726b22563b23f87d2506bd930f
+size 19196
diff --git a/bark/assets/prompts/v2/ko_speaker_3.npz b/bark/assets/prompts/v2/ko_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..6e3b48080c53a74cd4c367716f16c41c194b8d85
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4974e0659fcd9aaa72514ac146b7ad5e9696773c18549a3a0797802f5db70955
+size 39564
diff --git a/bark/assets/prompts/v2/ko_speaker_4.npz b/bark/assets/prompts/v2/ko_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d5e068b5443d6912c69c1521b8fdd6ee8d326dde
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeb9837e9ad0214859a1fcb71c5d7413ca60be55a4e611228f28aa434feea3a7
+size 23140
diff --git a/bark/assets/prompts/v2/ko_speaker_5.npz b/bark/assets/prompts/v2/ko_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d98b54b310de4b60a008f9d0265bc29016207739
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1fd5319c1f6366cc52c99517c2f951adbd5ac67b76829fc06f9be367b79132d
+size 23196
diff --git a/bark/assets/prompts/v2/ko_speaker_6.npz b/bark/assets/prompts/v2/ko_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..7985bdf3fa3e30d046b62ac354cd426d6dd2da7e
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:328d17d8918e9a732d9b3f07228c2f875843097632a6e06f822613c8bc5b48df
+size 26396
diff --git a/bark/assets/prompts/v2/ko_speaker_7.npz b/bark/assets/prompts/v2/ko_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..deb134e30a69034868ce49de85ac557aebbddb15
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e79f5aa446e1cdb3a2c3c752a76f68e0a6a890d5605bda325bfeff9dd69cf887
+size 27884
diff --git a/bark/assets/prompts/v2/ko_speaker_8.npz b/bark/assets/prompts/v2/ko_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..57f2487e7da571930ce97c39ac1c210324673abc
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99ed04ad3deb6b981976e14df0abc4933c663fb0fb3eb015acbceb450a48da0d
+size 31140
diff --git a/bark/assets/prompts/v2/ko_speaker_9.npz b/bark/assets/prompts/v2/ko_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..267053615c88bf0ec0271dfb604d4de9a07ee3ea
--- /dev/null
+++ b/bark/assets/prompts/v2/ko_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b7f91e06ecf5886a52f73801ea5be894f1b6cb3d67da7738fa8873b618b7535
+size 23676
diff --git a/bark/assets/prompts/v2/pl_speaker_0.npz b/bark/assets/prompts/v2/pl_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..328efc7d637d2ac47e72bed3bf6ae0fd554bbb8c
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb026490fef1bad75b5f193eacbe8c99647a0faf17e9583827f4a93c71817439
+size 24900
diff --git a/bark/assets/prompts/v2/pl_speaker_1.npz b/bark/assets/prompts/v2/pl_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f1745a236e6950d4ab4ede58b47bd8eb8b29e664
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8daa9e0e55537b05756136e984f9484a21239e9d49a4567e9e695cc281ce9f6a
+size 34660
diff --git a/bark/assets/prompts/v2/pl_speaker_2.npz b/bark/assets/prompts/v2/pl_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..aad28a11aa36f16e47310c70d77823a4cf4f0588
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88fd57a7a0c9a5d7875cd91d28b672085dde12109347b49b095adcf7658d52c4
+size 28580
diff --git a/bark/assets/prompts/v2/pl_speaker_3.npz b/bark/assets/prompts/v2/pl_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4ba807fc90397b43b9508df585b2e0bafb26ac3b
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:995f847a79b8dce9bf2009e0cef74e660d4697e90ee9b445f21ca1d817fa7ba9
+size 41428
diff --git a/bark/assets/prompts/v2/pl_speaker_4.npz b/bark/assets/prompts/v2/pl_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1d74cf49cdf7989c4b0bc8f273990cff60e1c976
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23635e04802c46eaadf86295d21d61e19ecb0969402d8ad78e141f04fb2eb1c9
+size 30764
diff --git a/bark/assets/prompts/v2/pl_speaker_5.npz b/bark/assets/prompts/v2/pl_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..437a96776e8ba20c68ca0d9a3516992a107e5b4c
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b294acb6154c9f145535b5a23602c34e635e63755a7e76e8ce672e8fa8901774
+size 38180
diff --git a/bark/assets/prompts/v2/pl_speaker_6.npz b/bark/assets/prompts/v2/pl_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b1bd86a276cd619658fa868f5b699a89144c7b35
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60853df4aec0a0365c8515ba4d498a131abefab3f36aadcdb198048423b3ae45
+size 38820
diff --git a/bark/assets/prompts/v2/pl_speaker_7.npz b/bark/assets/prompts/v2/pl_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..faace4b209511c6ea096cd80c6584573322eb9da
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b52e278832532ee7a5260e51db6337971231637d81f8e258834884a845b93f67
+size 29060
diff --git a/bark/assets/prompts/v2/pl_speaker_8.npz b/bark/assets/prompts/v2/pl_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..ffea7377274b9cf82aaaad1fe6f952a73a74c7db
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b65bbcbad148b24ff25625cc952d527c4143614c0bcc5176689ef2991971fe2
+size 19460
diff --git a/bark/assets/prompts/v2/pl_speaker_9.npz b/bark/assets/prompts/v2/pl_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3cb6ca960151ede664e43a74675df0c972430eb7
--- /dev/null
+++ b/bark/assets/prompts/v2/pl_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34d9f70e6118e07822d8413cf6c88c2c9a43efaa5f581d855b71747ff3c0479d
+size 30980
diff --git a/bark/assets/prompts/v2/pt_speaker_0.npz b/bark/assets/prompts/v2/pt_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9255b6e2db1251052550a2992afbe0c0aa0ee823
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91642af464c5ad73a480907a2b8f85fd5755d4b2127586a23e916204a37e6330
+size 27724
diff --git a/bark/assets/prompts/v2/pt_speaker_1.npz b/bark/assets/prompts/v2/pt_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..e41be26e61abe3bdc8952139cb8819bad6b80f50
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbaba67cbfc0fc2b0ded04b533e6c67d2b0f77670b81afa3bb6391360c7b5834
+size 34500
diff --git a/bark/assets/prompts/v2/pt_speaker_2.npz b/bark/assets/prompts/v2/pt_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d9814fdc19ffeaa100038bc69e8b174e7312ee46
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:196def1c1c743569b62b58ce6d5d3f36f43ee3c7e7ceef5548ac2b1a512e610b
+size 36844
diff --git a/bark/assets/prompts/v2/pt_speaker_3.npz b/bark/assets/prompts/v2/pt_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c18d03eeb88da2d4ecb2f7ce7c5195ff81be3810
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebae3bec99e8ba38df8afc1727f4f54ec331e3da6ce5d3a1c0f9b35bbb1a2844
+size 26980
diff --git a/bark/assets/prompts/v2/pt_speaker_4.npz b/bark/assets/prompts/v2/pt_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3365618d8bcf3b32a1ab7d3d22b623d4e0cde8f4
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c93c38105050f658b93d3d076c3e0bb7088e0b2f4550f7b27d5fe5cce8c35bf1
+size 26396
diff --git a/bark/assets/prompts/v2/pt_speaker_5.npz b/bark/assets/prompts/v2/pt_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2a3c339918a505c339e1724dd00f1a8a0d134547
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf4984719aad20c9ef021657c83b21233584a9854981152b63defa3f2936401c
+size 28260
diff --git a/bark/assets/prompts/v2/pt_speaker_6.npz b/bark/assets/prompts/v2/pt_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d29a56ae7775d820dbdf35c77fd9e797b4a32142
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a4169f290ef7601d2c586975ced73d8d6545ac1f62ec78a3c8c1ac5f6535eaf
+size 30764
diff --git a/bark/assets/prompts/v2/pt_speaker_7.npz b/bark/assets/prompts/v2/pt_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..bb6cbe9389e18540a2ccb2b04530273458949862
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a65441bcee2bf83e0c34260bcd11bb83a5c147bf4da8692a3a6392fe643aa8b8
+size 28100
diff --git a/bark/assets/prompts/v2/pt_speaker_8.npz b/bark/assets/prompts/v2/pt_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..45835e8ac2efc24068143f8dc6d4d776c8340bb8
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03164847ec1161b354827950ccbb7e7082d638f8fa28c7d3ffbe936fff44d7de
+size 28524
diff --git a/bark/assets/prompts/v2/pt_speaker_9.npz b/bark/assets/prompts/v2/pt_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..7e7a299428cefcb4ae2ed92e6529abd789bd3b97
--- /dev/null
+++ b/bark/assets/prompts/v2/pt_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8558c022cd0aef90b214085ec098e2350261059ce0c1fe199935237421f1ade2
+size 39780
diff --git a/bark/assets/prompts/v2/ru_speaker_0.npz b/bark/assets/prompts/v2/ru_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..66447e1e52f0249dd8fb469bd0ef9ae00b0705a3
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6405a5dc1746dfd97aaa51b2a3cea4853dd4fae0dcb3f378a4734c29c50930bd
+size 39884
diff --git a/bark/assets/prompts/v2/ru_speaker_1.npz b/bark/assets/prompts/v2/ru_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..123d48373801a84dda4cb4311a606a5599708f2f
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01fa66fd2ec3caf72e7a7c3db78f42690c53c175599d00d4ea72694d35d5fa61
+size 56628
diff --git a/bark/assets/prompts/v2/ru_speaker_2.npz b/bark/assets/prompts/v2/ru_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..b835c6629f143e9423ae1de96febee41c202be7b
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a49d5dbe3d688232ec91803f184004abff8903dd550c24800b86df82547ec31f
+size 29220
diff --git a/bark/assets/prompts/v2/ru_speaker_3.npz b/bark/assets/prompts/v2/ru_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..eaf483f3cb7a3d7575731b2336d0027cd23135d1
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:613b64d5e1296e46a2250c1ddb3b07264edfff91380871d418dd729eaf223706
+size 19940
diff --git a/bark/assets/prompts/v2/ru_speaker_4.npz b/bark/assets/prompts/v2/ru_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5212b894a12f68cf22477bd0d9dc394032cffc11
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a96d957516c6fb0e554b1a8fae548c6bc21646f202fd0d4c540ea421dc0b0c7
+size 28204
diff --git a/bark/assets/prompts/v2/ru_speaker_5.npz b/bark/assets/prompts/v2/ru_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0db2d67581b807f75b3156cb72d690c6698d164b
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72d472cff7af811dd3c3ae18b46c1ad5ead70b28acba00c3b9bd7d117fe67624
+size 44628
diff --git a/bark/assets/prompts/v2/ru_speaker_6.npz b/bark/assets/prompts/v2/ru_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..123b4639e3a1ac431444d9b148d32381124c0485
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2160d4938f61921405bff3ac69c0168a65722197b8ee41379685f415f7fb40cd
+size 20476
diff --git a/bark/assets/prompts/v2/ru_speaker_7.npz b/bark/assets/prompts/v2/ru_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c99218eee3920194b748ac4816be5f0d2ee9818b
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:941a776f7b1d50d17d1ee23373d4dfc7a9f1a5395177301f847e0a22a5f00628
+size 26020
diff --git a/bark/assets/prompts/v2/ru_speaker_8.npz b/bark/assets/prompts/v2/ru_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8932a9c04a4e16ba5cb029846f2a76a6114a3c92
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cc916253b1e226616d5b81e0d3fec66c0f26a2ba1ae956f30351f12a9b7a2f1
+size 39084
diff --git a/bark/assets/prompts/v2/ru_speaker_9.npz b/bark/assets/prompts/v2/ru_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1f060fca4857396b7fa6e100eb23dd4f772b1acb
--- /dev/null
+++ b/bark/assets/prompts/v2/ru_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abd862b60db01516bcf224033a15c207c25fbdaae994b4748163ad0af697059f
+size 34660
diff --git a/bark/assets/prompts/v2/tr_speaker_0.npz b/bark/assets/prompts/v2/tr_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..5b404c97118a2546d5bc7be0d76677bf96294d2a
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6985d2931a9310cf86b1938a6f06d23b0faf8579186f2ecf3076bb275881064e
+size 22076
diff --git a/bark/assets/prompts/v2/tr_speaker_1.npz b/bark/assets/prompts/v2/tr_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2064b62a0265e616fd4a5a1e8fab4e47bc2e264d
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e34fac6bd9b18f28913d5c55f544caf10ad449ddada3d7c96556d11207569cfa
+size 24476
diff --git a/bark/assets/prompts/v2/tr_speaker_2.npz b/bark/assets/prompts/v2/tr_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..6c608086a22e2931cd31e6a90586b32b9cece557
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8822c6c75198b511e682d6235493e559c4b52a0324d6ac5f5a2d253a78dd019
+size 24956
diff --git a/bark/assets/prompts/v2/tr_speaker_3.npz b/bark/assets/prompts/v2/tr_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..44415d5bc19983f6e9a9cd350c38b00161617a0f
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d00f600343a9b7f3c556aa7778f29b573455b0611dfc1a194dc46304908839fc
+size 28684
diff --git a/bark/assets/prompts/v2/tr_speaker_4.npz b/bark/assets/prompts/v2/tr_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f2b76f974c0bcc98dd81add7a796dc7bf7faffa2
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46ecfff5873b47e4799e9265468c2d15e3737caec7c370a13c37a12d255ff11f
+size 33164
diff --git a/bark/assets/prompts/v2/tr_speaker_5.npz b/bark/assets/prompts/v2/tr_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..64cfd45c0ee36c1d471bda53e677ee1718accd49
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f9bbc5d67bc37d21e02f4620ecd0aaddbe6b0e7a29560e759b042d2823ec21b
+size 17220
diff --git a/bark/assets/prompts/v2/tr_speaker_6.npz b/bark/assets/prompts/v2/tr_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..398e50ea6d26267f6a4eae3d509ced69c15b2c1a
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b46f0589a78a5e40877e3fc017974dfb679068690d49a82824e6847952510732
+size 25276
diff --git a/bark/assets/prompts/v2/tr_speaker_7.npz b/bark/assets/prompts/v2/tr_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..6820267776e1560f638b4cc4f89d477ff236417a
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c2eac63412873fa0d0e65c4018f7ff7df02f691bc6f389c5675af94cdea3623
+size 20260
diff --git a/bark/assets/prompts/v2/tr_speaker_8.npz b/bark/assets/prompts/v2/tr_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..acd6321658c55069079a840c261d0f38ba87f662
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8af6e8790b0cb2622edd1ff31df98f4c32f06bb1ee60a41ed6dce69f2b1b48d
+size 20580
diff --git a/bark/assets/prompts/v2/tr_speaker_9.npz b/bark/assets/prompts/v2/tr_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1f652e09362947e730f754ec36e02c3b16a017f7
--- /dev/null
+++ b/bark/assets/prompts/v2/tr_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5faf0d1cf51b1ab8e6138bb8dd70f5e5122c41bc6fbf8a0536bcbee4f1963ee
+size 28204
diff --git a/bark/assets/prompts/v2/zh_speaker_0.npz b/bark/assets/prompts/v2/zh_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c0da0dd19dee7ea7045b24af8b5ef979b3967d99
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd7ac118a3e944b3f20c89f2446056a00850a630ee16318922acc6572ce80929
+size 20636
diff --git a/bark/assets/prompts/v2/zh_speaker_1.npz b/bark/assets/prompts/v2/zh_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a41097e8fadddf15777cf8e4433602eeaee81e52
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0eacf5c862dfd3c5ac825f2ebb26f323e64309cb712e7e264cbd31c5bca3f038
+size 19836
diff --git a/bark/assets/prompts/v2/zh_speaker_2.npz b/bark/assets/prompts/v2/zh_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4fca832724ff2da321f2ef129e224d524075690d
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e324b47f8250e5798c314f395d4e049575e7ca369d0b6074e91c7bba70e9f26d
+size 21060
diff --git a/bark/assets/prompts/v2/zh_speaker_3.npz b/bark/assets/prompts/v2/zh_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..cd1d101a472fd9dcfa3c6d374f5099e42a002e73
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98c476abc7bf634ffb2d71d363284e7bd8c8abd5e33ec5ca21d4aa5b15730d18
+size 31300
diff --git a/bark/assets/prompts/v2/zh_speaker_4.npz b/bark/assets/prompts/v2/zh_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8c2c94f8f02f8fc8ee490fd1174195634a28ab67
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fa8673a9895ad3302d13ac94193b5ad5da481f1cc276e6181fa895acaae133b
+size 29964
diff --git a/bark/assets/prompts/v2/zh_speaker_5.npz b/bark/assets/prompts/v2/zh_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f2269a6bc79a059214486a5a346e2890bb355b95
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:226edfe5fabc72eeb83a13e350599bc8babe5adc2264b3cdb661fd1258dc4044
+size 17436
diff --git a/bark/assets/prompts/v2/zh_speaker_6.npz b/bark/assets/prompts/v2/zh_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..76a4891df92e084fbd3c1e7c19682ad155694efe
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:285d51fbe81cc263636b5b487fbb6633e6f3cf92c53ca9ab8e6b7f55d4b4a31d
+size 16900
diff --git a/bark/assets/prompts/v2/zh_speaker_7.npz b/bark/assets/prompts/v2/zh_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..7d4d635ffe13e4f9a21e9d5b8f514f9db4f1ebab
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0967cdb14ffa79895747b0d52df9f15bdad80d6c55b7630894345c9a7ec87c91
+size 21060
diff --git a/bark/assets/prompts/v2/zh_speaker_8.npz b/bark/assets/prompts/v2/zh_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1ea29786a479ff5fe94822fee1e00a6484c8bec3
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c028f78530013f29ab8c0c1cf4fe2138106fbe5252951f5f36e0168056779549
+size 19300
diff --git a/bark/assets/prompts/v2/zh_speaker_9.npz b/bark/assets/prompts/v2/zh_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..caf80d75d736fd7a8c0a8febdd23d2e99449896b
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6265bb827008d7af8a45a8e057fe3e91efb347d56208180a9ed990ad54e4d75e
+size 16156
diff --git a/bark/assets/prompts/zh_speaker_0.npz b/bark/assets/prompts/zh_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4c531fb26cfafea44a9a1e90b4efe0ee4a79dc4e
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55bc30061b5c5928454e4c7a1d6206e359a25ca38fec3ca96de0a625fa96c572
+size 19620
diff --git a/bark/assets/prompts/zh_speaker_1.npz b/bark/assets/prompts/zh_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d858600f97683c44cd72ccfa8badfa3b189f0467
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d5d5531998bd91684806eb64a2ac659d8c242f4112d6216697d3cae0b99b978
+size 21380
diff --git a/bark/assets/prompts/zh_speaker_2.npz b/bark/assets/prompts/zh_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2452ed7bcab190bedde76dc7d7d3fe4d82643278
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3001ff8a04e64e0687b0ad145c92684c8758ce7af68fb330dcfee4739fd896b
+size 19460
diff --git a/bark/assets/prompts/zh_speaker_3.npz b/bark/assets/prompts/zh_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..29d23b0a1795126c86f735f3e5f8af17de9184b5
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08b20f307ff4a1e5a947f4394ce2f2c3c5e0e6a9f78e0fd77604fb08359ab90d
+size 32740
diff --git a/bark/assets/prompts/zh_speaker_4.npz b/bark/assets/prompts/zh_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d12adb15a7bc72de351c53b046a6edbb46713cd4
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b6acddfa41ce84e558e09e91fae5fbb01704bc1cef0f000bcc7f30d05e51afc
+size 19676
diff --git a/bark/assets/prompts/zh_speaker_5.npz b/bark/assets/prompts/zh_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1662063711535dffe2ec4c0711e940ca0bd78a7b
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048c7362b237c43ceb0c3a4986b5c42c21ef013cadaf7c77b6348419f801dc93
+size 54548
diff --git a/bark/assets/prompts/zh_speaker_6.npz b/bark/assets/prompts/zh_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9877675833fe910b4fd15b6938e35a8bf1434073
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d7359be4a984930a81103043409b695e383d493f4edd6d4786537b1730a95c0
+size 23516
diff --git a/bark/assets/prompts/zh_speaker_7.npz b/bark/assets/prompts/zh_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f83e4af9176bc23fb0dbafaeadd0c3f24dcb14e4
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:560ccbd20b16a2313cdc44ed578c8fb4dcbe51c2d1c57756dc242d185a6b88d3
+size 22556
diff --git a/bark/assets/prompts/zh_speaker_8.npz b/bark/assets/prompts/zh_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..dff9d012159fd857ba4070c99fb96a66a8c8de41
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26eb3e2589f21f88aa963f052cc5134c6510b1cdb0033be277733bc7dc77157c
+size 20580
diff --git a/bark/assets/prompts/zh_speaker_9.npz b/bark/assets/prompts/zh_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..98fc91445386fe8ea4aabe7a9172d10e4298b557
--- /dev/null
+++ b/bark/assets/prompts/zh_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15ab7bbb47bf326e454cc1d299f4069d0fa9ea8e934273dbed4cbf1116404322
+size 18396
diff --git a/bark/cli.py b/bark/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..924af2271bd2b698b251cfd37ff05ab44bfb96f7
--- /dev/null
+++ b/bark/cli.py
@@ -0,0 +1,71 @@
+import argparse
+from typing import Dict, Optional, Union
+import os
+
+from scipy.io.wavfile import write as write_wav
+from .api import generate_audio
+from .generation import SAMPLE_RATE
+
+
def cli():
    """Commandline interface for Bark text-to-audio generation.

    Parses command-line flags, synthesizes audio from ``--text`` and writes
    the result as a WAV file to ``--output_dir/--output_filename``.
    Errors are reported to stdout rather than raised, since this is the
    top-level CLI boundary.
    """

    def _str2bool(value):
        # argparse's ``type=bool`` treats ANY non-empty string — including
        # "False" — as True.  Parse the common spellings explicitly so that
        # e.g. ``--silent False`` actually disables the flag.
        return str(value).lower() in ("true", "1", "t", "yes", "y")

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--text", type=str, help="text to be turned into audio")
    parser.add_argument(
        "--output_filename",
        type=str,
        default="bark_generation.wav",
        help="output audio file name",
    )
    parser.add_argument("--output_dir", type=str, default=".", help="directory to save the outputs")
    parser.add_argument(
        "--history_prompt",
        type=str,
        default=None,
        help="history choice for audio cloning, be path to the .npz file.",
    )
    parser.add_argument(
        "--text_temp",
        default=0.7,
        type=float,
        help="generation temperature (1.0 more diverse, 0.0 more conservative)",
    )
    parser.add_argument(
        "--waveform_temp",
        default=0.7,
        type=float,
        help="generation temperature (1.0 more diverse, 0.0 more conservative)",
    )
    parser.add_argument("--silent", default=False, type=_str2bool, help="disable progress bar")
    parser.add_argument(
        "--output_full",
        default=False,
        type=_str2bool,
        help="return full generation to be used as a history prompt",
    )

    args = parser.parse_args()

    try:
        os.makedirs(args.output_dir, exist_ok=True)
        result = generate_audio(
            args.text,
            history_prompt=args.history_prompt,
            text_temp=args.text_temp,
            waveform_temp=args.waveform_temp,
            silent=args.silent,
            output_full=args.output_full,
        )
        # With output_full=True generate_audio returns
        # (full_generation, audio_array); only the audio array can be
        # written to the WAV file.
        if args.output_full:
            _, generated_audio = result
        else:
            generated_audio = result
        output_file_path = os.path.join(args.output_dir, args.output_filename)
        write_wav(output_file_path, SAMPLE_RATE, generated_audio)
        print(f"Done! Output audio file is saved at: '{output_file_path}'")
    except Exception as e:
        print(f"Oops, an error occurred: {e}")
diff --git a/bark/generation.py b/bark/generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..57903f719c478800e232c49852ea93a101846c17
--- /dev/null
+++ b/bark/generation.py
@@ -0,0 +1,850 @@
+import contextlib
+import gc
+import os
+import re
+import requests
+import gc
+import sys
+
+from encodec import EncodecModel
+import funcy
+import logging
+import numpy as np
+from scipy.special import softmax
+import torch
+import torch.nn.functional as F
+import tqdm
+from transformers import BertTokenizer
+from huggingface_hub import hf_hub_download, hf_hub_url
+
+from .model import GPTConfig, GPT
+from .model_fine import FineGPT, FineGPTConfig
+from .settings import initenv
+
initenv(sys.argv)

# BARK_FORCE_CPU is an environment *string*; any non-empty value ("1",
# "true", ...) requests the CPU/no-autocast path.  The previous check
# compared the string against ``True`` (``global_force_cpu != True``),
# which is true for every string value — so the flag was silently ignored.
global_force_cpu = os.environ.get("BARK_FORCE_CPU", "").lower() in ("true", "1", "t")
if (
    not global_force_cpu
    and torch.cuda.is_available()
    and hasattr(torch.cuda, "amp")
    and hasattr(torch.cuda.amp, "autocast")
    and hasattr(torch.cuda, "is_bf16_supported")
    and torch.cuda.is_bf16_supported()
):
    # bfloat16 autocast is only enabled when the CUDA build and hardware
    # both support it.
    autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16)
else:
    @contextlib.contextmanager
    def autocast():
        """No-op stand-in used when CUDA bf16 autocast is unavailable."""
        yield
+
+
# Lazily-populated module-level caches: model key -> loaded model, and
# model key -> device the model should run on.  (The original ``global``
# statements at module scope were no-ops and have been removed.)
models = {}
models_devices = {}
+
+
# Token context length used by the GPT models — presumably the transformer
# sequence limit; confirm against model.py.
CONTEXT_WINDOW_SIZE = 1024

# Semantic token stream: rate in tokens per second of audio, and vocab size.
SEMANTIC_RATE_HZ = 49.9
SEMANTIC_VOCAB_SIZE = 10_000

# Codec token layout: entries per codebook, codebooks used by the coarse
# stage, total codebooks in the fine stage, and the coarse token rate in Hz.
# (Counts presumably mirror the EnCodec configuration — confirm.)
CODEBOOK_SIZE = 1024
N_COARSE_CODEBOOKS = 2
N_FINE_CODEBOOKS = 8
COARSE_RATE_HZ = 75

# Sample rate (Hz) of generated audio.
SAMPLE_RATE = 24_000
+
+
# (display name, language code) pairs for every language shipping speaker
# prompt assets.
SUPPORTED_LANGS = [
    ("English", "en"),
    ("German", "de"),
    ("Spanish", "es"),
    ("French", "fr"),
    ("Hindi", "hi"),
    ("Italian", "it"),
    ("Japanese", "ja"),
    ("Korean", "ko"),
    ("Polish", "pl"),
    ("Portuguese", "pt"),
    ("Russian", "ru"),
    ("Turkish", "tr"),
    ("Chinese", "zh"),
]

# Every valid built-in history-prompt name: "announcer" plus ten numbered
# speakers per language, in both the top-level and "v2" prompt sets.
ALLOWED_PROMPTS = {"announcer"} | {
    f"{prefix}{code}_speaker_{idx}"
    for _, code in SUPPORTED_LANGS
    for prefix in ("", f"v2{os.path.sep}")
    for idx in range(10)
}
+
+
logger = logging.getLogger(__name__)

# Directory containing this module (used to resolve bundled assets).
CUR_PATH = os.path.dirname(os.path.abspath(__file__))

# Model checkpoints are cached in a project-local directory; the upstream
# default of ~/.cache/suno/bark_v0 was deliberately replaced.  The old
# commented-out alternatives were dead code and have been removed.
CACHE_DIR = "./models"
+
+
+def _cast_bool_env_var(s):
+ return s.lower() in ('true', '1', 't')
+
# Opt-in feature flags, read once from the environment at import time.
USE_SMALL_MODELS = _cast_bool_env_var(os.environ.get("SUNO_USE_SMALL_MODELS", "False"))  # prefer the "_small" checkpoints
GLOBAL_ENABLE_MPS = _cast_bool_env_var(os.environ.get("SUNO_ENABLE_MPS", "False"))  # allow Apple MPS as a GPU device
OFFLOAD_CPU = _cast_bool_env_var(os.environ.get("SUNO_OFFLOAD_CPU", "False"))  # presumably offloads idle models to CPU — confirm usage
+
# Hugging Face Hub location of each checkpoint.  Keys with a "_small"
# suffix are the reduced-size variants of the text/coarse/fine models.
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}
+
+
# Warn once at import time when this torch build lacks
# F.scaled_dot_product_attention (flash attention); only relevant if a GPU
# is present.
if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available():
    logger.warning(
        "torch version does not support flash attention. You will get faster" +
        " inference speed by upgrade torch to newest nightly version."
    )
+
+
def grab_best_device(use_gpu=True):
    """Return the preferred torch device string: "cuda", "mps" or "cpu".

    MPS is only considered when the SUNO_ENABLE_MPS flag is set.
    """
    if use_gpu and torch.cuda.device_count() > 0:
        return "cuda"
    if use_gpu and GLOBAL_ENABLE_MPS and torch.backends.mps.is_available():
        return "mps"
    return "cpu"
+
+
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the checkpoint for *model_type*.

    The "_small" variant is selected when either the call or the
    SUNO_USE_SMALL_MODELS flag requests it.
    """
    suffix = "_small" if (use_small or USE_SMALL_MODELS) else ""
    file_name = REMOTE_MODEL_PATHS[model_type + suffix]["file_name"]
    return os.path.join(CACHE_DIR, file_name)
+
+"""
+def _download(from_hf_path, file_name, destfilename):
+ os.makedirs(CACHE_DIR, exist_ok=True)
+ hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR, local_dir_use_symlinks=False)
+ # Bug in original repo? Downloaded name differs from expected...
+ if not os.path.exists(destfilename):
+ localname = os.path.join(CACHE_DIR, file_name)
+ os.rename(localname, destfilename)
+"""
+def _download(from_hf_path, file_name):
+ os.makedirs(CACHE_DIR, exist_ok=True)
+ hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
+
+
class InferenceContext:
    """Temporarily pin ``torch.backends.cudnn.benchmark`` while inferring.

    Benchmarking is off by default because inputs are not expected to share
    a length, which makes cudnn autotuning counterproductive.  The previous
    value is restored on exit.
    """

    def __init__(self, benchmark=False):
        self._target_benchmark = benchmark
        self._saved_benchmark = None

    def __enter__(self):
        self._saved_benchmark = torch.backends.cudnn.benchmark
        torch.backends.cudnn.benchmark = self._target_benchmark

    def __exit__(self, exc_type, exc_value, exc_traceback):
        torch.backends.cudnn.benchmark = self._saved_benchmark
+
+
# Allow TF32 math on CUDA: large matmul/conv speedups at slightly reduced
# precision, which is acceptable for inference here.
if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
+
+
@contextlib.contextmanager
def _inference_mode():
    # Combine every inference-friendly mode in one context: cudnn-benchmark
    # pinning, torch.inference_mode (no autograd bookkeeping), no_grad
    # (redundant with inference_mode but harmless), and autocast for
    # mixed-precision execution.
    with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast():
        yield
+
+
+def _clear_cuda_cache():
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.synchronize()
+
+
def clean_models(model_key=None):
    """Drop one cached model (or all when *model_key* is None) and free memory."""
    global models
    targets = list(models.keys()) if model_key is None else [model_key]
    for key in targets:
        if key in models:
            del models[key]
    # reclaim GPU memory first, then let Python collect the host-side objects
    _clear_cuda_cache()
    gc.collect()
+
+
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Download (if missing) and load one bark GPT checkpoint onto *device*.

    Returns the bare model, except for ``model_type == "text"`` where a dict
    of ``{"model": ..., "tokenizer": ...}`` is returned (the text model needs
    a BERT tokenizer for input encoding).
    """
    if model_type == "text":
        ConfigClass = GPTConfig
        ModelClass = GPT
    elif model_type == "coarse":
        ConfigClass = GPTConfig
        ModelClass = GPT
    elif model_type == "fine":
        ConfigClass = FineGPTConfig
        ModelClass = FineGPT
    else:
        raise NotImplementedError()

    # Force-remove Models to allow running on >12Gb GPU
    # CF: Probably not needed anymore
    #global models
    #models.clear()
    #gc.collect()
    #torch.cuda.empty_cache()
    # to here...

    model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        ## added next two lines to make it super clear which model is being downloaded
        remote_filename = hf_hub_url(model_info["repo_id"], model_info["file_name"])
        print(f"Downloading {model_key} {model_info['repo_id']} remote model file {remote_filename} {model_info['file_name']} to {CACHE_DIR}")
        _download(model_info["repo_id"], model_info["file_name"])
    # add next line to make it super clear which model is being loaded
    print(f"Loading {model_key} model from {ckpt_path} to {device}") # added
    checkpoint = torch.load(ckpt_path, map_location=device)
    # older checkpoints store a single "vocab_size"; newer configs want
    # separate input/output vocab sizes, so translate in place
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    gptconf = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(gptconf)
    state_dict = checkpoint["model"]
    # strip the torch.compile wrapper prefix so keys match the bare model
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
    # ".attn.bias" buffers are recomputed, so mismatches there are expected
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")])
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")])
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    # strict=False tolerates only the attn.bias buffers validated above
    model.load_state_dict(state_dict, strict=False)
    n_params = model.get_num_params()
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    _clear_cuda_cache()
    if model_type == "text":
        tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
        return {
            "model": model,
            "tokenizer": tokenizer,
        }
    return model
+
+
def _load_codec_model(device):
    """Instantiate the 24 kHz Encodec codec at 6 kbps and move it to *device*."""
    codec = EncodecModel.encodec_model_24khz()
    codec.set_target_bandwidth(6.0)
    codec.eval()
    codec.to(device)
    _clear_cuda_cache()
    return codec
+
+
def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text"):
    """Return the cached model for *model_type*, loading it on first use.

    With OFFLOAD_CPU set, weights are parked on the CPU between calls and
    ``models_devices`` remembers the device they should run on at inference
    time.
    """
    _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small)
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    global models
    global models_devices
    device = grab_best_device(use_gpu=use_gpu)
    model_key = f"{model_type}"
    if OFFLOAD_CPU:
        # record the target device but load onto the CPU for now
        models_devices[model_key] = device
        device = "cpu"
    if model_key not in models or force_reload:
        ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
        clean_models(model_key=model_key)
        model = _load_model_f(ckpt_path, device)
        models[model_key] = model
    if model_type == "text":
        # the text entry is a {"model", "tokenizer"} dict, not a bare module
        models[model_key]["model"].to(device)
    else:
        models[model_key].to(device)
    return models[model_key]
+
+
def load_codec_model(use_gpu=True, force_reload=False):
    """Return the cached Encodec codec model, loading it on first use."""
    global models
    global models_devices
    device = grab_best_device(use_gpu=use_gpu)
    if device == "mps":
        # encodec doesn't support mps
        device = "cpu"
    model_key = "codec"
    if OFFLOAD_CPU:
        # record the target device; weights stay on the CPU between calls
        models_devices[model_key] = device
        device = "cpu"
    if model_key not in models or force_reload:
        clean_models(model_key=model_key)
        model = _load_codec_model(device)
        models[model_key] = model
    models[model_key].to(device)
    return models[model_key]
+
+
def preload_models(
    text_use_gpu=True,
    text_use_small=False,
    coarse_use_gpu=True,
    coarse_use_small=False,
    fine_use_gpu=True,
    fine_use_small=False,
    codec_use_gpu=True,
    force_reload=False
):
    """Load all the necessary models for the pipeline."""
    wants_gpu = text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu
    if grab_best_device() == "cpu" and wants_gpu:
        logger.warning("No GPU being used. Careful, inference might be very slow!")
    # the three GPT stages share one loading path; drive it from a table
    for model_type, use_gpu, use_small in (
        ("text", text_use_gpu, text_use_small),
        ("coarse", coarse_use_gpu, coarse_use_small),
        ("fine", fine_use_gpu, fine_use_small),
    ):
        load_model(
            model_type=model_type,
            use_gpu=use_gpu,
            use_small=use_small,
            force_reload=force_reload,
        )
    load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload)
+
+
+####
+# Generation Functionality
+####
+
+
+def _tokenize(tokenizer, text):
+ return tokenizer.encode(text, add_special_tokens=False)
+
+
+def _detokenize(tokenizer, enc_text):
+ return tokenizer.decode(enc_text)
+
+
+def _normalize_whitespace(text):
+ return re.sub(r"\s+", " ", text).strip()
+
+
# Token-id layout of the text model's combined vocabulary:
TEXT_ENCODING_OFFSET = 10_048  # text token ids are shifted above the semantic ids
SEMANTIC_PAD_TOKEN = 10_000  # pads the semantic history; its logit slot doubles as EOS
TEXT_PAD_TOKEN = 129_595  # pads the 256-token text context
SEMANTIC_INFER_TOKEN = 129_599  # separator appended right before generation starts
+
+
+def _load_history_prompt(history_prompt_input):
+ if isinstance(history_prompt_input, str) and history_prompt_input.endswith(".npz"):
+ history_prompt = np.load(history_prompt_input)
+ elif isinstance(history_prompt_input, str):
+ # make sure this works on non-ubuntu
+ history_prompt_input = os.path.join(*history_prompt_input.split("/"))
+# if history_prompt_input not in ALLOWED_PROMPTS:
+# raise ValueError("history prompt not found")
+ history_prompt = np.load(
+ os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt_input}.npz")
+ )
+ elif isinstance(history_prompt_input, dict):
+ assert("semantic_prompt" in history_prompt_input)
+ assert("coarse_prompt" in history_prompt_input)
+ assert("fine_prompt" in history_prompt_input)
+ history_prompt = history_prompt_input
+ else:
+ raise ValueError("history prompt format unrecognized")
+ return history_prompt
+
+
def generate_text_semantic(
    text,
    history_prompt=None,
    temp=0.7,
    top_k=None,
    top_p=None,
    silent=False,
    min_eos_p=0.2,
    max_gen_duration_s=None,
    allow_early_stop=True,
    use_kv_caching=False,
):
    """Generate semantic tokens from text.

    Args:
        text: input text; whitespace is normalized before tokenizing.
        history_prompt: optional voice prompt (path, bundled name, or dict).
        temp: softmax temperature for sampling.
        top_k / top_p: optional top-k / nucleus filtering of the logits.
        silent: suppress the progress bar.
        min_eos_p: stop early once the EOS probability reaches this value.
        max_gen_duration_s: hard cap on generated duration, in seconds.
        allow_early_stop: permit stopping before the 768-step budget.
        use_kv_caching: reuse transformer KV state between steps (faster).

    Returns:
        1-D numpy array of semantic token ids.
    """
    assert isinstance(text, str)
    text = _normalize_whitespace(text)
    assert len(text.strip()) > 0
    if history_prompt is not None:
        history_prompt = _load_history_prompt(history_prompt)
        semantic_history = history_prompt["semantic_prompt"]
        assert (
            isinstance(semantic_history, np.ndarray)
            and len(semantic_history.shape) == 1
            and len(semantic_history) > 0
            and semantic_history.min() >= 0
            and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
        )
    else:
        semantic_history = None
    # load models if not yet exist
    global models
    global models_devices
    if "text" not in models:
        preload_models()
    model_container = models["text"]
    model = model_container["model"]
    tokenizer = model_container["tokenizer"]
    encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET
    if OFFLOAD_CPU:
        model.to(models_devices["text"])
    device = next(model.parameters()).device
    # text context is fixed at 256 tokens: truncate with a warning, then pad
    if len(encoded_text) > 256:
        p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1)
        logger.warning(f"warning, text too long, lopping of last {p}%")
        encoded_text = encoded_text[:256]
    encoded_text = np.pad(
        encoded_text,
        (0, 256 - len(encoded_text)),
        constant_values=TEXT_PAD_TOKEN,
        mode="constant",
    )
    if semantic_history is not None:
        semantic_history = semantic_history.astype(np.int64)
        # lop off if history is too long, pad if needed
        semantic_history = semantic_history[-256:]
        semantic_history = np.pad(
            semantic_history,
            (0, 256 - len(semantic_history)),
            constant_values=SEMANTIC_PAD_TOKEN,
            mode="constant",
        )
    else:
        semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256)
    # model context layout: 256 text tokens + 256 history tokens + 1 infer separator
    x = torch.from_numpy(
        np.hstack([
            encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN])
        ]).astype(np.int64)
    )[None]
    assert x.shape[1] == 256 + 256 + 1
    with _inference_mode():
        x = x.to(device)
        n_tot_steps = 768
        # custom tqdm updates since we don't know when eos will occur
        pbar = tqdm.tqdm(disable=silent, total=n_tot_steps)
        pbar_state = 0
        tot_generated_duration_s = 0
        kv_cache = None
        for n in range(n_tot_steps):
            if use_kv_caching and kv_cache is not None:
                # with a warm cache only the newest token needs to be fed
                x_input = x[:, [-1]]
            else:
                x_input = x
            logits, kv_cache = model(
                x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache
            )
            relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE]
            if allow_early_stop:
                relevant_logits = torch.hstack(
                    (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]])  # eos
                )
            if top_p is not None:
                # faster to convert to numpy
                original_device = relevant_logits.device
                relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
                sorted_indices = np.argsort(relevant_logits)[::-1]
                sorted_logits = relevant_logits[sorted_indices]
                cumulative_probs = np.cumsum(softmax(sorted_logits))
                sorted_indices_to_remove = cumulative_probs > top_p
                # shift right so the first token above the threshold is kept
                sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
                sorted_indices_to_remove[0] = False
                relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
                relevant_logits = torch.from_numpy(relevant_logits)
                relevant_logits = relevant_logits.to(original_device)
            if top_k is not None:
                v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
                relevant_logits[relevant_logits < v[-1]] = -float("Inf")
            probs = F.softmax(relevant_logits / temp, dim=-1)
            item_next = torch.multinomial(probs, num_samples=1).to(torch.int32)
            # the appended EOS slot samples as id SEMANTIC_VOCAB_SIZE
            if allow_early_stop and (
                item_next == SEMANTIC_VOCAB_SIZE
                or (min_eos_p is not None and probs[-1] >= min_eos_p)
            ):
                # eos found, so break
                pbar.update(n - pbar_state)
                break
            x = torch.cat((x, item_next[None]), dim=1)
            tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ
            if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s:
                pbar.update(n - pbar_state)
                break
            if n == n_tot_steps - 1:
                pbar.update(n - pbar_state)
                break
            del logits, relevant_logits, probs, item_next

            if n > pbar_state:
                if n > pbar.total:
                    pbar.total = n
                pbar.update(n - pbar_state)
                pbar_state = n
        pbar.total = n
        pbar.refresh()
        pbar.close()
        # strip the 513-token conditioning prefix; keep only generated tokens
        out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :]
    if OFFLOAD_CPU:
        model.to("cpu")
    assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE)
    _clear_cuda_cache()
    return out
+
+
def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE):
    """Interleave a (n_codebooks, T) code array into one 1-D token stream.

    Row ``n`` is shifted by ``n * offset_size`` so ids from different
    codebooks never collide, then columns are read in time order
    (Fortran-order ravel). Pass ``offset_size=None`` to skip the shift.
    """
    assert len(arr.shape) == 2
    flat = arr.copy()
    if offset_size is not None:
        for row in range(1, flat.shape[0]):
            flat[row, :] += offset_size * row
    return flat.ravel("F")
+
+
COARSE_SEMANTIC_PAD_TOKEN = 12_048  # pads the 256-token semantic window fed to the coarse model
COARSE_INFER_TOKEN = 12_050  # separator between semantic context and coarse history
+
+
def generate_coarse(
    x_semantic,
    history_prompt=None,
    temp=0.7,
    top_k=None,
    top_p=None,
    silent=False,
    max_coarse_history=630,  # min 60 (faster), max 630 (more context)
    sliding_window_len=60,
    use_kv_caching=False,
):
    """Generate coarse audio codes from semantic tokens.

    Decodes ``N_COARSE_CODEBOOKS`` interleaved codebooks autoregressively in
    sliding windows of *sliding_window_len* steps, conditioning each window on
    up to 256 semantic tokens plus up to *max_coarse_history* coarse tokens.
    Returns a (N_COARSE_CODEBOOKS, T) int array of codebook entries.
    """
# CF: Commented out because it breaks swap voice more than once
#    assert (
#        isinstance(x_semantic, np.ndarray)
#        and len(x_semantic.shape) == 1
#        and len(x_semantic) > 0
#        and x_semantic.min() >= 0
#        and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1
#    )
    assert 60 <= max_coarse_history <= 630
    assert max_coarse_history + sliding_window_len <= 1024 - 256
    semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS
    max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
    if history_prompt is not None:
        history_prompt = _load_history_prompt(history_prompt)
        x_semantic_history = history_prompt["semantic_prompt"]
        x_coarse_history = history_prompt["coarse_prompt"]
        assert (
            isinstance(x_semantic_history, np.ndarray)
            and len(x_semantic_history.shape) == 1
            and len(x_semantic_history) > 0
            and x_semantic_history.min() >= 0
            and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
            and isinstance(x_coarse_history, np.ndarray)
            and len(x_coarse_history.shape) == 2
            and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS
            and x_coarse_history.shape[-1] >= 0
            and x_coarse_history.min() >= 0
            and x_coarse_history.max() <= CODEBOOK_SIZE - 1
            #and (
            #    round(x_coarse_history.shape[-1] / len(x_semantic_history), 1)
            #    == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1)
            #)
        )
        # shift coarse ids above the semantic vocab so both live in one stream
        x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE
        # trim histories correctly
        n_semantic_hist_provided = np.min(
            [
                max_semantic_history,
                len(x_semantic_history) - len(x_semantic_history) % 2,
                int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)),
            ]
        )
        n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
        x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32)
        x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32)
        # TODO: bit of a hack for time alignment (sounds better)
        x_coarse_history = x_coarse_history[:-2]
    else:
        x_semantic_history = np.array([], dtype=np.int32)
        x_coarse_history = np.array([], dtype=np.int32)
    # load models if not yet exist
    global models
    global models_devices
    if "coarse" not in models:
        preload_models()
    model = models["coarse"]
    if OFFLOAD_CPU:
        model.to(models_devices["coarse"])
    device = next(model.parameters()).device
    # total steps: one per codebook entry, rounded to whole codebook groups
    n_steps = int(
        round(
            np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS)
            * N_COARSE_CODEBOOKS
        )
    )
    assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0
    x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32)
    x_coarse = x_coarse_history.astype(np.int32)
    base_semantic_idx = len(x_semantic_history)
    with _inference_mode():
        x_semantic_in = torch.from_numpy(x_semantic)[None].to(device)
        x_coarse_in = torch.from_numpy(x_coarse)[None].to(device)
        n_window_steps = int(np.ceil(n_steps / sliding_window_len))
        n_step = 0
        for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent):
            semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio))
            # pad from right side
            x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :]
            x_in = x_in[:, :256]
            x_in = F.pad(
                x_in,
                (0, 256 - x_in.shape[-1]),
                "constant",
                COARSE_SEMANTIC_PAD_TOKEN,
            )
            # window layout: 256 semantic + 1 infer token + recent coarse history
            x_in = torch.hstack(
                [
                    x_in,
                    torch.tensor([COARSE_INFER_TOKEN])[None].to(device),
                    x_coarse_in[:, -max_coarse_history:],
                ]
            )
            kv_cache = None
            for _ in range(sliding_window_len):
                if n_step >= n_steps:
                    continue
                # alternating codebooks: even steps sample codebook 0's logit
                # range, odd steps the second codebook's range
                is_major_step = n_step % N_COARSE_CODEBOOKS == 0

                if use_kv_caching and kv_cache is not None:
                    x_input = x_in[:, [-1]]
                else:
                    x_input = x_in

                logits, kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache)
                logit_start_idx = (
                    SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE
                )
                logit_end_idx = (
                    SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE
                )
                relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx]
                if top_p is not None:
                    # faster to convert to numpy
                    original_device = relevant_logits.device
                    relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
                    sorted_indices = np.argsort(relevant_logits)[::-1]
                    sorted_logits = relevant_logits[sorted_indices]
                    cumulative_probs = np.cumsum(softmax(sorted_logits))
                    sorted_indices_to_remove = cumulative_probs > top_p
                    sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
                    sorted_indices_to_remove[0] = False
                    relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
                    relevant_logits = torch.from_numpy(relevant_logits)
                    relevant_logits = relevant_logits.to(original_device)
                if top_k is not None:
                    v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
                    relevant_logits[relevant_logits < v[-1]] = -float("Inf")
                probs = F.softmax(relevant_logits / temp, dim=-1)
                item_next = torch.multinomial(probs, num_samples=1).to(torch.int32)
                # shift the sampled local id back into the global token space
                item_next += logit_start_idx
                x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1)
                x_in = torch.cat((x_in, item_next[None]), dim=1)
                del logits, relevant_logits, probs, item_next
                n_step += 1
            del x_in
        del x_semantic_in
    if OFFLOAD_CPU:
        model.to("cpu")
    gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :]
    del x_coarse_in
    assert len(gen_coarse_arr) == n_steps
    # de-interleave back to (n_codebooks, T) and undo the vocabulary offsets
    gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE
    for n in range(1, N_COARSE_CODEBOOKS):
        gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE
    _clear_cuda_cache()
    return gen_coarse_audio_arr
+
+
def generate_fine(
    x_coarse_gen,
    history_prompt=None,
    temp=0.5,
    silent=True,
):
    """Generate full audio codes from coarse audio codes.

    The fine model is non-causal: it fills in the remaining codebooks over
    1024-frame windows that slide in 512-frame hops, overwriting overlap
    regions. Returns a (N_FINE_CODEBOOKS, T) int array with T equal to the
    coarse input length.
    """
    assert (
        isinstance(x_coarse_gen, np.ndarray)
        and len(x_coarse_gen.shape) == 2
        and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1
        and x_coarse_gen.shape[1] > 0
        and x_coarse_gen.min() >= 0
        and x_coarse_gen.max() <= CODEBOOK_SIZE - 1
    )
    if history_prompt is not None:
        history_prompt = _load_history_prompt(history_prompt)
        x_fine_history = history_prompt["fine_prompt"]
        assert (
            isinstance(x_fine_history, np.ndarray)
            and len(x_fine_history.shape) == 2
            and x_fine_history.shape[0] == N_FINE_CODEBOOKS
            and x_fine_history.shape[1] >= 0
            and x_fine_history.min() >= 0
            and x_fine_history.max() <= CODEBOOK_SIZE - 1
        )
    else:
        x_fine_history = None
    n_coarse = x_coarse_gen.shape[0]
    # load models if not yet exist
    global models
    global models_devices
    if "fine" not in models:
        preload_models()
    model = models["fine"]
    if OFFLOAD_CPU:
        model.to(models_devices["fine"])
    device = next(model.parameters()).device
    # make input arr: missing codebook rows are filled with the pad id
    in_arr = np.vstack(
        [
            x_coarse_gen,
            np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1]))
            + CODEBOOK_SIZE,  # padding
        ]
    ).astype(np.int32)
    # prepend history if available (max 512)
    if x_fine_history is not None:
        x_fine_history = x_fine_history.astype(np.int32)
        in_arr = np.hstack(
            [
                x_fine_history[:, -512:].astype(np.int32),
                in_arr,
            ]
        )
        n_history = x_fine_history[:, -512:].shape[1]
    else:
        n_history = 0
    n_remove_from_end = 0
    # need to pad if too short (since non-causal model)
    if in_arr.shape[1] < 1024:
        n_remove_from_end = 1024 - in_arr.shape[1]
        in_arr = np.hstack(
            [
                in_arr,
                np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE,
            ]
        )
    # we can be lazy about fractional loop and just keep overwriting codebooks
    n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1
    with _inference_mode():
        # work in (T, n_codebooks) layout on the model device
        in_arr = torch.tensor(in_arr.T).to(device)
        for n in tqdm.tqdm(range(n_loops), disable=silent):
            start_idx = np.min([n * 512, in_arr.shape[0] - 1024])
            start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512])
            rel_start_fill_idx = start_fill_idx - start_idx
            in_buffer = in_arr[start_idx : start_idx + 1024, :][None]
            for nn in range(n_coarse, N_FINE_CODEBOOKS):
                logits = model(nn, in_buffer)
                if temp is None:
                    # greedy decoding
                    relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE]
                    codebook_preds = torch.argmax(relevant_logits, -1)
                else:
                    relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp
                    probs = F.softmax(relevant_logits, dim=-1)
                    codebook_preds = torch.multinomial(
                        probs[rel_start_fill_idx:1024], num_samples=1
                    ).reshape(-1)
                codebook_preds = codebook_preds.to(torch.int32)
                in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds
                del logits, codebook_preds
            # transfer over info into model_in and convert to numpy
            for nn in range(n_coarse, N_FINE_CODEBOOKS):
                in_arr[
                    start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn
                ] = in_buffer[0, rel_start_fill_idx:, nn]
            del in_buffer
        gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T
        del in_arr
    if OFFLOAD_CPU:
        model.to("cpu")
    # drop the history prefix and any right-padding added above
    gen_fine_arr = gen_fine_arr[:, n_history:]
    if n_remove_from_end > 0:
        gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end]
    assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1]
    _clear_cuda_cache()
    return gen_fine_arr
+
+
def codec_decode(fine_tokens):
    """Turn quantized audio codes into audio array using encodec."""
    # load models if not yet exist
    global models
    global models_devices
    if "codec" not in models:
        preload_models()
    model = models["codec"]
    if OFFLOAD_CPU:
        model.to(models_devices["codec"])
    device = next(model.parameters()).device
    arr = torch.from_numpy(fine_tokens)[None]
    arr = arr.to(device)
    # (1, n_codebooks, T) -> (n_codebooks, 1, T); presumably the codebook-first
    # layout encodec's quantizer.decode expects -- TODO confirm against encodec API
    arr = arr.transpose(0, 1)
    emb = model.quantizer.decode(arr)
    out = model.decoder(emb)
    audio_arr = out.detach().cpu().numpy().squeeze()
    del arr, emb, out
    if OFFLOAD_CPU:
        # park weights back on the CPU until the next call
        model.to("cpu")
    return audio_arr
diff --git a/bark/hubert/__init__.py b/bark/hubert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bark/hubert/__pycache__/__init__.cpython-313.pyc b/bark/hubert/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cba3209c6a81dc7272e5281e6cfa0e8adecba75f
Binary files /dev/null and b/bark/hubert/__pycache__/__init__.cpython-313.pyc differ
diff --git a/bark/hubert/__pycache__/customtokenizer.cpython-313.pyc b/bark/hubert/__pycache__/customtokenizer.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..591b88bb9ac7f9090533c04628be5306b43a38f6
Binary files /dev/null and b/bark/hubert/__pycache__/customtokenizer.cpython-313.pyc differ
diff --git a/bark/hubert/__pycache__/hubert_manager.cpython-313.pyc b/bark/hubert/__pycache__/hubert_manager.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9babb794e965c98e5fe2f542cd9dbd2a3c2684e8
Binary files /dev/null and b/bark/hubert/__pycache__/hubert_manager.cpython-313.pyc differ
diff --git a/bark/hubert/__pycache__/pre_kmeans_hubert.cpython-313.pyc b/bark/hubert/__pycache__/pre_kmeans_hubert.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a153a1fbdb9de95f27c4cb25e8fbbaf8c629e459
Binary files /dev/null and b/bark/hubert/__pycache__/pre_kmeans_hubert.cpython-313.pyc differ
diff --git a/bark/hubert/customtokenizer.py b/bark/hubert/customtokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0cbdbf30285c9b707aa5e11eb63dff0902bbb96
--- /dev/null
+++ b/bark/hubert/customtokenizer.py
@@ -0,0 +1,195 @@
+"""
+Custom tokenizer model.
+Author: https://www.github.com/gitmylo/
+License: MIT
+"""
+
+import json
+import os.path
+from zipfile import ZipFile
+
+import numpy
+import torch
+from torch import nn, optim
+from torch.serialization import MAP_LOCATION
+from tqdm.auto import tqdm
+
+
class CustomTokenizer(nn.Module):
    """LSTM sequence labeler mapping HuBERT features to semantic token ids.

    Version 0: LSTM -> Linear. Version 1: LSTM -> Linear(4096) -> Linear.
    Forward output is per-frame log-probabilities over *output_size* classes.
    """

    def __init__(self, hidden_size=1024, input_size=768, output_size=10000, version=0):
        super(CustomTokenizer, self).__init__()
        next_size = input_size
        if version == 0:
            self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
            next_size = hidden_size
        if version == 1:
            self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
            self.intermediate = nn.Linear(hidden_size, 4096)
            next_size = 4096

        self.fc = nn.Linear(next_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)
        # set lazily by prepare_training(); stays None for inference-only use
        self.optimizer: optim.Optimizer = None
        self.lossfunc = nn.CrossEntropyLoss()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.version = version

    def forward(self, x):
        """Return per-frame log-probabilities with shape (N, output_size)."""
        x, _ = self.lstm(x)
        if self.version == 1:
            x = self.intermediate(x)
        x = self.fc(x)
        x = self.softmax(x)
        return x

    @torch.no_grad()
    def get_token(self, x):
        """
        Predict the most likely token id for every frame.
        :param x: An array with shape (N, input_size) where N is a whole number greater or equal to 1, and input_size is the input size used when creating the model.
        :return: An array with shape (N,) where N is the same as N from the input. Every number in the array is a whole number in range 0...output_size - 1 where output_size is the output size used when creating the model.
        """
        return torch.argmax(self(x), dim=1)

    def prepare_training(self):
        """Create the optimizer; must be called once before train_step()."""
        self.optimizer = optim.Adam(self.parameters(), 0.001)

    def train_step(self, x_train, y_train, log_loss=False):
        """Run one optimizer step on a single (features, targets) pair.

        Lengths are reconciled by trimming the longer of prediction/targets;
        the loss is optionally printed.
        """
        optimizer = self.optimizer
        lossfunc = self.lossfunc
        # Zero the gradients
        self.zero_grad()

        # Forward pass
        y_pred = self(x_train)

        y_train_len = len(y_train)
        y_pred_len = y_pred.shape[0]

        # reconcile lengths: drop the head of the targets or the tail of the
        # predictions so both cover the same frames
        if y_train_len > y_pred_len:
            diff = y_train_len - y_pred_len
            y_train = y_train[diff:]
        elif y_train_len < y_pred_len:
            diff = y_pred_len - y_train_len
            y_pred = y_pred[:-diff, :]

        y_train_hot = torch.zeros(len(y_train), self.output_size)
        y_train_hot[range(len(y_train)), y_train] = 1
        # follow the prediction's device instead of hard-coding 'cuda',
        # so CPU (and MPS) training also works
        y_train_hot = y_train_hot.to(y_pred.device)

        # Calculate the loss
        loss = lossfunc(y_pred, y_train_hot)

        # Print loss
        if log_loss:
            print('Loss', loss.item())

        # Backward pass
        loss.backward()

        # Update the weights
        optimizer.step()

    def save(self, path):
        """Save weights to *path* and append an architecture record to the zip."""
        info_path = '.'.join(os.path.basename(path).split('.')[:-1]) + '/.info'
        torch.save(self.state_dict(), path)
        data_from_model = Data(self.input_size, self.hidden_size, self.output_size, self.version)
        # torch checkpoints are zip archives; piggyback the metadata inside
        with ZipFile(path, 'a') as model_zip:
            model_zip.writestr(info_path, data_from_model.save())
            model_zip.close()

    @staticmethod
    def load_from_checkpoint(path, map_location: MAP_LOCATION = None):
        """Load a tokenizer saved by save(); old checkpoints lack the .info record."""
        old = True
        with ZipFile(path) as model_zip:
            filesMatch = [file for file in model_zip.namelist() if file.endswith('/.info')]
            file = filesMatch[0] if filesMatch else None
            if file:
                old = False
                print(f"Loading Custom Hubert Tokenizer {path}")
                data_from_model = Data.load(model_zip.read(file).decode('utf-8'))
            model_zip.close()
        if old:
            model = CustomTokenizer()
        else:
            model = CustomTokenizer(data_from_model.hidden_size, data_from_model.input_size, data_from_model.output_size, data_from_model.version)
        # pass map_location to torch.load too, so checkpoints saved on CUDA
        # load on CPU-only hosts instead of crashing
        model.load_state_dict(torch.load(path, map_location=map_location))
        if map_location:
            model = model.to(map_location)
        return model
+
+
+
class Data:
    """Plain serializable record of a CustomTokenizer's architecture."""

    input_size: int
    hidden_size: int
    output_size: int
    version: int

    def __init__(self, input_size=768, hidden_size=1024, output_size=10000, version=0):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.version = version

    @staticmethod
    def load(string):
        """Rebuild a Data record from its JSON string form."""
        raw = json.loads(string)
        return Data(
            raw['input_size'],
            raw['hidden_size'],
            raw['output_size'],
            raw['version'],
        )

    def save(self):
        """Serialize this record to a JSON string."""
        return json.dumps({
            'input_size': self.input_size,
            'hidden_size': self.hidden_size,
            'output_size': self.output_size,
            'version': self.version,
        })
+
+
def auto_train(data_path, save_path='model.pth', load_model: str | None = None, save_epochs=1, max_epochs=14):
    """Train a CustomTokenizer on prepared feature/label pairs (CUDA required).

    Reads paired ``*_semantic_features.npy`` (inputs) and ``*_semantic.npy``
    (targets) from ``data_path/ready``, trains for *max_epochs* epochs, and
    writes a rolling checkpoint plus one snapshot per epoch next to the data.
    """
    data_x, data_y = [], []

    if load_model and os.path.isfile(load_model):
        print('Loading model from', load_model)
        model_training = CustomTokenizer.load_from_checkpoint(load_model, 'cuda')
    else:
        print('Creating new model.')
        model_training = CustomTokenizer(version=1).to('cuda')  # Settings for the model to run without lstm
    save_path = os.path.join(data_path, save_path)
    base_save_path = '.'.join(save_path.split('.')[:-1])

    sem_string = '_semantic.npy'
    feat_string = '_semantic_features.npy'

    ready = os.path.join(data_path, 'ready')
    # NOTE(review): x/y pairing relies on os.listdir yielding matching feature
    # and semantic files in the same relative order -- verify data prep
    # guarantees this.
    for input_file in os.listdir(ready):
        full_path = os.path.join(ready, input_file)
        if input_file.endswith(sem_string):
            data_y.append(numpy.load(full_path))
        elif input_file.endswith(feat_string):
            data_x.append(numpy.load(full_path))
    model_training.prepare_training()

    epoch = 1
    with tqdm(total=((len(data_x) * len(data_y)) / 50) * save_epochs) as pbar1:
        while epoch <= max_epochs:
            for i in range(save_epochs):
                j = 0
                for x, y in zip(data_x, data_y):
                    model_training.train_step(torch.tensor(x).to('cuda'), torch.tensor(y).to('cuda'), j % 50 == 0)  # Print loss every 50 steps
                    j += 1
                    pbar1.update()

            save_p = save_path
            save_p_2 = f'{base_save_path}_epoch_{epoch}.pth'
            model_training.save(save_p)
            model_training.save(save_p_2)
            print(f'Epoch {epoch} completed')
            epoch += 1
    print(f'Done training for {max_epochs} epochs!')
\ No newline at end of file
diff --git a/bark/hubert/hubert_manager.py b/bark/hubert/hubert_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf6ffa9c327be26fbf50ad3a0c5a5a3ee037d32d
--- /dev/null
+++ b/bark/hubert/hubert_manager.py
@@ -0,0 +1,51 @@
+import os.path
+import shutil
+import urllib.request
+
+import huggingface_hub
+
+
class HuBERTManager:
    """Ensures the HuBERT base model and a language tokenizer exist on disk."""

    @staticmethod
    def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
        """Download the HuBERT base checkpoint once; return its local path."""
        install_dir = os.path.join('models', 'hubert')
        if not os.path.isdir(install_dir):
            os.makedirs(install_dir, exist_ok=True)
        install_file = os.path.join(install_dir, file_name)
        if not os.path.isfile(install_file):
            print(f'Downloading HuBERT base model from {download_url}')
            urllib.request.urlretrieve(download_url, install_file)
            print('Downloaded HuBERT')
        return install_file

    @staticmethod
    def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', tokenizer_lang: str = 'en'):
        """Download the custom tokenizer for *tokenizer_lang* once; return its path.

        Raises:
            ValueError: if *tokenizer_lang* has no known tokenizer repo.
        """
        local_file = tokenizer_lang + '_tokenizer.pth'
        install_dir = os.path.join('models', 'hubert')
        if not os.path.isdir(install_dir):
            os.makedirs(install_dir, exist_ok=True)
        install_file = os.path.join(install_dir, local_file)
        if not os.path.isfile(install_file):
            # language -> (repo, file) table; overrides the parameter defaults
            known = {
                'en': ('GitMylo/bark-voice-cloning', 'quantifier_hubert_base_ls960_14.pth'),
                'es': ('Lancer1408/bark-es-tokenizer', 'es_tokenizer.pth'),
                'de': ('CountFloyd/bark-voice-cloning-german-HuBERT-quantizer', 'german-HuBERT-quantizer_14_epoch.pth'),
                'pl': ('Hobis/bark-voice-cloning-polish-HuBERT-quantizer', 'polish-HuBERT-quantizer_8_epoch.pth'),
            }
            if tokenizer_lang not in known:
                # was `raise 'Unknown Tokenizer Language!'` -- raising a plain
                # string is a TypeError in Python 3, so raise a real exception
                raise ValueError('Unknown Tokenizer Language!')
            repo, model = known[tokenizer_lang]
            print(f'{local_file} not found. Downloading HuBERT custom tokenizer')
            huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
            shutil.move(os.path.join(install_dir, model), install_file)
            print('Downloaded tokenizer')
        return install_file
diff --git a/bark/hubert/pre_kmeans_hubert.py b/bark/hubert/pre_kmeans_hubert.py
new file mode 100644
index 0000000000000000000000000000000000000000..5208bd2792dd32e7f761ae787927a70bdcb2e5d6
--- /dev/null
+++ b/bark/hubert/pre_kmeans_hubert.py
@@ -0,0 +1,107 @@
+"""
+Modified HuBERT model without kmeans.
+Original author: https://github.com/lucidrains/
+Modified by: https://www.github.com/gitmylo/
+License: MIT
+"""
+
+# Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py
+
+from pathlib import Path
+
+import torch
+from torch import nn
+from einops import pack, unpack
+
+import fairseq
+
+from torchaudio.functional import resample
+
+from audiolm_pytorch.utils import curtail_to_multiple
+
+import logging
+logging.root.setLevel(logging.ERROR)
+
+
def exists(val):
    """Tell whether *val* actually carries a value, i.e. is not None."""
    return val is not None
+
+
def default(val, d):
    """Return *val* unless it is None, in which case return the fallback *d*."""
    if val is None:
        return d
    return val
+
+
class CustomHubert(nn.Module):
    """
    Wraps a fairseq HuBERT checkpoint and exposes a forward() that returns the
    hidden states ("semantic vectors") of a chosen transformer layer, without
    the k-means quantization step of the original audiolm-pytorch version.

    checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
    or you can train your own
    """

    def __init__(
        self,
        checkpoint_path,           # path to a fairseq HuBERT .pt checkpoint
        target_sample_hz=16000,    # audio is resampled to this rate before the model
        seq_len_multiple_of=None,  # optionally trim the input to a multiple of this
        output_layer=9,            # transformer layer whose features are returned
        device=None                # optional torch device for module and model
    ):
        super().__init__()
        self.target_sample_hz = target_sample_hz
        self.seq_len_multiple_of = seq_len_multiple_of
        self.output_layer = output_layer

        if device is not None:
            self.to(device)

        model_path = Path(checkpoint_path)

        assert model_path.exists(), f'path {checkpoint_path} does not exist'

        print(f"Loading Hubert {checkpoint_path}")
        # NOTE(review): torch.load without map_location will fail on CPU-only
        # machines if the checkpoint was saved from GPU — confirm deployment.
        checkpoint = torch.load(checkpoint_path)
        load_model_input = {checkpoint_path: checkpoint}
        # fairseq returns (list_of_models, cfg, task); we keep the first model.
        model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)

        if device is not None:
            model[0].to(device)

        self.model = model[0]
        self.model.eval()  # inference only; gradients are disabled in forward

    @property
    def groups(self):
        # Single codebook group (interface parity with quantized variants).
        return 1

    @torch.no_grad()
    def forward(
        self,
        wav_input,            # waveform tensor; assumed (channels, samples) — TODO confirm
        flatten=True,         # True: packed (frames, dim); False: restore batch dim
        input_sample_hz=None  # sample rate of wav_input, if resampling is needed
    ):
        """Return HuBERT hidden-state features for *wav_input* (no quantization)."""
        device = wav_input.device

        if exists(input_sample_hz):
            wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)

        if exists(self.seq_len_multiple_of):
            wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)

        embed = self.model(
            wav_input,
            features_only=True,
            mask=False,  # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
            output_layer=self.output_layer
        )

        # Pack the selected layer's output into a flat (frames, dim) tensor.
        embed, packed_shape = pack([embed['x']], '* d')

        # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())

        # Numpy round-trip kept from the original k-means variant; the name
        # "codebook_indices" is historical — these are float features here.
        codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device)  # .long()

        if flatten:
            return codebook_indices

        codebook_indices, = unpack(codebook_indices, packed_shape, '*')
        return codebook_indices
diff --git a/bark/model.py b/bark/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..457b49e749f396c47c6b35f44955fd512d233d79
--- /dev/null
+++ b/bark/model.py
@@ -0,0 +1,218 @@
+"""
+Much of this code is adapted from Andrej Karpathy's NanoGPT
+(https://github.com/karpathy/nanoGPT)
+"""
+import math
+from dataclasses import dataclass
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
class LayerNorm(nn.Module):
    """LayerNorm with a switchable bias — plain PyTorch LayerNorm offers no bias=False."""

    def __init__(self, ndim, bias):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(ndim))
        if bias:
            self.bias = nn.Parameter(torch.zeros(ndim))
        else:
            # F.layer_norm accepts None for a missing bias term.
            self.bias = None

    def forward(self, input):
        normalized_shape = self.weight.shape
        return F.layer_norm(input, normalized_shape, self.weight, self.bias, 1e-5)
+
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with optional KV caching.

    Uses torch's fused scaled_dot_product_attention when available (PyTorch
    >= 2.0), otherwise a manual masked implementation.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention make GPU go brrrrr; available from PyTorch >= 2.0
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            # causal mask to ensure that attention is only applied to the left in the input sequence
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                .view(1, 1, config.block_size, config.block_size))

    def forward(self, x, past_kv=None, use_cache=False):
        """Attend over x (plus any cached keys/values).

        Args:
            x: (B, T, C) input activations.
            past_kv: optional (key, value) tensors from previous decode steps.
            use_cache: when True, also return the updated (key, value) pair.

        Returns:
            (y, present): output activations and the KV cache (None when
            use_cache is False).
        """
        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)

        if past_kv is not None:
            past_key, past_value = past_kv
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)

        FULL_T = k.shape[-2]

        present = (k, v) if use_cache is True else None

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        if self.flash:
            # When past_kv is provided we're doing incremental decoding and the
            # query holds only the last token; scaled_dot_product_attention
            # would treat it as the *first* position and mask everything under
            # is_causal=True, so causality must be disabled in that case.
            is_causal = past_kv is None

            # BUGFIX: dropout_p was previously self.dropout unconditionally,
            # which kept applying attention dropout at inference time. The
            # manual path's nn.Dropout is correctly disabled by eval(); mirror
            # that behavior here.
            y = torch.nn.functional.scaled_dot_product_attention(
                q, k, v,
                dropout_p=self.dropout if self.training else 0,
                is_causal=is_causal,
            )
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:, :, FULL_T - T:FULL_T, :FULL_T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return (y, present)
+
class MLP(nn.Module):
    """Transformer feed-forward block: 4x expansion, GELU, projection, dropout."""

    def __init__(self, config):
        super().__init__()
        hidden_dim = 4 * config.n_embd
        self.c_fc = nn.Linear(config.n_embd, hidden_dim, bias=config.bias)
        self.c_proj = nn.Linear(hidden_dim, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)
        self.gelu = nn.GELU()

    def forward(self, x):
        # fc -> gelu -> proj -> dropout, exactly as before, just chained.
        return self.dropout(self.c_proj(self.gelu(self.c_fc(x))))
+
class Block(nn.Module):
    """Decoder block: pre-norm causal attention and pre-norm MLP, both residual."""

    def __init__(self, config, layer_idx):
        super().__init__()
        self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
        self.mlp = MLP(config)
        self.layer_idx = layer_idx

    def forward(self, x, past_kv=None, use_cache=False):
        """Return (activations, kv_cache) — the cache is None unless use_cache."""
        attn_branch, new_kv = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache)
        x = x + attn_branch
        mlp_branch = self.mlp(self.ln_2(x))
        return (x + mlp_branch, new_kv)
+
@dataclass
class GPTConfig:
    """Hyperparameters for the Bark GPT backbone (NanoGPT-style)."""
    block_size: int = 1024          # maximum sequence length (size of wpe table)
    input_vocab_size: int = 10_048  # size of the input token embedding table
    output_vocab_size: int = 10_048 # size of the lm_head output vocabulary
    n_layer: int = 12               # number of transformer blocks
    n_head: int = 12                # attention heads per block
    n_embd: int = 768               # embedding / model width
    dropout: float = 0.0            # dropout probability used throughout
    bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
+
class GPT(nn.Module):
    """NanoGPT-style causal transformer used by Bark's text/coarse stages.

    Supports incremental decoding via per-layer KV caches (past_kv) and an
    optional merge_context mode that sums two 256-token context segments.
    """

    def __init__(self, config):
        super().__init__()
        assert config.input_vocab_size is not None
        assert config.output_vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.input_vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),        # learned position embeddings
            drop = nn.Dropout(config.dropout),
            h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),
            ln_f = LayerNorm(config.n_embd, bias=config.bias),           # final layer norm
        ))
        self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            # NOTE(review): wte *is* subtracted here, contradicting the
            # docstring above — confirm which behavior is intended.
            n_params -= self.transformer.wte.weight.numel()
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False):
        """Forward pass returning logits for the LAST position only.

        Args:
            idx: (b, t) int token ids.
            merge_context: sum the embeddings of the first two 256-token
                segments into one context block (requires t >= 513).
            past_kv: per-layer (key, value) caches for incremental decoding;
                when given, idx must contain a single new token (t == 1).
            position_ids: optional explicit (1, t) position indices.
            use_cache: when True, also return updated per-layer caches.

        Returns:
            (logits, new_kv): logits of shape (b, 1, output_vocab_size) and
            the KV caches (None when use_cache is False).
        """
        device = idx.device
        b, t = idx.size()
        if past_kv is not None:
            assert t == 1  # incremental decoding feeds one token at a time
            tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
        else:
            if merge_context:
                assert(idx.shape[1] >= 256+256+1)
                t = idx.shape[1] - 256  # the two 256-token segments collapse into one
            else:
                assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"

            # forward the GPT model itself
            if merge_context:
                # Sum the embeddings of the two 256-token context segments,
                # then append the remainder of the sequence unchanged.
                tok_emb = torch.cat([
                    self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]),
                    self.transformer.wte(idx[:,256+256:])
                ], dim=1)
            else:
                tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)

        if past_kv is None:
            past_length = 0
            past_kv = tuple([None] * len(self.transformer.h))
        else:
            past_length = past_kv[0][0].size(-2)  # cached sequence length so far

        if position_ids is None:
            position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0) # shape (1, t)
        assert position_ids.shape == (1, t)

        pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd)

        x = self.transformer.drop(tok_emb + pos_emb)

        new_kv = () if use_cache else None

        for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)):
            x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache)

            if use_cache:
                new_kv = new_kv + (kv,)

        x = self.transformer.ln_f(x)

        # inference-time mini-optimization: only forward the lm_head on the very last position
        logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim

        return (logits, new_kv)
diff --git a/bark/model_fine.py b/bark/model_fine.py
new file mode 100644
index 0000000000000000000000000000000000000000..e51d3e6a9de947b6ceb6b8441f72a6b3fe1037f0
--- /dev/null
+++ b/bark/model_fine.py
@@ -0,0 +1,149 @@
+"""
+Much of this code is adapted from Andrej Karpathy's NanoGPT
+(https://github.com/karpathy/nanoGPT)
+"""
+from dataclasses import dataclass
+import math
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+from .model import GPT, GPTConfig, MLP
+
+
class NonCausalSelfAttention(nn.Module):
    """Bidirectional (non-causal) multi-head self-attention for the fine model.

    Uses torch's fused scaled_dot_product_attention when available (PyTorch
    >= 2.0), otherwise a manual implementation.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
        self.flash = (
            hasattr(torch.nn.functional, "scaled_dot_product_attention")
        )

    def forward(self, x):
        """Attend over x of shape (B, T, C); returns a tensor of the same shape."""
        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)

        if self.flash:
            # BUGFIX: dropout_p was previously self.dropout unconditionally,
            # which kept applying attention dropout at inference time. The
            # manual path's nn.Dropout is correctly disabled by eval(); mirror
            # that behavior here.
            y = torch.nn.functional.scaled_dot_product_attention(
                q, k, v, attn_mask=None,
                dropout_p=self.dropout if self.training else 0,
                is_causal=False,
            )
        else:
            # manual implementation of attention (no causal mask here)
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = (
            y.transpose(1, 2).contiguous().view(B, T, C)
        )  # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y
+
+
class FineBlock(nn.Module):
    """Non-causal transformer block used by the fine (codebook) model."""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = NonCausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        """Pre-norm residual attention followed by pre-norm residual MLP."""
        attn_branch = self.attn(self.ln_1(x))
        x = x + attn_branch
        mlp_branch = self.mlp(self.ln_2(x))
        return x + mlp_branch
+
+
class FineGPT(GPT):
    """Non-causal GPT that predicts one residual codebook at a time.

    Replaces the causal parent transformer with non-causal FineBlocks, one
    embedding table per codebook, and one lm_head per *predicted* codebook.
    """

    def __init__(self, config):
        super().__init__(config)
        del self.lm_head  # replaced by per-codebook lm_heads below
        self.config = config
        self.n_codes_total = config.n_codes_total
        self.transformer = nn.ModuleDict(
            dict(
                # One token-embedding table per codebook channel.
                wtes=nn.ModuleList(
                    [
                        nn.Embedding(config.input_vocab_size, config.n_embd)
                        for _ in range(config.n_codes_total)
                    ]
                ),
                wpe=nn.Embedding(config.block_size, config.n_embd),
                drop=nn.Dropout(config.dropout),
                h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]),
                ln_f=nn.LayerNorm(config.n_embd),
            )
        )
        # One output head per codebook that has to be predicted.
        self.lm_heads = nn.ModuleList(
            [
                nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
                for _ in range(config.n_codes_given, self.n_codes_total)
            ]
        )
        # Weight tying: embedding table i+1 shares its weight with lm_head i.
        for i in range(self.n_codes_total - config.n_codes_given):
            self.transformer.wtes[i + 1].weight = self.lm_heads[i].weight

    def forward(self, pred_idx, idx):
        """Predict logits for codebook *pred_idx* from codebooks 0..pred_idx.

        Args:
            pred_idx: index of the codebook to predict (must be > 0).
            idx: (b, t, n_codes_total) int tensor of codes for all codebooks.

        Returns:
            (b, t, output_vocab_size) logits for codebook pred_idx.
        """
        device = idx.device
        b, t, codes = idx.size()
        assert (
            t <= self.config.block_size
        ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        assert pred_idx > 0, "cannot predict 0th codebook"
        assert codes == self.n_codes_total, (b, t, codes)
        pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0)  # shape (1, t)

        # forward the GPT model itself
        tok_embs = [
            wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes)
        ]  # token embeddings of shape (b, t, n_embd)
        tok_emb = torch.cat(tok_embs, dim=-1)
        pos_emb = self.transformer.wpe(pos)  # position embeddings of shape (1, t, n_embd)
        # Sum the embeddings of the already-known codebooks 0..pred_idx.
        x = tok_emb[:, :, :, : pred_idx + 1].sum(dim=-1)
        x = self.transformer.drop(x + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        logits = self.lm_heads[pred_idx - self.config.n_codes_given](x)
        return logits

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            # NOTE(review): every wte is subtracted here even though wtes[1:]
            # share weights with the lm_heads — confirm the intended count.
            for wte in self.transformer.wtes:
                n_params -= wte.weight.numel()
            n_params -= self.transformer.wpe.weight.numel()
        return n_params
+
+
@dataclass
class FineGPTConfig(GPTConfig):
    """GPTConfig extended with codebook counts for the fine model."""
    n_codes_total: int = 8  # total number of codebooks (one embedding table each)
    n_codes_given: int = 1  # codebooks given as conditioning, not predicted
diff --git a/bark/settings.py b/bark/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..81c660f3d2e33b21821583cb34c872c2ca23928b
--- /dev/null
+++ b/bark/settings.py
@@ -0,0 +1,7 @@
+import os
+
def initenv(args):
    """Translate command-line flags into the environment variables bark reads.

    bark's code checks these variables for *truthiness of the string value*
    (e.g. ``not os.environ.get("BARK_FORCE_CPU", False)``). The previous
    implementation wrote ``str(False) == "False"``, which is a non-empty —
    and therefore truthy — string, so every option behaved as enabled even
    when its flag was absent. Write "True" when the flag is present and an
    empty (falsy) string when it is not.

    Args:
        args: iterable of command-line arguments (e.g. sys.argv).
    """
    def _flag(name):
        # Empty string stays falsy under os.environ.get(...) truthiness checks.
        return "True" if name in args else ""

    os.environ['SUNO_USE_SMALL_MODELS'] = _flag("-smallmodels")
    os.environ['BARK_FORCE_CPU'] = _flag("-forcecpu")
    os.environ['SUNO_ENABLE_MPS'] = _flag("-enablemps")
    os.environ['SUNO_OFFLOAD_CPU'] = _flag("-offloadcpu")
diff --git a/cloning/__init__.py b/cloning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/cloning/__pycache__/__init__.cpython-313.pyc b/cloning/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fffc3042c3c1fb788e9ea30f5de4564eb20ef68
Binary files /dev/null and b/cloning/__pycache__/__init__.cpython-313.pyc differ
diff --git a/cloning/__pycache__/clonevoice.cpython-313.pyc b/cloning/__pycache__/clonevoice.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..068801063731a439b61742b3e9572f91959620e2
Binary files /dev/null and b/cloning/__pycache__/clonevoice.cpython-313.pyc differ
diff --git a/cloning/clonevoice.py b/cloning/clonevoice.py
new file mode 100644
index 0000000000000000000000000000000000000000..f980c42df4307b13a0f4b03bb2602c4e4a87f7ff
--- /dev/null
+++ b/cloning/clonevoice.py
@@ -0,0 +1,68 @@
+from bark.generation import load_codec_model, generate_text_semantic, grab_best_device
+from encodec.utils import convert_audio
+from bark.hubert.hubert_manager import HuBERTManager
+from bark.hubert.pre_kmeans_hubert import CustomHubert
+from bark.hubert.customtokenizer import CustomTokenizer
+
+import torchaudio
+import torch
+import os
+import gradio
+
+
def clone_voice(audio_filepath, tokenizer_lang, dest_filename, progress=gradio.Progress(track_tqdm=True)):
    """Build a Bark speaker prompt (.npz) from a reference recording.

    Pipeline: load the EnCodec codec model, extract HuBERT features and map
    them to semantic tokens with a language-specific custom tokenizer, encode
    the audio to EnCodec codes, and save fine/coarse/semantic prompts to
    '<dest_filename>.npz'.

    Args:
        audio_filepath: path of the reference audio file to clone from.
        tokenizer_lang: language code of the custom tokenizer (e.g. 'en').
        dest_filename: output file path without the '.npz' extension.
        progress: gradio progress reporter (tqdm-tracked).

    Returns:
        The status string "Finished".
    """
    # if len(text) < 1:
    #     raise gradio.Error('No transcription text entered!')

    # NOTE(review): os.environ values are strings; str(False) == "False" is
    # truthy, so this expression is False whenever the variable is set at
    # all. Confirm against how BARK_FORCE_CPU is written in bark/settings.py.
    use_gpu = not os.environ.get("BARK_FORCE_CPU", False)
    progress(0, desc="Loading Codec")
    model = load_codec_model(use_gpu=use_gpu)

    # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
    hubert_manager = HuBERTManager()
    hubert_manager.make_sure_hubert_installed()
    hubert_manager.make_sure_tokenizer_installed(tokenizer_lang=tokenizer_lang)

    # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
    # Load HuBERT for semantic tokens

    # Load the HuBERT model
    device = grab_best_device(use_gpu)
    hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device)

    # Load the CustomTokenizer model
    tokenizer = CustomTokenizer.load_from_checkpoint(f'./models/hubert/{tokenizer_lang}_tokenizer.pth').to(device)  # Automatically uses the right layers

    progress(0.25, desc="Converting WAV")

    # Load and pre-process the audio waveform
    wav, sr = torchaudio.load(audio_filepath)
    if wav.shape[0] == 2:  # Stereo to mono if needed
        wav = wav.mean(0, keepdim=True)

    wav = convert_audio(wav, sr, model.sample_rate, model.channels)
    wav = wav.to(device)
    progress(0.5, desc="Extracting codes")

    semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)
    semantic_tokens = tokenizer.get_token(semantic_vectors)

    # Extract discrete codes from EnCodec
    with torch.no_grad():
        encoded_frames = model.encode(wav.unsqueeze(0))
    codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze()  # [n_q, T]

    # get seconds of audio
    # seconds = wav.shape[-1] / model.sample_rate
    # generate semantic tokens
    # semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7)

    # move codes to cpu
    codes = codes.cpu().numpy()
    # move semantic tokens to cpu
    semantic_tokens = semantic_tokens.cpu().numpy()

    import numpy as np  # NOTE(review): local import; consider hoisting to module level
    output_path = dest_filename + '.npz'
    # Coarse prompt is the first two codebooks of the fine prompt.
    np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens)
    return "Finished"
diff --git a/cloning/clonevoice_old.py b/cloning/clonevoice_old.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ac4610806c2b79d5ab22567064e73c41b3c01fa
--- /dev/null
+++ b/cloning/clonevoice_old.py
@@ -0,0 +1,41 @@
+from bark.generation import load_codec_model, generate_text_semantic, grab_best_device
+from encodec.utils import convert_audio
+import torchaudio
+import torch
+import os
+import gradio
+
+
def clone_voice(audio_filepath, text, dest_filename, progress=gradio.Progress(track_tqdm=True)):
    """Older voice-clone path: EnCodec codes plus text-derived semantic tokens.

    Unlike the HuBERT-based replacement, the semantic tokens are *generated*
    from the transcription text (bounded by the clip duration) rather than
    extracted from the audio itself.

    Args:
        audio_filepath: path of the reference audio file.
        text: transcription of the reference audio (must be non-empty).
        dest_filename: output file path without the '.npz' extension.
        progress: gradio progress reporter (tqdm-tracked).

    Raises:
        gradio.Error: if no transcription text was entered.

    Returns:
        The status string "Finished".
    """
    if len(text) < 1:
        raise gradio.Error('No transcription text entered!')

    # NOTE(review): os.environ values are strings; str(False) == "False" is
    # truthy, so this is False whenever the variable is set at all — verify.
    use_gpu = not os.environ.get("BARK_FORCE_CPU", False)
    progress(0, desc="Loading Codec")
    model = load_codec_model(use_gpu=use_gpu)
    progress(0.25, desc="Converting WAV")

    # Load and pre-process the audio waveform
    device = grab_best_device(use_gpu)
    wav, sr = torchaudio.load(audio_filepath)
    wav = convert_audio(wav, sr, model.sample_rate, model.channels)
    wav = wav.unsqueeze(0).to(device)
    progress(0.5, desc="Extracting codes")

    # Extract discrete codes from EnCodec
    with torch.no_grad():
        encoded_frames = model.encode(wav)
    codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze()  # [n_q, T]

    # get seconds of audio
    seconds = wav.shape[-1] / model.sample_rate
    # generate semantic tokens
    semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7)

    # move codes to cpu
    codes = codes.cpu().numpy()

    import numpy as np  # NOTE(review): local import; consider hoisting to module level
    output_path = dest_filename + '.npz'
    np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens)
    return "Finished"
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bfa57eeb396f948e9aa3b416d756f1b58c60e788
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,8 @@
+input_text_desired_length: 110
+input_text_max_length: 280
+selected_theme: freddyaboulton/dracula_revamped
+server_name: '0.0.0.0'
+server_port: 0
+server_share: false
+silence_between_sentences: 250
+silence_between_speakers: 500
diff --git a/outputs/02-25-2026/audioclip_22-01-33_s1.npz b/outputs/02-25-2026/audioclip_22-01-33_s1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9cb41b5d8139d01f89fc50ec80955f71d6d5c735
--- /dev/null
+++ b/outputs/02-25-2026/audioclip_22-01-33_s1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76baee3e7755ad59bc351f0a16aa850831b39fb3b337883043641b0241875cce
+size 50020
diff --git a/outputs/02-25-2026/audioclip_22-05-17_s1.npz b/outputs/02-25-2026/audioclip_22-05-17_s1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..9d9684afd506558057279b76d54445c15a0b2954
--- /dev/null
+++ b/outputs/02-25-2026/audioclip_22-05-17_s1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed3401001e8eb65fdd2382111f21184db3ddae4a2e8f0a0cfc20c06dc1d6ffa7
+size 51740
diff --git a/outputs/02-25-2026/final_21-53-52_s1.wav b/outputs/02-25-2026/final_21-53-52_s1.wav
new file mode 100644
index 0000000000000000000000000000000000000000..9cef0b40e0a143a9524011bd56f2ff06b1464774
--- /dev/null
+++ b/outputs/02-25-2026/final_21-53-52_s1.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e927153ec3ba308a7d5f4b1d9d985e8c3c9a5f9cdc9dfb8993adaa374594878a
+size 1333824
diff --git a/outputs/02-25-2026/final_21-57-01_s1.wav b/outputs/02-25-2026/final_21-57-01_s1.wav
new file mode 100644
index 0000000000000000000000000000000000000000..9cef0b40e0a143a9524011bd56f2ff06b1464774
--- /dev/null
+++ b/outputs/02-25-2026/final_21-57-01_s1.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e927153ec3ba308a7d5f4b1d9d985e8c3c9a5f9cdc9dfb8993adaa374594878a
+size 1333824
diff --git a/outputs/02-25-2026/final_22-01-33_s1.wav b/outputs/02-25-2026/final_22-01-33_s1.wav
new file mode 100644
index 0000000000000000000000000000000000000000..eb5a7664be5a87bfc298f4b58f753d02e79ca881
--- /dev/null
+++ b/outputs/02-25-2026/final_22-01-33_s1.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a59dac16205f97d49c9f4d3bf93bdb0bb39afadaee9ce6a3b9513f0c1ea1d7f
+size 1391500
diff --git a/outputs/02-25-2026/final_22-05-17_s1.wav b/outputs/02-25-2026/final_22-05-17_s1.wav
new file mode 100644
index 0000000000000000000000000000000000000000..b8a7991a0e7b9bd88bc641f70e3c5ff093d35ca6
--- /dev/null
+++ b/outputs/02-25-2026/final_22-05-17_s1.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a088843ff044801b22a3806fcb39577c0b0445e2719048ea8f46e618c3c83cf5
+size 1440140
diff --git a/requirements.txt b/requirements.txt
index 4edf952d1017e8e5223ff66050aa91e83736cc6e..26442d7c9a958328f308d6fd099cfda0672b4df8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,16 @@
-torch
-torchaudio
-transformers==4.34.1
-scipy
-pytube
-pydub
-moviepy
-coqui-tts
\ No newline at end of file
+fairseq; platform_system != "Windows"
+fairseq@https://github.com/Sharrnah/fairseq/releases/download/v0.12.4/fairseq-0.12.4-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+audiolm-pytorch
+gradio
+funcy
+linkify
+mutagen
+pytorch_seed
+pyyaml
+sentencepiece
+soundfile; platform_system == "Windows"
+sox; platform_system != "Windows"
+transformers
+
+
+torch==2.5.0a0
\ No newline at end of file
diff --git a/swap_voice.py b/swap_voice.py
new file mode 100644
index 0000000000000000000000000000000000000000..be1135be3648f9757046de1f9a4e240bd818be5a
--- /dev/null
+++ b/swap_voice.py
@@ -0,0 +1,62 @@
+from bark.generation import load_codec_model, generate_text_semantic, grab_best_device
+from bark import SAMPLE_RATE
+from encodec.utils import convert_audio
+from bark.hubert.hubert_manager import HuBERTManager
+from bark.hubert.pre_kmeans_hubert import CustomHubert
+from bark.hubert.customtokenizer import CustomTokenizer
+from bark.api import semantic_to_waveform
+from scipy.io.wavfile import write as write_wav
+from util.helper import create_filename
+from util.settings import Settings
+
+
+import torchaudio
+import torch
+import os
+import gradio
+
def swap_voice_from_audio(swap_audio_filename, selected_speaker, tokenizer_lang, seed, batchcount, progress=gradio.Progress(track_tqdm=True)):
    """Re-synthesize an audio clip in the voice of *selected_speaker*.

    Extracts semantic tokens from the input audio (HuBERT + custom tokenizer)
    and renders them back to a waveform conditioned on the chosen speaker
    prompt; the result is written under the configured output folder.

    Args:
        swap_audio_filename: path of the source audio whose content to keep.
        selected_speaker: Bark speaker/history prompt to render the voice with.
        tokenizer_lang: language code of the custom tokenizer (e.g. 'en').
        seed: NOTE(review): accepted but currently unused in this function.
        batchcount: NOTE(review): accepted but currently unused as well.
        progress: gradio progress reporter (tqdm-tracked).

    Returns:
        str: path of the written .wav file.
    """
    # NOTE(review): os.environ values are strings; str(False) == "False" is
    # truthy, so this is False whenever the variable is set at all — verify.
    use_gpu = not os.environ.get("BARK_FORCE_CPU", False)
    progress(0, desc="Loading Codec")

    # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
    hubert_manager = HuBERTManager()
    hubert_manager.make_sure_hubert_installed()
    hubert_manager.make_sure_tokenizer_installed(tokenizer_lang=tokenizer_lang)

    # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
    # Load HuBERT for semantic tokens

    # Load the HuBERT model
    device = grab_best_device(use_gpu)
    hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device)
    model = load_codec_model(use_gpu=use_gpu)

    # Load the CustomTokenizer model
    tokenizer = CustomTokenizer.load_from_checkpoint(f'./models/hubert/{tokenizer_lang}_tokenizer.pth').to(device)  # Automatically uses the right layers

    progress(0.25, desc="Converting WAV")

    # Load and pre-process the audio waveform
    wav, sr = torchaudio.load(swap_audio_filename)
    if wav.shape[0] == 2:  # Stereo to mono if needed
        wav = wav.mean(0, keepdim=True)

    wav = convert_audio(wav, sr, model.sample_rate, model.channels)
    wav = wav.to(device)
    semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)
    semantic_tokens = tokenizer.get_token(semantic_vectors)

    # Render the extracted semantics with the selected speaker's voice.
    audio = semantic_to_waveform(
        semantic_tokens,
        history_prompt=selected_speaker,
        temp=0.7,
        silent=False,
        output_full=False)

    settings = Settings('config.yaml')

    result = create_filename(settings.output_folder_path, None, "swapvoice",".wav")
    write_wav(result, SAMPLE_RATE, audio)
    return result
+
diff --git a/training/__init__.py b/training/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/training/__pycache__/__init__.cpython-313.pyc b/training/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ebbfed4e99dfd093974b75fe58d2d9b937e9812
Binary files /dev/null and b/training/__pycache__/__init__.cpython-313.pyc differ
diff --git a/training/__pycache__/data.cpython-313.pyc b/training/__pycache__/data.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e057cedc1d1ad0fc9e8196bef8bb90c175614084
Binary files /dev/null and b/training/__pycache__/data.cpython-313.pyc differ
diff --git a/training/__pycache__/train.cpython-313.pyc b/training/__pycache__/train.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d785d0552b7a0a63d62e30ec29b636f799f84e7f
Binary files /dev/null and b/training/__pycache__/train.cpython-313.pyc differ
diff --git a/training/__pycache__/training_prepare.cpython-313.pyc b/training/__pycache__/training_prepare.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0dfaa6fc96662be45f9b7192d33d021d1ca57048
Binary files /dev/null and b/training/__pycache__/training_prepare.cpython-313.pyc differ
diff --git a/training/data.py b/training/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..dedf4c414823d374ed7123cdcef451500ddb6564
--- /dev/null
+++ b/training/data.py
@@ -0,0 +1,52 @@
import random
import requests
import os, glob

# english literature
# Public-domain source texts from Project Gutenberg used as the text corpus.
books = [
    'https://www.gutenberg.org/cache/epub/1513/pg1513.txt',
    'https://www.gutenberg.org/files/2701/2701-0.txt',
    'https://www.gutenberg.org/cache/epub/84/pg84.txt',
    'https://www.gutenberg.org/cache/epub/2641/pg2641.txt',
    'https://www.gutenberg.org/cache/epub/1342/pg1342.txt',
    'https://www.gutenberg.org/cache/epub/100/pg100.txt'
    ]

#default english
# allowed_chars = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=\"\':;[]{}/<>,.`~\n\\'

#german
# NOTE(review): the German charset is active while the book list above is
# English literature — confirm which language is intended for training.
allowed_chars = ' aäbcdefghijklmnoöpqrsßtuüvwxyzABCDEFGHIJKLMNOÖPQRSTUÜVWXYZ0123456789!@#$%^&*()-_+=\"\':;[]{}/<>,.`~\n\\'
+
+
def download_book(book):
    """Fetch a plain-text book from *book* (a URL) and return it as str.

    A timeout keeps a stalled connection from hanging training prep forever
    (requests.get without one can block indefinitely).
    """
    return requests.get(book, timeout=60).content.decode('utf-8')
+
+
def filter_data(data):
    """Strip every character of *data* that is not in the training alphabet.

    Builds a set from the module-level allowed_chars once, so each membership
    test is O(1) instead of an O(len(allowed_chars)) string scan per char.
    """
    print('Filtering data')
    allowed = set(allowed_chars)
    return ''.join([char for char in data if char in allowed])
+
+
def load_books(fromfolder=False):
    """Load training text, either from ./text/*.txt or from Project Gutenberg.

    Args:
        fromfolder: when True, read local .txt files from the 'text' folder;
            otherwise download every URL in the module-level *books* list.

    Returns:
        str: all filtered texts joined by a single space.
    """
    text_data = []
    if fromfolder:
        current_working_directory = os.getcwd()
        print(current_working_directory)
        path = 'text'
        for filename in glob.glob(os.path.join(path, '*.txt')):
            # Explicit utf-8: the platform default (e.g. cp1252 on Windows)
            # chokes on Gutenberg texts.
            with open(os.path.join(os.getcwd(), filename), 'r', encoding='utf-8') as f:  # open in readonly mode
                # FIX: the f-string had no placeholder and printed a literal.
                print(f'Loading {filename}')
                text_data.append(filter_data(str(f.read())))
    else:
        print(f'Loading {len(books)} books into ram')
        for book in books:
            text_data.append(filter_data(str(download_book(book))))
    print('Loaded books')
    return ' '.join(text_data)
+
+
def random_split_chunk(data, size=14):
    """Return a random contiguous run of up to *size* space-separated words."""
    words = data.split(' ')
    start = random.randrange(0, len(words))
    # The slice may be shorter than *size* near the end of the word list.
    return ' '.join(words[start:start + size])
diff --git a/training/train.py b/training/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..be0cccc6145b46d026831cb71f198d2292fae931
--- /dev/null
+++ b/training/train.py
@@ -0,0 +1,47 @@
+import os
+import fnmatch
+import shutil
+
+import numpy
+import torchaudio
+import gradio
+
+from bark.hubert.pre_kmeans_hubert import CustomHubert
+from bark.hubert.customtokenizer import auto_train
+from tqdm.auto import tqdm
+
+
def training_prepare_files(path, model, progress=gradio.Progress(track_tqdm=True)):
    """Pair each training WAV with its semantics file and stage both under
    <path>/ready as <i>_semantic_features.npy / <i>_semantic.npy.

    Does nothing when the ready folder already contains .npy files, so a
    previously prepared dataset is reused as-is.
    """
    semanticsfolder = "./training/data/output"
    wavfolder = "./training/data/output_wav"
    ready = os.path.join(path, 'ready')

    already_prepared = fnmatch.filter(os.listdir(ready), '*.npy')
    if len(already_prepared) < 1:
        # prepare and copy for training
        hubert_model = CustomHubert(checkpoint_path=model)

        wavfiles = fnmatch.filter(os.listdir(wavfolder), '*.wav')
        for index, wavname in tqdm(enumerate(wavfiles), total=len(wavfiles)):
            base = '.'.join(wavname.split('.')[:-1])  # file name without extension
            semafilename = os.path.join(semanticsfolder, f'{base}.npy')
            if not os.path.isfile(semafilename):
                print(f'Skipping {wavname} no semantics pair found!')
                continue

            print('Processing', wavname)
            wav, sr = torchaudio.load(os.path.join(wavfolder, wavname))
            if wav.shape[0] == 2:  # Stereo to mono if needed
                wav = wav.mean(0, keepdim=True)
            semantic_features = hubert_model.forward(wav, input_sample_hz=sr).cpu().numpy()
            numpy.save(os.path.join(ready, f'{index}_semantic_features.npy'), semantic_features)
            shutil.copy(semafilename, os.path.join(ready, f'{index}_semantic.npy'))
+
def train(path, save_every, max_epochs):
    """Start tokenizer training on the prepared data in `path`, saving a
    checkpoint every `save_every` epochs.

    NOTE(review): `max_epochs` is accepted but never forwarded to
    auto_train, so the epoch limit is currently ignored — confirm against
    auto_train's signature before relying on it.
    """
    auto_train(path, save_epochs=save_every)
+
diff --git a/training/training_prepare.py b/training/training_prepare.py
new file mode 100644
index 0000000000000000000000000000000000000000..e783489c4b69f74a77c0b02ab9b16259f066b45d
--- /dev/null
+++ b/training/training_prepare.py
@@ -0,0 +1,76 @@
import random
import uuid
import numpy
import os
import random  # NOTE(review): duplicate of the `import random` above
import fnmatch

from tqdm.auto import tqdm
from scipy.io import wavfile

from bark.generation import load_model, SAMPLE_RATE
from bark.api import semantic_to_waveform

from bark import text_to_semantic
from bark.generation import load_model  # NOTE(review): duplicate import

from training.data import load_books, random_split_chunk

# Output folders: generated semantic-token .npy files and rendered WAVs.
output = 'training/data/output'
output_wav = 'training/data/output_wav'
+
+
def prepare_semantics_from_text(num_generations):
    """Generate `num_generations` semantic-token files from random text chunks.

    Each iteration picks a short random chunk from the loaded book corpus,
    runs Bark's text->semantic model on it and saves the tokens as
    training/data/output/<uuid>.npy.
    """
    loaded_data = load_books(True)

    # BARK_FORCE_CPU is set to a (truthy) string when CPU is forced.
    use_gpu = not os.environ.get("BARK_FORCE_CPU", False)

    print('Loading semantics model')
    # Fixed: use_gpu was computed above but the call hard-coded use_gpu=True,
    # which ignored BARK_FORCE_CPU.
    load_model(use_gpu=use_gpu, use_small=False, force_reload=False, model_type='text')

    os.makedirs(output, exist_ok=True)

    # Fixed: the loop was `while 1` and never consulted num_generations,
    # so generation ran forever; it now produces exactly the requested count.
    for loop in range(1, num_generations + 1):
        file_name = os.path.join(output, uuid.uuid4().hex + '.npy')
        text = ''
        while not len(text) > 0:
            text = random_split_chunk(loaded_data)  # Obtain a short chunk of text
            text = text.strip()
        print(f'{loop} Generating semantics for text:', text)
        semantics = text_to_semantic(text, temp=round(random.uniform(0.6, 0.8), ndigits=2))
        numpy.save(file_name, semantics)
+
+
def prepare_wavs_from_semantics():
    """Render a WAV for every semantics .npy in `output` that does not yet
    have one in `output_wav`, so an interrupted run can be resumed.

    Raises Exception when the semantics folder is missing (i.e. step 1 was
    never run).
    """
    if not os.path.isdir(output):
        raise Exception('No \'output\' folder, make sure you run create_data.py first!')
    # exist_ok makes the call race-free; behavior is unchanged otherwise.
    os.makedirs(output_wav, exist_ok=True)

    use_gpu = not os.environ.get("BARK_FORCE_CPU", False)

    print('Loading coarse model')
    load_model(use_gpu=use_gpu, use_small=False, force_reload=False, model_type='coarse')
    print('Loading fine model')
    load_model(use_gpu=use_gpu, use_small=False, force_reload=False, model_type='fine')

    files = fnmatch.filter(os.listdir(output), '*.npy')
    total = len(files)

    for i, f in tqdm(enumerate(files), total=total):
        real_name = '.'.join(f.split('.')[:-1])  # Cut off the extension
        file_name = os.path.join(output, f)
        out_file = os.path.join(output_wav, f'{real_name}.wav')
        # Don't process files that have already been processed, to be able to continue previous generations
        if not os.path.isfile(out_file) and os.path.isfile(file_name):
            print(f'Processing ({i+1}/{total}) -> {f}')
            wav = semantic_to_waveform(numpy.load(file_name), temp=round(random.uniform(0.6, 0.8), ndigits=2))
            # Output stays float32; PCM16 conversion would be:
            # wav = (wav * 32767).astype(np.int16)
            wavfile.write(out_file, SAMPLE_RATE, wav)

    print('Done!')
diff --git a/util/__init__.py b/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/util/__pycache__/__init__.cpython-313.pyc b/util/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ffd81ea9aaf532544230009bc26c257a331696cc
Binary files /dev/null and b/util/__pycache__/__init__.cpython-313.pyc differ
diff --git a/util/__pycache__/helper.cpython-313.pyc b/util/__pycache__/helper.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7cee7410d1a3ff4b48b88db34078aaef5c95daa5
Binary files /dev/null and b/util/__pycache__/helper.cpython-313.pyc differ
diff --git a/util/__pycache__/parseinput.cpython-313.pyc b/util/__pycache__/parseinput.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06000a86c41e01e67dfae3c2bdb2c1cf5642e608
Binary files /dev/null and b/util/__pycache__/parseinput.cpython-313.pyc differ
diff --git a/util/__pycache__/settings.cpython-313.pyc b/util/__pycache__/settings.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71bbddf056740067e7807a594d347065b5dbe62c
Binary files /dev/null and b/util/__pycache__/settings.cpython-313.pyc differ
diff --git a/util/helper.py b/util/helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..185613661a2f450e55a5d2add1a1e75bc08f5c19
--- /dev/null
+++ b/util/helper.py
@@ -0,0 +1,35 @@
+import os
+from datetime import datetime
+from mutagen.wave import WAVE
+from mutagen.id3._frames import *
+
def create_filename(path, seed, name, extension):
    """Build a unique output path of the form
    <cwd>/<path>/<MM-DD-YYYY>/<name>_<HH-MM-SS>[_s<seed>]<extension>,
    creating the dated sub-folder as needed.

    `seed` is appended as an `_s<seed>` suffix unless it is None.
    """
    now = datetime.now()
    date_str = now.strftime("%m-%d-%Y")
    # makedirs(..., exist_ok=True) creates the whole outputs/<date> chain in
    # one call and is race-free, unlike the old exists()+makedirs pair.
    sub_folder = os.path.join(os.getcwd(), path, date_str)
    os.makedirs(sub_folder, exist_ok=True)

    time_str = now.strftime("%H-%M-%S")
    if seed is None:
        file_name = f"{name}_{time_str}{extension}"
    else:
        file_name = f"{name}_{time_str}_s{seed}{extension}"
    return os.path.join(sub_folder, file_name)
+
+
def add_id3_tag(filename, text, speakername, seed):
    """Write ID3 metadata (title, artist, publisher, comment) into a
    generated WAV so audio players can show what was generated and how.

    A None speakername is recorded as "Unconditional".
    """
    audio = WAVE(filename)
    # Fixed: identity comparison (`is None`) instead of `== None`.
    if speakername is None:
        speakername = "Unconditional"

    # write id3 tag with text truncated to 60 chars, as a precaution...
    audio["TIT2"] = TIT2(encoding=3, text=text[:60])
    audio["TPE1"] = TPE1(encoding=3, text=f"Voice {speakername} using Seed={seed}")
    audio["TPUB"] = TPUB(encoding=3, text="Bark by Suno AI")
    audio["COMMENT"] = COMM(encoding=3, text="Generated with Bark GUI - Text-Prompted Generative Audio Model. Visit https://github.com/C0untFloyd/bark-gui")
    audio.save()
diff --git a/util/parseinput.py b/util/parseinput.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2102648cf169f0a52bb66755308fee5f81247e0
--- /dev/null
+++ b/util/parseinput.py
@@ -0,0 +1,129 @@
+import re
+import xml.etree.ElementTree as ET
+from xml.sax import saxutils
+#import nltk
+
+# Chunked generation originally from https://github.com/serp-ai/bark-with-voice-clone
def split_and_recombine_text(text, desired_length=100, max_length=150):
    # return nltk.sent_tokenize(text)

    # from https://github.com/neonbjb/tortoise-tts
    """Split text into chunks of a desired length, trying to keep sentences intact.

    Scans one character at a time while recording candidate split positions
    at sentence boundaries; a chunk is emitted once it reaches
    `desired_length`, and a split is forced at `max_length` (rewinding to
    the last boundary, or at worst to a word break).
    """
    # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii
    text = re.sub(r"\n\n+", "\n", text)
    text = re.sub(r"\s+", " ", text)
    text = re.sub(r"[“”]", '"', text)

    rv = []            # finished chunks
    in_quote = False   # inside a double-quoted span (splitting is suppressed there)
    current = ""       # chunk currently being accumulated
    split_pos = []     # candidate split positions recorded for `current`
    pos = -1           # index of the last character consumed from `text`
    end_pos = len(text) - 1

    def seek(delta):
        # Move the cursor by `delta` characters (negative rewinds), keeping
        # `current` and the quote state in sync; returns the char at `pos`.
        nonlocal pos, in_quote, current
        is_neg = delta < 0
        for _ in range(abs(delta)):
            if is_neg:
                pos -= 1
                current = current[:-1]
            else:
                pos += 1
                current += text[pos]
            if text[pos] == '"':
                in_quote = not in_quote
        return text[pos]

    def peek(delta):
        # Look `delta` characters ahead without consuming anything.
        p = pos + delta
        return text[p] if p < end_pos and p >= 0 else ""

    def commit():
        # Finish the current chunk and reset the per-chunk scan state.
        nonlocal rv, current, split_pos
        rv.append(current)
        current = ""
        split_pos = []

    while pos < end_pos:
        c = seek(1)
        # do we need to force a split?
        if len(current) >= max_length:
            if len(split_pos) > 0 and len(current) > (desired_length / 2):
                # we have at least one sentence and we are over half the desired length, seek back to the last split
                d = pos - split_pos[-1]
                seek(-d)
            else:
                # no full sentences, seek back until we are not in the middle of a word and split there
                while c not in "!?.,\n " and pos > 0 and len(current) > desired_length:
                    c = seek(-1)
            commit()
        # check for sentence boundaries
        elif not in_quote and (c in "!?]\n" or (c == "." and peek(1) in "\n ")):
            # seek forward if we have consecutive boundary markers but still within the max length
            while (
                pos < len(text) - 1 and len(current) < max_length and peek(1) in "!?.]"
            ):
                c = seek(1)
            split_pos.append(pos)
            if len(current) >= desired_length:
                commit()
        # treat end of quote as a boundary if its followed by a space or newline
        elif in_quote and peek(1) == '"' and peek(2) in "\n ":
            seek(2)
            split_pos.append(pos)
    rv.append(current)

    # clean up, remove lines with only whitespace or punctuation
    rv = [s.strip() for s in rv]
    rv = [s for s in rv if len(s) > 0 and not re.match(r"^[\s\.,;:!?]*$", s)]

    return rv
+
def is_ssml(value):
    """Return True when `value` parses as well-formed XML (treated as SSML)."""
    try:
        ET.fromstring(value)
        return True
    except ET.ParseError:
        return False
+
def build_ssml(rawtext, selected_voice):
    # Wrap each non-empty input line (XML-escaped) into the template below.
    # NOTE(review): `selected_voice` is never used, and the f-string template
    # contains no markup around {joinedparts} — it looks like the surrounding
    # SSML tags (speak/voice) were lost from this source. Confirm against the
    # consumer (create_clips_from_ssml expects namespaced <voice> elements)
    # before relying on this output.
    texts = rawtext.split("\n")
    joinedparts = ""
    for textpart in texts:
        textpart = textpart.strip()
        if len(textpart) < 1:
            continue
        # Escape &, <, > so user text is safe inside XML.
        joinedparts = joinedparts + f"\n{saxutils.escape(textpart)}"
    ssml = f"""

{joinedparts}

"""
    return ssml
+
def create_clips_from_ssml(ssmlinput):
    """Parse SSML input and return a list of (voice_name, text_chunk)
    tuples, with each voice's text split into generation-sized chunks."""
    root = ET.fromstring(ssmlinput)

    voice_list = []
    ns = '{http://www.w3.org/2001/10/synthesis}'

    # Walk every namespaced <voice> element and chunk its text content.
    for voice in root.iter(f'{ns}voice'):
        voice_name = voice.attrib['name']
        voice_content = voice.text.strip() if voice.text else ''
        if not voice_content:
            continue
        for part in split_and_recombine_text(voice_content):
            if len(part) > 1:
                # add to tuple list
                voice_list.append((voice_name, part))
    return voice_list
+
diff --git a/util/settings.py b/util/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ab66b0c7605d2b877defdd8592097a8a4c6f21a
--- /dev/null
+++ b/util/settings.py
@@ -0,0 +1,41 @@
+import yaml
+
class Settings:
    """Persistent web-UI configuration backed by a YAML file.

    Known keys are read from `config_file` on construction; missing keys
    (or a missing/unreadable file) fall back to defaults. `save()` writes
    the current values back to the same file.
    """

    def __init__(self, config_file):
        self.config_file = config_file
        self.load()

    def _apply(self, data):
        # Populate every setting from `data`, falling back to defaults.
        self.selected_theme = data.get('selected_theme', "gstaff/xkcd")
        self.server_name = data.get('server_name', "")
        self.server_port = data.get('server_port', 0)
        self.server_share = data.get('server_share', False)
        self.input_text_desired_length = data.get('input_text_desired_length', 110)
        self.input_text_max_length = data.get('input_text_max_length', 170)
        self.silence_sentence = data.get('silence_between_sentences', 250)
        self.silence_speakers = data.get('silence_between_speakers', 500)
        self.output_folder_path = data.get('output_folder_path', 'outputs')

    def load(self):
        """Read settings from the YAML config file.

        On any failure (missing file, parse error) every attribute is still
        initialized to its default. The previous version set only
        `selected_theme` in that case, so any later access to e.g.
        `server_name` raised AttributeError.
        """
        try:
            with open(self.config_file, 'r') as f:
                # `or {}` guards against an empty YAML file parsing to None.
                data = yaml.load(f, Loader=yaml.FullLoader) or {}
        except Exception:
            data = {}
        self._apply(data)

    def save(self):
        """Persist the current settings back to the YAML config file."""
        data = {
            'selected_theme': self.selected_theme,
            'server_name': self.server_name,
            'server_port': self.server_port,
            'server_share': self.server_share,
            'input_text_desired_length' : self.input_text_desired_length,
            'input_text_max_length' : self.input_text_max_length,
            'silence_between_sentences': self.silence_sentence,
            'silence_between_speakers': self.silence_speakers,
            'output_folder_path': self.output_folder_path
        }
        with open(self.config_file, 'w') as f:
            yaml.dump(data, f)
+
+
+
diff --git a/webui.py b/webui.py
new file mode 100644
index 0000000000000000000000000000000000000000..82423bc7d4e3be0674d8be2abb745fa9d9c15187
--- /dev/null
+++ b/webui.py
@@ -0,0 +1,465 @@
# NOTE(review): the next four imports look like accidental IDE auto-imports —
# `label`, `check`, `Example` and `dataclasses` are not referenced anywhere in
# this file, and distutils is removed in Python 3.12. Kept untouched here;
# consider deleting them.
from cProfile import label
import dataclasses
from distutils.command.check import check
from doctest import Example
import gradio as gr
import os
import sys
import numpy as np
import logging
import torch
import pytorch_seed
import time

from xml.sax import saxutils
from bark.api import generate_with_settings
from bark.api import save_as_prompt
from util.settings import Settings
# import nltk

from bark import SAMPLE_RATE
from cloning.clonevoice import clone_voice
# NOTE(review): SAMPLE_RATE is imported twice (here and from bark above).
from bark.generation import SAMPLE_RATE, preload_models, _load_history_prompt, codec_decode
from scipy.io.wavfile import write as write_wav
from util.parseinput import split_and_recombine_text, build_ssml, is_ssml, create_clips_from_ssml
from datetime import datetime
from tqdm.auto import tqdm
from util.helper import create_filename, add_id3_tag
from swap_voice import swap_voice_from_audio
from training.training_prepare import prepare_semantics_from_text, prepare_wavs_from_semantics
from training.train import training_prepare_files, train

# Global UI configuration, loaded once at import time.
settings = Settings('config.yaml')
+
+
def generate_text_to_speech(text, selected_speaker, text_temp, waveform_temp, eos_prob, quick_generation, complete_settings, seed, batchcount, progress=gr.Progress(track_tqdm=True)):
    """Main TTS callback: chunk `text` (plain or SSML), generate audio per
    chunk with Bark, and return the path of the final concatenated WAV.

    With empty text but a selected speaker, the speaker's stored fine
    prompt is decoded and returned instead. `batchcount` repeats the whole
    generation; each batch writes its own "final" file and the last path
    is returned.
    """
    # Chunk the text into smaller pieces then combine the generated audio

    # generation settings
    if selected_speaker == 'None':
        selected_speaker = None

    voice_name = selected_speaker

    if text == None or len(text) < 1:
        if selected_speaker == None:
            raise gr.Error('No text entered!')

        # Extract audio data from speaker if no text and speaker selected
        voicedata = _load_history_prompt(voice_name)
        audio_arr = codec_decode(voicedata["fine_prompt"])
        result = create_filename(settings.output_folder_path, "None", "extract", ".wav")
        save_wav(audio_arr, result)
        return result

    if batchcount < 1:
        batchcount = 1

    # Silence paddings, lengths taken from the configured millisecond values.
    silenceshort = np.zeros(int((float(settings.silence_sentence) / 1000.0) * SAMPLE_RATE), dtype=np.int16)  # quarter second of silence
    silencelong = np.zeros(int((float(settings.silence_speakers) / 1000.0) * SAMPLE_RATE), dtype=np.float32)  # half a second of silence
    use_last_generation_as_history = "Use last generation as history" in complete_settings
    save_last_generation = "Save generation as Voice" in complete_settings
    for l in range(batchcount):
        # Seed handling: non-positive or oversized seeds become random ones;
        # the RNG state after each chunk is carried forward as the next seed.
        currentseed = seed
        if seed != None and seed > 2**32 - 1:
            logger.warning(f"Seed {seed} > 2**32 - 1 (max), setting to random")
            currentseed = None
        if currentseed == None or currentseed <= 0:
            currentseed = np.random.default_rng().integers(1, 2**32 - 1)
        assert (0 < currentseed and currentseed < 2**32)

        progress(0, desc="Generating")

        full_generation = None

        all_parts = []
        complete_text = ""
        text = text.lstrip()
        if is_ssml(text):
            # SSML path: each <voice> clip may switch speakers; insert the
            # long silence between different speakers.
            list_speak = create_clips_from_ssml(text)
            prev_speaker = None
            for i, clip in tqdm(enumerate(list_speak), total=len(list_speak)):
                selected_speaker = clip[0]
                # Add pause break between speakers
                if i > 0 and selected_speaker != prev_speaker:
                    all_parts += [silencelong.copy()]
                prev_speaker = selected_speaker
                text = clip[1]
                text = saxutils.unescape(text)
                if selected_speaker == "None":
                    selected_speaker = None

                print(f"\nGenerating Text ({i+1}/{len(list_speak)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
                complete_text += text
                with pytorch_seed.SavedRNG(currentseed):
                    audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
                    currentseed = torch.random.initial_seed()
                if len(list_speak) > 1:
                    filename = create_filename(settings.output_folder_path, currentseed, "audioclip", ".wav")
                    save_wav(audio_array, filename)
                    add_id3_tag(filename, text, selected_speaker, currentseed)

                all_parts += [audio_array]
        else:
            # Plain-text path: split into sentence-sized chunks first.
            texts = split_and_recombine_text(text, settings.input_text_desired_length, settings.input_text_max_length)
            for i, text in tqdm(enumerate(texts), total=len(texts)):
                print(f"\nGenerating Text ({i+1}/{len(texts)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
                complete_text += text
                if quick_generation == True:
                    with pytorch_seed.SavedRNG(currentseed):
                        audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
                        currentseed = torch.random.initial_seed()
                else:
                    # Full output is needed when the generation should be
                    # saved as a voice or reused as history.
                    full_output = use_last_generation_as_history or save_last_generation
                    if full_output:
                        full_generation, audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob, output_full=True)
                    else:
                        audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)

                # Noticed this in the HF Demo - convert to 16bit int -32767/32767 - most used audio format
                # audio_array = (audio_array * 32767).astype(np.int16)

                if len(texts) > 1:
                    filename = create_filename(settings.output_folder_path, currentseed, "audioclip", ".wav")
                    save_wav(audio_array, filename)
                    add_id3_tag(filename, text, selected_speaker, currentseed)

                if quick_generation == False and (save_last_generation == True or use_last_generation_as_history == True):
                    # save to npz
                    voice_name = create_filename(settings.output_folder_path, seed, "audioclip", ".npz")
                    save_as_prompt(voice_name, full_generation)
                    if use_last_generation_as_history:
                        selected_speaker = voice_name

                all_parts += [audio_array]
                # Add short pause between sentences
                if text[-1] in "!?.\n" and i > 1:
                    all_parts += [silenceshort.copy()]

        # save & play audio (one "final" file per batch iteration)
        result = create_filename(settings.output_folder_path, currentseed, "final", ".wav")
        save_wav(np.concatenate(all_parts), result)
        # write id3 tag with text truncated to 60 chars, as a precaution...
        add_id3_tag(result, complete_text, selected_speaker, currentseed)

    return result
+
+
def save_wav(audio_array, filename):
    """Write `audio_array` to `filename` as a WAV at Bark's sample rate."""
    write_wav(filename, SAMPLE_RATE, audio_array)
+
+
def save_voice(filename, semantic_prompt, coarse_prompt, fine_prompt):
    """Persist the three Bark prompt arrays as a compressed .npz voice file."""
    prompts = {
        'semantic_prompt': semantic_prompt,
        'coarse_prompt': coarse_prompt,
        'fine_prompt': fine_prompt,
    }
    np.savez_compressed(filename, **prompts)
+
+
def on_quick_gen_changed(checkbox):
    """Show the detailed-settings group only while Quick Generation is off."""
    # The detailed options are irrelevant during quick generation.
    visible = (checkbox == False)
    return gr.CheckboxGroup.update(visible=visible)
+
+
def delete_output_files(checkbox_state):
    """When the hidden confirmation checkbox is set, wipe every file in the
    configured output folder; always reset the checkbox to False."""
    if checkbox_state:
        outputs_folder = os.path.join(os.getcwd(), settings.output_folder_path)
        if os.path.exists(outputs_folder):
            purgedir(outputs_folder)
    # Reset the confirmation checkbox regardless of whether anything ran.
    return False
+
+
+# https://stackoverflow.com/a/54494779
def purgedir(parent):
    """Recursively delete every file under `parent`, keeping the directory
    tree itself intact."""
    for root, dirs, files in os.walk(parent):
        for name in files:
            # Delete the file itself.
            os.unlink(os.path.join(root, name))
        for name in dirs:
            # Recurse into each sub-directory.
            purgedir(os.path.join(root, name))
+
+
def convert_text_to_ssml(text, selected_speaker):
    """Gradio callback: turn plain input text into SSML for the chosen voice."""
    return build_ssml(text, selected_speaker)
+
+
def training_prepare(selected_step, num_text_generations, progress=gr.Progress(track_tqdm=True)):
    """Dispatch the selected dataset-preparation step.

    Step 1 generates semantic tokens from text; anything else renders WAVs
    from the previously generated semantics.
    """
    is_step_one = (selected_step == prepare_training_list[0])
    if is_step_one:
        prepare_semantics_from_text(num_text_generations)
    else:
        prepare_wavs_from_semantics()
    return None
+
+
def start_training(save_model_epoch, max_epochs, progress=gr.Progress(track_tqdm=True)):
    """Stage the training files, then run tokenizer training on them."""
    data_path = "./training/data/"
    training_prepare_files(data_path, "./training/data/checkpoint/hubert_base_ls960.pt")
    train(data_path, save_model_epoch, max_epochs)
    return None
+
+
def apply_settings(themes, input_server_name, input_server_port, input_server_public, input_desired_len, input_max_len, input_silence_break, input_silence_speaker):
    """Copy the values from the Settings tab into the global `settings`
    object and persist them to config.yaml."""
    settings.selected_theme = themes
    settings.server_name = input_server_name
    settings.server_port = input_server_port
    settings.server_share = input_server_public
    settings.input_text_desired_length = input_desired_len
    settings.input_text_max_length = input_max_len
    settings.silence_sentence = input_silence_break
    # Fixed: this assigned `settings.silence_speaker` (missing trailing "s"),
    # creating a new attribute while Settings.save() writes `silence_speakers`
    # — the speaker-pause setting was never actually persisted.
    settings.silence_speakers = input_silence_speaker
    settings.save()
+
+
def restart():
    """Signal the main server loop to relaunch the Gradio server."""
    global restart_server
    restart_server = True
+
+
def create_version_html():
    # Build the version summary shown in the UI header via gr.HTML.
    # NOTE(review): the template below contains no HTML tags even though it is
    # rendered with gr.HTML — markup (links/spans) may have been lost from
    # this source; confirm the original template before editing.
    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
    versions_html = f"""
python: {python_version}
 • 
torch: {getattr(torch, '__long_version__', torch.__version__)}
 • 
gradio: {gr.__version__}
"""
    return versions_html
+
+
# Module bootstrap: logging, device detection, model preload, server config.
logger = logging.getLogger(__name__)
APPTITLE = "Bark UI Enhanced v0.7.4"


autolaunch = False

# "-autolaunch" on the command line opens the browser automatically.
if len(sys.argv) > 1:
    autolaunch = "-autolaunch" in sys.argv


# NOTE(review): this forces CPU when CUDA is absent but MPS *is* available,
# yet the warning says neither was detected — confirm the intended condition
# (possibly `not torch.backends.mps.is_available()`).
if torch.cuda.is_available() == False and torch.backends.mps.is_available():
    os.environ['BARK_FORCE_CPU'] = 'True'
    logger.warning("No CUDA or MPS detected, fallback to CPU!")


# Echo the effective environment switches for debugging.
print(f'smallmodels={os.environ.get("SUNO_USE_SMALL_MODELS", False)}')
print(f'enablemps={os.environ.get("SUNO_ENABLE_MPS", False)}')
print(f'offloadcpu={os.environ.get("SUNO_OFFLOAD_CPU", False)}')
print(f'forcecpu={os.environ.get("BARK_FORCE_CPU", False)}')
print(f'autolaunch={autolaunch}\n\n')

# print("Updating nltk\n")
# nltk.download('punkt')

print("Preloading Models\n")
preload_models()

available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
tokenizer_language_list = ["de", "en", "es", "pl"]
prepare_training_list = ["Step 1: Semantics from Text", "Step 2: WAV from Semantics"]

# NOTE(review): `seed` appears unused; the UI seed comes from `seedcomponent`.
seed = 42
# Empty server name / non-positive port mean "use Gradio defaults".
server_name = settings.server_name
if len(server_name) < 1:
    server_name = None
server_port = settings.server_port
if server_port <= 0:
    server_port = None
# `global` at module level is a no-op; kept only to document intent.
global run_server
global restart_server

run_server = True
+
# Main server loop: rebuild the UI and relaunch after a requested restart.
while run_server:
    # Collect all existing speakers/voices in dir
    speakers_list = []

    for root, dirs, files in os.walk("./bark/assets/prompts"):
        for file in files:
            if file.endswith(".npz"):
                # Store voices as "<subfolder>/<name>" relative to the prompts dir.
                pathpart = root.replace("./bark/assets/prompts", "")
                name = os.path.join(pathpart, file[:-4])
                if name.startswith("/") or name.startswith("\\"):
                    name = name[1:]
                speakers_list.append(name)

    speakers_list = sorted(speakers_list, key=lambda x: x.lower())
    speakers_list.insert(0, 'None')

    print(f'Launching {APPTITLE} Server')

    # Create Gradio Blocks

    with gr.Blocks(title=f"{APPTITLE}", mode=f"{APPTITLE}", theme=settings.selected_theme) as barkgui:
        # Header: app title and version info.
        with gr.Row():
            with gr.Column():
                gr.Markdown(f"### [{APPTITLE}](https://github.com/C0untFloyd/bark-gui)")
            with gr.Column():
                gr.HTML(create_version_html(), elem_id="versions")

        with gr.Tab("TTS"):
            with gr.Row():
                with gr.Column():
                    placeholder = "Enter text here."
                    input_text = gr.Textbox(label="Input Text", lines=4, placeholder=placeholder)
                with gr.Column():
                    seedcomponent = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
                    batchcount = gr.Number(label="Batch count", precision=0, value=1)
            with gr.Row():
                with gr.Column():
                    # NOTE(review): the multi-line examples look like they once
                    # contained SSML/speaker markup that was lost from this
                    # source — verify before editing these strings.
                    examples = [
                        "Special meanings: [laughter] [laughs] [sighs] [music] [gasps] [clears throat] MAN: WOMAN:",
                        "♪ Never gonna make you cry, never gonna say goodbye, never gonna tell a lie and hurt you ♪",
                        "And now — a picture of a larch [laughter]",
                        """
        WOMAN: I would like an oatmilk latte please.
        MAN: Wow, that's expensive!
        """,
                        """

        Look at that drunk guy!
        Who is he?
        WOMAN: [clears throat] 10 years ago, he proposed me and I rejected him.
        Oh my God [laughs] he is still celebrating
        """
                    ]
                    examples = gr.Examples(examples=examples, inputs=input_text)
                with gr.Column():
                    convert_to_ssml_button = gr.Button("Convert Input Text to SSML")

            with gr.Row():
                with gr.Column():
                    gr.Markdown("[Voice Prompt Library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)")
                    speaker = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice")
                with gr.Column():
                    text_temp = gr.Slider(0.1, 1.0, value=0.6, label="Generation Temperature", info="1.0 more diverse, 0.1 more conservative")
                    waveform_temp = gr.Slider(0.1, 1.0, value=0.7, label="Waveform temperature", info="1.0 more diverse, 0.1 more conservative")

            with gr.Row():
                with gr.Column():
                    quick_gen_checkbox = gr.Checkbox(label="Quick Generation", value=True)
                    settings_checkboxes = ["Use last generation as history", "Save generation as Voice"]
                    complete_settings = gr.CheckboxGroup(choices=settings_checkboxes, value=settings_checkboxes, label="Detailed Generation Settings", type="value", interactive=True, visible=False)
                with gr.Column():
                    eos_prob = gr.Slider(0.0, 0.5, value=0.05, label="End of sentence probability")

            with gr.Row():
                with gr.Column():
                    tts_create_button = gr.Button("Generate")
                with gr.Column():
                    # Hidden checkbox used by the JS confirm() hack below.
                    hidden_checkbox = gr.Checkbox(visible=False)
                    button_stop_generation = gr.Button("Stop generation")
            with gr.Row():
                output_audio = gr.Audio(label="Generated Audio", type="filepath")

        with gr.Tab("Swap Voice"):
            with gr.Row():
                swap_audio_filename = gr.Audio(label="Input audio.wav to swap voice", sources=["upload"], type="filepath")
            with gr.Row():
                with gr.Column():
                    swap_tokenizer_lang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1])
                    swap_seed = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
                with gr.Column():
                    speaker_swap = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice")
                    swap_batchcount = gr.Number(label="Batch count", precision=0, value=1)
            with gr.Row():
                swap_voice_button = gr.Button("Swap Voice")
            with gr.Row():
                output_swap = gr.Audio(label="Generated Audio", type="filepath")

        with gr.Tab("Clone Voice"):
            with gr.Row():
                input_audio_filename = gr.Audio(label="Input audio.wav", sources=["upload"], type="filepath")
                # transcription_text = gr.Textbox(label="Transcription Text", lines=1, placeholder="Enter Text of your Audio Sample here...")
            with gr.Row():
                with gr.Column():
                    initialname = "./bark/assets/prompts/custom/MeMyselfAndI"
                    output_voice = gr.Textbox(label="Filename of trained Voice", lines=1, placeholder=initialname, value=initialname)
                with gr.Column():
                    tokenizerlang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1])
            with gr.Row():
                clone_voice_button = gr.Button("Create Voice")
            with gr.Row():
                dummy = gr.Text(label="Progress")

        with gr.Tab("Training Data Prepare"):
            gr.Markdown("This tab should be used to generate the training dataset. For Step 1 put some books into the inputtext folder in UTF-8 Text Format.")
            prepare_semantics_number = gr.Number(label="Number of semantics to create", precision=0, value=3079)
            prepare_dropdown = gr.Dropdown(prepare_training_list, value=prepare_training_list[0], label="Prepare")
            training_prepare_button = gr.Button("Generate")
            dummytrd = gr.Text(label="Progress")

        with gr.Tab("Training"):
            with gr.Row():
                gr.Markdown("This tab is used to train the actual model (language).")
            with gr.Row():
                with gr.Column():
                    save_model_epoch = gr.Number(label="Auto-save model after number of epochs", precision=0, value=1)
                with gr.Column():
                    max_epochs = gr.Number(label="Train for number of epochs", precision=0, value=6)
            with gr.Row():
                with gr.Column():
                    allowed_chars = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=\"\':;[]{}/<>,.`~'
                    allowedcharsfilter = gr.Textbox(label="Allowed chars for text input", lines=1, value=allowed_chars)
                with gr.Column():
                    train_button = gr.Button("Start Training")
            with gr.Row():
                dummytrain = gr.Text(label="Progress")

        with gr.Tab("Settings"):
            with gr.Row():
                themes = gr.Dropdown(available_themes, label="Theme", info="Change needs complete restart", value=settings.selected_theme)
            with gr.Row():
                input_server_name = gr.Textbox(label="Server Name", lines=1, info="Leave blank to run locally", value=settings.server_name)
                input_server_port = gr.Number(label="Server Port", precision=0, info="Leave at 0 to use default", value=settings.server_port)
                share_checkbox = gr.Checkbox(label="Public Server", value=settings.server_share)
            with gr.Row():
                input_desired_len = gr.Slider(100, 150, value=settings.input_text_desired_length, label="Desired Input Text Length", info="Ideal length to split input sentences")
                input_max_len = gr.Slider(150, 256, value=settings.input_text_max_length, label="Max Input Text Length", info="Maximum Input Text Length")
            with gr.Row():
                input_silence_break = gr.Slider(1, 1000, value=settings.silence_sentence, label="Sentence Pause Time (ms)", info="Silence between sentences in milliseconds")
                input_silence_speakers = gr.Slider(1, 5000, value=settings.silence_speakers, label="Speaker Pause Time (ms)", info="Silence between different speakers in milliseconds")

            with gr.Row():
                button_apply_settings = gr.Button("Apply Settings")
                button_apply_restart = gr.Button("Restart Server")
                button_delete_files = gr.Button("Clear output folder")

        # Wire the UI events to their callbacks.
        quick_gen_checkbox.change(fn=on_quick_gen_changed, inputs=quick_gen_checkbox, outputs=complete_settings)
        convert_to_ssml_button.click(convert_text_to_ssml, inputs=[input_text, speaker], outputs=input_text)
        gen_click = tts_create_button.click(generate_text_to_speech, inputs=[input_text, speaker, text_temp, waveform_temp, eos_prob,
                                            quick_gen_checkbox, complete_settings, seedcomponent, batchcount], outputs=output_audio)
        button_stop_generation.click(fn=None, inputs=None, outputs=None, cancels=[gen_click])
        # Javascript hack to display modal confirmation dialog
        js = "(x) => confirm('Are you sure? This will remove all files from output folder')"
        button_delete_files.click(None, None, hidden_checkbox, js=js)
        hidden_checkbox.change(delete_output_files, [hidden_checkbox], [hidden_checkbox])

        swap_voice_button.click(swap_voice_from_audio, inputs=[swap_audio_filename, speaker_swap, swap_tokenizer_lang, swap_seed, swap_batchcount], outputs=output_swap)
        clone_voice_button.click(clone_voice, inputs=[input_audio_filename, tokenizerlang, output_voice], outputs=dummy)
        training_prepare_button.click(training_prepare, inputs=[prepare_dropdown, prepare_semantics_number], outputs=dummytrd)
        train_button.click(start_training, inputs=[save_model_epoch, max_epochs], outputs=dummytrain)
        button_apply_settings.click(apply_settings, inputs=[themes, input_server_name, input_server_port, share_checkbox, input_desired_len, input_max_len, input_silence_break, input_silence_speakers])
        button_apply_restart.click(restart)

    # Launch the server; prevent_thread_lock keeps this thread free to poll
    # for the restart flag set by the Settings tab's restart() callback.
    restart_server = False
    try:
        barkgui.queue().launch(inbrowser=autolaunch, server_name=server_name, server_port=server_port, share=settings.server_share, prevent_thread_lock=True)
    except:
        restart_server = True
        run_server = False
    try:
        # Idle until a restart is requested, then loop around and relaunch.
        while restart_server == False:
            time.sleep(1.0)
    except (KeyboardInterrupt, OSError):
        print("Keyboard interruption in main thread... closing server.")
        run_server = False
    barkgui.close()