init
Browse files- tokenize_dataset_s2s.py +1 -1
tokenize_dataset_s2s.py
CHANGED
|
@@ -19,7 +19,7 @@ tokenizer = EncodecTokenizer.from_pretrained()
|
|
| 19 |
|
| 20 |
def tokenize(example):
    """Replace each side's raw audio with Encodec token lists, in place on *example*.

    For every side name in the module-level ``sides``, reads the HF-datasets
    audio dict at key ``f"{side}.audio"`` and writes the token sequence to
    ``f"{side}.audio.tokens"``.

    NOTE(review): assumes ``example[f"{side}.audio"]`` has ``"array"`` (numpy)
    and ``"sampling_rate"`` keys, per the datasets Audio feature — confirm
    against the dataset schema.
    """
    for side in sides:
        audio = example[f"{side}.audio"]
        # wav_to_tokens presumably expects (batch, channels, samples) — hence
        # the reshape to a single-batch, single-channel tensor.
        waveform = torch.as_tensor(audio["array"].reshape(1, 1, -1))
        tokens = tokenizer.wav_to_tokens(
            wav=waveform, sample_rate=audio["sampling_rate"]
        )
        example[f"{side}.audio.tokens"] = tokens.numpy().tolist()
|
|
|
|
| 19 |
|
| 20 |
def tokenize(example):
    """Replace each side's raw audio with Encodec token lists, in place on *example*.

    For every side name in the module-level ``sides``, reads the HF-datasets
    audio dict at key ``f"{side}.audio"`` and writes the token sequence to
    ``f"{side}.audio.tokens"``.

    NOTE(review): assumes ``example[f"{side}.audio"]`` has ``"array"`` (numpy)
    and ``"sampling_rate"`` keys, per the datasets Audio feature — confirm
    against the dataset schema.
    """
    for side in sides:
        # Reshape to (batch=1, channels=1, samples) and cast to float32.
        # BUG FIX: the original passed dtype=torch.FloatTensor, but that is a
        # tensor *class*, not a torch.dtype — torch.as_tensor raises TypeError
        # on it. The dtype object torch.float32 is the correct spelling.
        wav = torch.as_tensor(
            example[f"{side}.audio"]["array"].reshape(1, 1, -1),
            dtype=torch.float32,
        )
        example[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(
            wav=wav, sample_rate=example[f"{side}.audio"]["sampling_rate"]
        ).numpy().tolist()
|