|
|
import json
import re

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import notebook_login
from torch.utils.data import DataLoader
from transformers import (
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Model,
    Wav2Vec2Processor,
)

# Authenticate with the Hugging Face Hub so the dataset can be fetched and
# artifacts pushed later on.
notebook_login()

organization_name = "ASR-Erzya-Final-Project"
dataset_name = "asr_erzya_final_data"

dataset = load_dataset(f"{organization_name}/{dataset_name}")
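
# Quick sanity check: confirm the splits and column names before wiring up the
# DataLoader (the "audio" and "text" columns used below are assumptions about
# this dataset's schema).
print(dataset)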
|
|
|
|
|
# Start from the pretrained English base checkpoint; the CTC head is fine-tuned
# on the Erzya data below. Note that its English character vocabulary may not
# cover Erzya text; Part 2 builds a dedicated tokenizer and processor.
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")


def preprocess_data(batch):
    # collate_fn receives a list of dataset rows, not a columnar batch.
    audio = [row["audio"]["array"] for row in batch]
    texts = [row["text"] for row in batch]

    # Assumes the audio is already sampled at 16 kHz.
    inputs = processor(audio, sampling_rate=16000, return_tensors="pt", padding=True)
    targets = processor(text=texts, return_tensors="pt", padding=True)

    # CTC loss ignores label positions set to -100, so mask out the padding.
    targets["input_ids"] = targets["input_ids"].masked_fill(targets["attention_mask"].ne(1), -100)

    return inputs, targets


train_data_loader = DataLoader(dataset["train"], batch_size=4, collate_fn=preprocess_data)
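
# Pull a single batch to verify tensor shapes before training (a sanity check
# only; safe to remove once the pipeline works).
sample_inputs, sample_targets = next(iter(train_data_loader))
print(sample_inputs["input_values"].shape, sample_targets["input_ids"].shape)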
|
|
|
|
|
|
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

for epoch in range(5):
    model.train()
    for batch in train_data_loader:
        inputs, targets = batch
        inputs = {key: value.to(device) for key, value in inputs.items()}
        targets = {key: value.to(device) for key, value in targets.items()}

        optimizer.zero_grad()
        outputs = model(**inputs, labels=targets["input_ids"])
        loss = outputs.loss
        loss.backward()
        optimizer.step()
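
    # Minimal progress signal: this reports only the final batch's loss, which
    # is enough to confirm training runs but is not a real validation metric.
    print(f"epoch {epoch}: last-batch loss = {loss.item():.4f}")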
|
|
|
|
|
|
|
|
# Save the fine-tuned model and processor locally (the directory name is a
# placeholder; replace it with your own).
model.save_pretrained("your-organization/your-wav2vec-ctc-model")
processor.save_pretrained("your-organization/your-wav2vec-ctc-model")


# ---------------------------------------------------------------------------
# Part 2: build an Erzya tokenizer and processor from Common Voice ("myv").
# ---------------------------------------------------------------------------
common_voice_train = load_dataset("common_voice", "myv", split="train")
common_voice_test = load_dataset("common_voice", "myv", split="test")

# Drop metadata columns that are not needed for acoustic model training.
common_voice_train = common_voice_train.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "segment", "up_votes"])
common_voice_test = common_voice_test.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "segment", "up_votes"])
chars_to_remove_regex = r'[\,\?\.\!\-\;\:\"\“\%\‘\”\�\']'


def remove_special_characters(batch):
    # Strip punctuation and lowercase the transcripts before building the
    # character-level vocabulary.
    batch["sentence"] = re.sub(chars_to_remove_regex, '', batch["sentence"]).lower()
    return batch


common_voice_train = common_voice_train.map(remove_special_characters)
common_voice_test = common_voice_test.map(remove_special_characters)
def replace_hatted_characters(batch):
    # Normalize the circumflexed vowel to its plain form.
    batch["sentence"] = re.sub('[â]', 'a', batch["sentence"])
    return batch


common_voice_train = common_voice_train.map(replace_hatted_characters)
common_voice_test = common_voice_test.map(replace_hatted_characters)
def extract_all_chars(batch):
    all_text = " ".join(batch["sentence"])
    vocab = list(set(all_text))
    return {"vocab": [vocab], "all_text": [all_text]}


vocab_train = common_voice_train.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_train.column_names)
vocab_test = common_voice_test.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_test.column_names)

# Merge the train and test alphabets into one character-level vocabulary.
vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
vocab_dict = {v: k for k, v in enumerate(sorted(vocab_list))}

# Use "|" as the word delimiter instead of a literal space, then append the
# CTC-specific unknown and padding tokens.
vocab_dict["|"] = vocab_dict[" "]
del vocab_dict[" "]
vocab_dict["[UNK]"] = len(vocab_dict)
vocab_dict["[PAD]"] = len(vocab_dict)
|
|
|
|
|
with open('vocab.json', 'w') as vocab_file:
    json.dump(vocab_dict, vocab_file)

# Build a CTC tokenizer from the vocab.json written above.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
# Push the tokenizer to the Hub (requires the notebook_login() call above).
repo_name = "wav2vec2-large-xls-r-300m-tr-colab"
tokenizer.push_to_hub(repo_name)
def downsample(batch):
    # Common Voice audio is 48 kHz; wav2vec 2.0 expects 16 kHz input. Resample
    # the raw array inside the audio dict rather than the dict itself.
    resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
    audio = torch.tensor(batch["audio"]["array"], dtype=torch.float32)
    batch["audio"]["array"] = resampler(audio).numpy()
    batch["audio"]["sampling_rate"] = 16_000
    return batch


common_voice_train = common_voice_train.map(downsample)
common_voice_test = common_voice_test.map(downsample)
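
# Alternative: the datasets Audio feature can resample lazily on access, which
# avoids rewriting the arrays (sketch; requires `from datasets import Audio`):
# common_voice_train = common_voice_train.cast_column("audio", Audio(sampling_rate=16_000))
# common_voice_test = common_voice_test.cast_column("audio", Audio(sampling_rate=16_000))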
|
|
|
|
|
|
|
|
# Assemble the full Erzya processor: a feature extractor matching wav2vec 2.0's
# expected input format plus the tokenizer built above.
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)

processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
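
# If the processor should live alongside the tokenizer on the Hub, it can be
# pushed the same way (reusing repo_name here is an assumption):
# processor.push_to_hub(repo_name)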
|
|
|
|
|
'''
# Example: extract hidden states from a fine-tuned checkpoint for one local
# file. Assumes the audio is already 16 kHz; resample it first otherwise.
model = Wav2Vec2Model.from_pretrained("Link/to/HuggingfaceModel")

array, fs = torchaudio.load("/Local/link/to/your/audio.wav")
inputs = processor(array.squeeze(), sampling_rate=fs, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

print(f"Hidden state shape: {outputs.last_hidden_state.numpy().shape}")
'''
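
'''
# A minimal transcription sketch using greedy CTC decoding. This is a sketch,
# assuming a fine-tuned Wav2Vec2ForCTC checkpoint and the processor built
# above; both paths below are placeholders, and the audio is assumed to be
# 16 kHz already.
model = Wav2Vec2ForCTC.from_pretrained("Link/to/HuggingfaceModel")

array, fs = torchaudio.load("/Local/link/to/your/audio.wav")
inputs = processor(array.squeeze(), sampling_rate=fs, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))
'''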
|
|
|