import sys

import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

from config import config

LOCAL_PATH = "./bert/chinese-roberta-wwm-ext-large"

tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)

# Cache one model instance per device so repeated calls do not reload weights.
models = dict()
|
|
def get_bert_feature(
    text,
    word2ph,
    device=config.bert_gen_config.device,
    style_text=None,
    style_weight=0.7,
):
    # On macOS, fall back from CPU to Apple's MPS backend when it is available.
    if (
        sys.platform == "darwin"
        and torch.backends.mps.is_available()
        and device == "cpu"
    ):
        device = "mps"
    if not device:
        device = "cuda"
    # Lazily load one model per device and reuse it across calls.
    if device not in models:
        models[device] = AutoModelForMaskedLM.from_pretrained(LOCAL_PATH).to(device)
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt")
        for i in inputs:
            inputs[i] = inputs[i].to(device)
        res = models[device](**inputs, output_hidden_states=True)
        # Keep the third-to-last hidden layer as the token-level feature.
        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
        if style_text:
            style_inputs = tokenizer(style_text, return_tensors="pt")
            for i in style_inputs:
                style_inputs[i] = style_inputs[i].to(device)
            style_res = models[device](**style_inputs, output_hidden_states=True)
            style_res = torch.cat(style_res["hidden_states"][-3:-2], -1)[0].cpu()
            # Average the style features over all tokens.
            style_res_mean = style_res.mean(0)

    # One entry per character plus two for the [CLS]/[SEP] tokens.
    assert len(word2ph) == len(text) + 2
    word2phone = word2ph
    phone_level_feature = []
    for i in range(len(word2phone)):
        if style_text:
            # Blend each token feature with the mean style feature.
            repeat_feature = (
                res[i].repeat(word2phone[i], 1) * (1 - style_weight)
                + style_res_mean.repeat(word2phone[i], 1) * style_weight
            )
        else:
            # Repeat each token feature once per phone it maps to.
            repeat_feature = res[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)

    # Transpose to (feature_dim, n_phones).
    return phone_level_feature.T
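
# Usage sketch (illustrative values, not a real grapheme-to-phoneme pass):
# `word2ph` assigns a phone count to each character plus the [CLS]/[SEP]
# tokens, so a two-character text needs four entries.
#
#   feature = get_bert_feature("你好", [1, 2, 2, 1])
#   feature.shape  # torch.Size([1024, 6]); 6 == sum of the word2ph entries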
|
|
if __name__ == "__main__":
    # Toy demo: expand random word-level features to phone level.
    word_level_feature = torch.rand(38, 1024)  # one 1024-dim vector per token
    word2phone = [
        1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2,
        2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2,
        1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1,
    ]

    # Total number of phone frames after expansion.
    total_frames = sum(word2phone)
    print(word_level_feature.shape)
    print(word2phone)
    phone_level_feature = []
    for i in range(len(word2phone)):
        print(word_level_feature[i].shape)

        # Repeat this word's feature once per phone frame.
        repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    print(phone_level_feature.shape)
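    # Sanity check (assumes the demo shapes above): the expanded tensor has
    # one row per phone frame, i.e. sum(word2phone) rows of 1024 features.
    assert phone_level_feature.shape == (total_frames, 1024)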
|
|