"""
Created on Fri Jun 30 08:47:31 2023

@author: fujidai
"""
| |
|
| |
|
| | import torch |
| | from sentence_transformers import SentenceTransformer, InputExample, losses,models |
| | from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses |
| | from sentence_transformers.readers import InputExample |
| | from torch.utils.data import DataLoader |
| | from transformers import AutoTokenizer |
| | from sentence_transformers.SentenceTransformer import SentenceTransformer |
| | import torch |
| | import torch.nn.functional as F |
| | import numpy as np |
| | from sentence_transformers import SentenceTransformer, util |
| |
|
| |
|
# Assemble a SentenceTransformer from a local multilingual MPNet checkpoint:
# a transformer encoder (sequences truncated at 512 tokens) followed by a
# pooling layer over the token embeddings.
encoder = models.Transformer(
    '/paraphrase-multilingual-mpnet-base-v2', max_seq_length=512
)
embedding_dim = encoder.get_word_embedding_dimension()
pooler = models.Pooling(embedding_dim)

# 'mps' targets the Apple-silicon GPU backend.
model = SentenceTransformer(modules=[encoder, pooler], device='mps')
print(model)
| |
|
| |
|
# Load the first set of similarity labels: one float per line
# (precomputed cosine similarities for the pseudo/pseudo-pseudo pairs).
with open('/cos-sim_pseudo-pseudo.txt', 'r') as f:
    raberu_lines = f.read().splitlines()
# Comprehension replaces the index loop; raises ValueError on a non-numeric line.
data = [float(line) for line in raberu_lines]
| |
|
| |
|
# Load the second set of similarity labels: one float per line
# (precomputed cosine similarities for the pseudo pairs).
with open('/cos-sim_pseudo.txt', 'r') as f:
    raberu2_lines = f.read().splitlines()
# Comprehension replaces the index loop; raises ValueError on a non-numeric line.
data2 = [float(line) for line in raberu2_lines]
| |
|
| |
|
| |
|
def _read_lines(path):
    """Read a text file and return its lines without trailing newlines."""
    with open(path, 'r') as f:
        return f.read().splitlines()


# The three parallel corpora: original English sentences plus two
# pseudo-translated variants, one sentence per line, aligned by line number.
left_lines = _read_lines('/en-origin.txt')
senter_lines = _read_lines('/en-pseudo-pseudo.txt')
right_lines = _read_lines('/en-pseudo.txt')
| |
|
| |
|
# Fail fast if the corpora and label files are not line-aligned; the
# original index loop would otherwise die with a bare IndexError mid-build.
if not (len(left_lines) == len(senter_lines) == len(right_lines)
        == len(data) == len(data2)):
    raise ValueError("input files disagree on line count")

# One InputExample per (original, pseudo-pseudo, pseudo) triple.  The label
# is the absolute gap between the two precomputed similarity scores, which
# MarginMSELoss will regress the model's own score margin against.
train_examples = [
    InputExample(texts=[left, senter, right], label=abs(score1 - score2))
    for left, senter, right, score1, score2 in zip(
        left_lines, senter_lines, right_lines, data, data2
    )
]

print(len(train_examples))
| |
|
# NOTE(review): `device` is assigned but never used below — the model was
# already placed on 'mps' at construction time.  Kept for compatibility.
device = torch.device('mps')

# (The redundant mid-file `import torch.nn.functional as F` was removed;
# F is already imported at the top of the file.)

train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)
# MarginMSELoss: MSE between the model's (anchor,pos) - (anchor,neg)
# cosine-similarity margin and the per-example label.
train_loss = losses.MarginMSELoss(model=model, similarity_fct=F.cosine_similarity)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=100,
    warmup_steps=100,
    show_progress_bar=True,
    checkpoint_path='checkpoint_savename',
    checkpoint_save_steps=9370,
    save_best_model=True,
)
model.save("save_name")
| |
|
| |
|
| |
|
| |
|
| | ''' |
| | ''' |
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | |
| |
|