id: int64 (0 – 190k)
prompt: string (lengths 21 – 13.4M)
docstring: string (lengths 1 – 12k)
20,798
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, DeepSpeedTrainer from bert4torch.callbacks import Callback, Logger from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader maxlen = 256 def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long) batch_labels = torch.tensor(batch_labels, dtype=torch.long) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() model = DeepSpeedTrainer(net, config_path='./deepspeed.json') model.move_to_model_device = True The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem: 单条样本推理 Here is the function: def inference(texts): '''单条样本推理 ''' for text in texts: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) token_ids = torch.tensor(token_ids, dtype=torch.long)[None, :] segment_ids = torch.tensor(segment_ids, dtype=torch.long)[None, :] logit = model.predict([token_ids, segment_ids]) y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy() print(text, ' ----> ', y_pred)
单条样本推理
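For reference, a minimal self-contained sketch of the softmax-plus-argmax step that the `inference` function above relies on; the logit values below are made up purely for illustration:

import torch

logit = torch.tensor([[1.2, -0.3, 2.5]])      # fake logits for a 3-class example
probs = torch.softmax(logit, dim=-1)          # normalize to probabilities
y_pred = torch.argmax(probs, dim=-1).item()   # predicted label id -> 2
print(y_pred)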
20,799
import torch from torch.utils.data import DataLoader import torch.nn as nn import torch.optim as optim from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, seed_everything, text_segmentate, get_pool_emb from bert4torch.tokenizers import Tokenizer from bert4torch.losses import TemporalEnsemblingLoss maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,800
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb from bert4torch.optimizers import extend_with_exponential_moving_average import torch.nn as nn import torch import torch.optim as optim import random, os, numpy as np from torch.utils.data import DataLoader maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,801
from bert4torch.models import build_transformer_model, BaseModel import torch from torch.utils.data import DataLoader import torch.nn as nn import torch.optim as optim from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, get_pool_emb, seed_everything from bert4torch.callbacks import AdversarialTraining from bert4torch.tokenizers import Tokenizer maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,802
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb from bert4torch.optimizers import extend_with_exponential_moving_average, get_linear_schedule_with_warmup import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,803
import build_transformer_model, BaseModel import torch from torch.utils.data import DataLoader import torch.nn as nn import torch.optim as optim from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, seed_everything, text_segmentate, get_pool_emb from bert4torch.tokenizers import Tokenizer from bert4torch.losses import RDropLoss from tqdm import tqdm import torch.nn.functional as F maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) for _ in range(2): batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) for _ in range(2): batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,804
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, Checkpoint import torch.nn as nn import torch import torch.optim as optim import random, os, numpy as np from torch.utils.data import DataLoader maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
null
20,805
import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from peft import LoraConfig maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
null
20,806
import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from peft import LoraConfig maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() model = Model().to(device) The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem: 单条样本推理 Here is the function: def inference(texts): '''单条样本推理 ''' for text in texts: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) token_ids = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :] segment_ids = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :] logit = model.predict([token_ids, segment_ids]) y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy() print(text, ' ----> ', y_pred)
单条样本推理
20,807
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb from bert4torch.callbacks import AdversarialTraining import torch.nn as nn import torch import torch.optim as optim import torch.nn.functional as F from torch.utils.data import DataLoader import random maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): # batch_token_ids包含两部分,第一部分是有监督数据,第二部分是无监督数据 batch_token_ids, batch_labels = [[], []], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids[0].append(token_ids) batch_labels.append([label]) # 无监督部分 unsup_text = random.choice(unsup_dataset) # 随机挑一个无监督数据 token_ids, _ = tokenizer.encode(unsup_text, maxlen=maxlen) batch_token_ids[1].append(token_ids) batch_token_ids = [j for i in batch_token_ids for j in i] batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,808
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.layers import MixUp from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text, label in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return batch_token_ids, batch_labels.flatten()
null
20,809
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModelDDP from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler import os torch.distributed.init_process_group(backend='nccl') torch.cuda.set_device(local_rank) device = torch.device('cuda', local_rank) def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
null
20,810
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModelDP, add_trainer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything import torch.nn as nn import torch import torch.optim as optim import random, os, numpy as np from torch.utils.data import DataLoader maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids, batch_labels.flatten()], None def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids, batch_labels.flatten()], None
null
20,811
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModelDDP from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler import os torch.distributed.init_process_group(backend='nccl') torch.cuda.set_device(local_rank) device = torch.device('cuda', local_rank) def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids, batch_labels.flatten()], None def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids, batch_labels.flatten()], None
null
20,812
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModelDP, add_trainer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything import torch.nn as nn import torch import torch.optim as optim import random, os, numpy as np from torch.utils.data import DataLoader maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
null
20,813
import AdamW from torch.utils.data import DataLoader from bert4torch.models import build_transformer_model, AccelerateTrainer from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb from bert4torch.optimizers import get_linear_schedule_with_warmup import torch.nn as nn import torch from tqdm import tqdm from torch4keras.model import add_trainer maxlen = 256 tokenizer = Tokenizer(dict_path, do_lower_case=True) def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, maxlen), dtype=torch.long) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids, maxlen), dtype=torch.long) batch_labels = torch.tensor(batch_labels, dtype=torch.long) return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
null
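As a rough, simplified stand-in for what `sequence_padding(batch_token_ids, maxlen)` is assumed to do in the snippet above (pad every id list to a fixed length so the batch can be stacked), not the actual bert4torch implementation:

import torch

def pad_to_length(seqs, length, value=0):
    # pad (or truncate) each id list to `length`, then stack into a LongTensor
    padded = [list(s[:length]) + [value] * max(0, length - len(s)) for s in seqs]
    return torch.tensor(padded, dtype=torch.long)

batch = [[101, 2769, 102], [101, 791, 1921, 1921, 102]]
print(pad_to_length(batch, 6).shape)  # torch.Size([2, 6])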
20,814
from bert4torch.layers import GlobalPointer from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from bert4torch.losses import SparseMultilabelCategoricalCrossentropy from tqdm import tqdm import torch from torch.utils.data import DataLoader import torch.optim as optim import numpy as np maxlen = 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' d, id2predicate = {}, {} def collate_fn(batch): def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 batch_token_ids, batch_segment_ids = [], [] batch_entity_labels, batch_head_labels, batch_tail_labels = [], [], [] for d in batch: token_ids, segment_ids = tokenizer.encode(d['text'], maxlen=maxlen) # 整理三元组 {s: [(o, p)]} spoes = set() for s, p, o in d['spo_list']: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] sh = search(s, token_ids) oh = search(o, token_ids) if sh != -1 and oh != -1: spoes.add((sh, sh + len(s) - 1, p, oh, oh + len(o) - 1)) # 构建标签 entity_labels = [set() for _ in range(2)] head_labels = [set() for _ in range(len(predicate2id))] tail_labels = [set() for _ in range(len(predicate2id))] for sh, st, p, oh, ot in spoes: entity_labels[0].add((sh, st)) entity_labels[1].add((oh, ot)) head_labels[p].add((sh, oh)) tail_labels[p].add((st, ot)) for label in entity_labels + head_labels + tail_labels: if not label: # 至少要有一个标签 label.add((0, 0)) # 如果没有则用0填充 entity_labels = sequence_padding([list(l) for l in entity_labels]) # [subject|object=2, 实体个数, 实体起终点] head_labels = sequence_padding([list(l) for l in head_labels]) # [关系个数, 该关系下subject|object配对数, subject|object起点=2] tail_labels = sequence_padding([list(l) for l in tail_labels]) # [关系个数, 该关系下subject|object配对数, subject|object终点=2] # 构建batch batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_entity_labels.append(entity_labels) batch_head_labels.append(head_labels) batch_tail_labels.append(tail_labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) # batch_entity_labels: [btz, subject|object=2, 实体个数, 实体起终点] [btz, 2, n, 2] # batch_head_labels: [btz, 关系个数, 该关系下subject|object配对数, subject|object起点=2] [btz, 49, k, 2] # batch_tail_labels: [btz, 关系个数, 该关系下subject|object配对数, subject|object终点=2] [btz, 49, k, 2] batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels, seq_dims=2), dtype=torch.float, device=device) batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.float, device=device) batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.float, device=device) return [batch_token_ids, batch_segment_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels] def collate_fn(batch): def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 batch_token_ids, batch_segment_ids = [], [] batch_entity_labels, batch_head_labels, batch_tail_labels = [], [], [] for d in batch: token_ids, segment_ids = tokenizer.encode(d['text'], maxlen=maxlen) # 整理三元组 {s: [(o, p)]} spoes = set() for s, p, 
o in d['spo_list']: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] sh = search(s, token_ids) oh = search(o, token_ids) if sh != -1 and oh != -1: spoes.add((sh, sh + len(s) - 1, p, oh, oh + len(o) - 1)) # 构建标签 entity_labels = [set() for _ in range(2)] head_labels = [set() for _ in range(len(predicate2id))] tail_labels = [set() for _ in range(len(predicate2id))] for sh, st, p, oh, ot in spoes: entity_labels[0].add((sh, st)) entity_labels[1].add((oh, ot)) head_labels[p].add((sh, oh)) tail_labels[p].add((st, ot)) for label in entity_labels + head_labels + tail_labels: if not label: # 至少要有一个标签 label.add((0, 0)) # 如果没有则用0填充 entity_labels = sequence_padding([list(l) for l in entity_labels]) # [subject|object=2, 实体个数, 实体起终点] head_labels = sequence_padding([list(l) for l in head_labels]) # [关系个数, 该关系下subject|object配对数, subject|object起点=2] tail_labels = sequence_padding([list(l) for l in tail_labels]) # [关系个数, 该关系下subject|object配对数, subject|object终点=2] # 构建batch batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_entity_labels.append(entity_labels) batch_head_labels.append(head_labels) batch_tail_labels.append(tail_labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) # batch_entity_labels: [btz, subject|object=2, 实体个数, 实体起终点] [btz, 2, n, 2] # batch_head_labels: [btz, 关系个数, 该关系下subject|object配对数, subject|object起点=2] [btz, 49, k, 2] # batch_tail_labels: [btz, 关系个数, 该关系下subject|object配对数, subject|object终点=2] [btz, 49, k, 2] batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels, seq_dims=2), dtype=torch.float, device=device) batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.float, device=device) batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.float, device=device) return [batch_token_ids, batch_segment_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels]
null
20,815
json from bert4torch.layers import GlobalPointer from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from bert4torch.losses import SparseMultilabelCategoricalCrossentropy from tqdm import tqdm import torch from torch.utils.data import DataLoader import torch.optim as optim import numpy as np d, id2predicate = {}, {} with open('E:/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f: for l in f: l = json.loads(l) if l['predicate'] not in predicate2id: id2predicate[len(predicate2id)] = l['predicate'] predicate2id[l['predicate']] = len(predicate2id) def collate_fn(batch): batch_token_ids, batch_segment_ids = [], [] batch_entity_labels, batch_head_labels, batch_tail_labels = [], [], [] for d in batch: token_ids, segment_ids = tokenizer.encode(d['text'], maxlen=maxlen) # 整理三元组 {s: [(o, p)]} spoes = set() for s, p, o in d['spo_list']: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] sh = search(s, token_ids) oh = search(o, token_ids) if sh != -1 and oh != -1: spoes.add((sh, sh + len(s) - 1, p, oh, oh + len(o) - 1)) # 构建标签 entity_labels = [set() for _ in range(2)] head_labels = [set() for _ in range(len(predicate2id))] tail_labels = [set() for _ in range(len(predicate2id))] for sh, st, p, oh, ot in spoes: entity_labels[0].add((sh, st)) entity_labels[1].add((oh, ot)) head_labels[p].add((sh, oh)) tail_labels[p].add((st, ot)) for label in entity_labels + head_labels + tail_labels: if not label: # 至少要有一个标签 label.add((0, 0)) # 如果没有则用0填充 entity_labels = sequence_padding([list(l) for l in entity_labels]) # [subject|object=2, 实体个数, 实体起终点] head_labels = sequence_padding([list(l) for l in head_labels]) # [关系个数, 该关系下subject|object配对数, subject|object起点=2] tail_labels = sequence_padding([list(l) for l in tail_labels]) # [关系个数, 该关系下subject|object配对数, subject|object终点=2] # 构建batch batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_entity_labels.append(entity_labels) batch_head_labels.append(head_labels) batch_tail_labels.append(tail_labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) # batch_entity_labels: [btz, subject|object=2, 实体个数, 实体起终点] [btz, 2, n, 2] # batch_head_labels: [btz, 关系个数, 该关系下subject|object配对数, subject|object起点=2] [btz, 49, k, 2] # batch_tail_labels: [btz, 关系个数, 该关系下subject|object配对数, subject|object终点=2] [btz, 49, k, 2] batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels, seq_dims=2), dtype=torch.float, device=device) batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.float, device=device) batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.float, device=device) return [batch_token_ids, batch_segment_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels] def extract_spoes(text, threshold=0): """抽取输入text所包含的三元组 """ tokens = tokenizer.tokenize(text, maxlen=maxlen) mapping = tokenizer.rematch(text, tokens) token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) token_ids = torch.tensor([token_ids], dtype=torch.long, device=device) segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device) outputs = model.predict([token_ids, segment_ids]) 
outputs = [o[0].cpu().numpy() for o in outputs] # [heads, seq_len, seq_len] # 抽取subject和object subjects, objects = set(), set() outputs[0][:, [0, -1]] -= float('inf') outputs[0][:, :, [0, -1]] -= float('inf') for l, h, t in zip(*np.where(outputs[0] > threshold)): if l == 0: subjects.add((h, t)) else: objects.add((h, t)) # 识别对应的predicate spoes = set() for sh, st in subjects: for oh, ot in objects: p1s = np.where(outputs[1][:, sh, oh] > threshold)[0] p2s = np.where(outputs[2][:, st, ot] > threshold)[0] ps = set(p1s) & set(p2s) for p in ps: spoes.add(( text[mapping[sh][0]:mapping[st][-1] + 1], id2predicate[p], text[mapping[oh][0]:mapping[ot][-1] + 1] )) return list(spoes) class SPO(tuple): """用来存三元组的类 表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法, 使得在判断两个三元组是否等价时容错性更好。 """ def __init__(self, spo): self.spox = (tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2]))) def __hash__(self): return self.spox.__hash__() def __eq__(self, spo): return self.spox == spo.spox The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(data)` to solve the following problem: 评估函数,计算f1、precision、recall Here is the function: def evaluate(data): """评估函数,计算f1、precision、recall """ X, Y, Z = 0, 1e-10, 1e-10 f = open('dev_pred.json', 'w', encoding='utf-8') pbar = tqdm() for d in data: R = set([SPO(spo) for spo in extract_spoes(d['text'])]) T = set([SPO(spo) for spo in d['spo_list']]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z pbar.update() pbar.set_description('f1: %.5f, precision: %.5f, recall: %.5f' % (f1, precision, recall)) s = json.dumps({'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R)}, ensure_ascii=False, indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall
评估函数,计算f1、precision、recall
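The evaluation above is a set comparison between predicted and gold triples; a minimal sketch of the same f1/precision/recall bookkeeping on made-up data:

R = {('A', 'p1', 'B'), ('A', 'p2', 'C')}   # predicted triples
T = {('A', 'p1', 'B'), ('A', 'p3', 'D')}   # gold triples
X, Y, Z = len(R & T), len(R) + 1e-10, len(T) + 1e-10
f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
print(round(f1, 3), round(precision, 3), round(recall, 3))  # 0.5 0.5 0.5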
20,816
from bert4torch.layers import LayerNorm from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from bert4torch.callbacks import AdversarialTraining from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn maxlen = 256 def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoe The provided code snippet includes necessary dependencies for implementing the `get_spoes` function. Write a Python function `def get_spoes(text, spo_list)` to solve the following problem: 单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 Here is the function: def get_spoes(text, spo_list): '''单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 ''' def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) # 整理三元组 {s: [(o, p)]} spoes = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoes
单独抽出来,这样读取数据时候,可以根据spoes来选择跳过
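A standalone usage sketch of the `search` helper that `get_spoes` depends on: it returns the first index at which `pattern` occurs as a contiguous sub-list of `sequence`, or -1 if it is absent:

def search(pattern, sequence):
    n = len(pattern)
    for i in range(len(sequence)):
        if sequence[i:i + n] == pattern:
            return i
    return -1

assert search([3, 4], [1, 2, 3, 4, 5]) == 2
assert search([9], [1, 2, 3]) == -1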
20,817
from bert4torch.layers import LayerNorm from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from bert4torch.callbacks import AdversarialTraining from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn maxlen = 256 device = 'cuda' if torch.cuda.is_available() else 'cpu' d, id2predicate = {}, {} token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoe unsup_dataset = [sen for sen in (train_dataset.data + valid_dataset.data)] def collate_fn(batch): batch_token_ids, batch_segment_ids = [[], []], [[], []] batch_subject_labels, batch_subject_ids, batch_object_labels = [], [], [] for d in batch: token_ids, segment_ids, spoes = d['token_ids'], d['segment_ids'], d['spoes'] if spoes: # subject标签 subject_labels = np.zeros((len(token_ids), 2)) for s in spoes: subject_labels[s[0], 0] = 1 # subject首 subject_labels[s[1], 1] = 1 # subject尾 # 随机选一个subject(这里没有实现错误!这就是想要的效果!!) # Todo: 感觉可以对未选到的subject加个mask,这样计算loss就不会计算到,可能因为模型对prob**n正例加权重导致影响不大 start, end = np.array(list(spoes.keys())).T start = np.random.choice(start) end = np.random.choice(end[end >= start]) subject_ids = (start, end) # 对应的object标签 object_labels = np.zeros((len(token_ids), len(predicate2id), 2)) for o in spoes.get(subject_ids, []): object_labels[o[0], o[2], 0] = 1 object_labels[o[1], o[2], 1] = 1 # 构建batch batch_token_ids[0].append(token_ids) batch_segment_ids[0].append(segment_ids) batch_subject_labels.append(subject_labels) batch_subject_ids.append(subject_ids) batch_object_labels.append(object_labels) unsup_text = random.choice(unsup_dataset) token_ids1, segment_ids1 = tokenizer.encode(unsup_text['text'], maxlen=maxlen) batch_token_ids[1].append(token_ids1) batch_segment_ids[1].append(segment_ids1) batch_token_ids = [j for i in batch_token_ids for j in i] batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = [j for i in batch_segment_ids for j in i] batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_subject_ids = torch.tensor(batch_subject_ids, dtype=torch.long, device=device) batch_subject_labels = torch.tensor(sequence_padding(batch_subject_labels), dtype=torch.float, device=device) batch_object_labels = torch.tensor(sequence_padding(batch_object_labels), dtype=torch.float, device=device) batch_attention_mask = (batch_token_ids[:batch_subject_labels.shape[0]] != tokenizer._token_pad_id) return [batch_token_ids, batch_segment_ids, batch_subject_ids], [batch_subject_labels, batch_object_labels, batch_attention_mask]
null
20,818
from bert4torch.layers import LayerNorm from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from bert4torch.callbacks import AdversarialTraining from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn d, id2predicate = {}, {} with open('E:/data/corpus/relation_extraction/chip2020/53_schemas.json', encoding='utf-8') as f: for l in f: l = json.loads(l) if l['predicate'] not in predicate2id: id2predicate[len(predicate2id)] = l['predicate'] predicate2id[l['predicate']] = len(predicate2id) for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) with open(filename, encoding='utf-8') as f: for l in tqdm(f): l = json.loads(l) text = l['text'] spo_list = l['spo_list'] labels = [] for spo in spo_list: subject = spo['subject'] object = spo['object'] predicate = spo['predicate'] labels.append((subject, predicate, object)) token_ids, segment_ids, spoes = get_spoes(text, labels) if spoes: D.append({'text': text, 'spo_list': labels, 'token_ids': token_ids, 'segment_ids': segment_ids, 'spoes': spoes}) def extract_spoes(text): """抽取输入text所包含的三元组 """ tokens = tokenizer.tokenize(text, maxlen=maxlen) mapping = tokenizer.rematch(text, tokens) token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) token_ids = torch.tensor([token_ids], dtype=torch.long, device=device) segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device) # 抽取subject seq_output, subject_preds = train_model.predict_subject([token_ids, segment_ids]) subject_preds[:, [0, -1]] *= 0 # 首cls, 尾sep置为0 start = torch.where(subject_preds[0, :, 0] > 0.6)[0] end = torch.where(subject_preds[0, :, 1] > 0.5)[0] subjects = [] for i in start: j = end[end >= i] if len(j) > 0: j = j[0] subjects.append((i.item(), j.item())) if subjects: spoes = [] # token_ids = token_ids.repeat([len(subjects)]+[1]*(len(token_ids.shape)-1)) # segment_ids = segment_ids.repeat([len(subjects)]+[1]*(len(token_ids.shape)-1)) seq_output = seq_output.repeat([len(subjects)] + [1] * (len(seq_output.shape) - 1)) subjects = torch.tensor(subjects, dtype=torch.long, device=device) # 传入subject,抽取object和predicate object_preds = train_model.predict_object([seq_output, subjects]) object_preds[:, [0, -1]] *= 0 for subject, object_pred in zip(subjects, object_preds): start = torch.where(object_pred[:, :, 0] > 0.6) end = torch.where(object_pred[:, :, 1] > 0.5) for _start, predicate1 in zip(*start): for _end, predicate2 in zip(*end): if _start <= _end and predicate1 == predicate2: spoes.append( ((mapping[subject[0]][0], mapping[subject[1]][-1]), predicate1.item(), (mapping[_start][0], mapping[_end][-1])) ) break return [(text[s[0]:s[1] + 1], id2predicate[p], text[o[0]:o[1] + 1]) for s, p, o, in spoes] else: return [] class SPO(tuple): """用来存三元组的类 表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法, 使得在判断两个三元组是否等价时容错性更好。 """ def __init__(self, spo): self.spox = ( tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2])), ) def __hash__(self): return self.spox.__hash__() def __eq__(self, spo): return self.spox == spo.spox The provided code snippet includes necessary dependencies 
for implementing the `evaluate` function. Write a Python function `def evaluate(data)` to solve the following problem: 评估函数,计算f1、precision、recall Here is the function: def evaluate(data): """评估函数,计算f1、precision、recall """ X, Y, Z = 1e-10, 1e-10, 1e-10 f = open('dev_pred.json', 'w', encoding='utf-8') pbar = tqdm() for d in data: R = set([SPO(spo) for spo in extract_spoes(d['text'])]) T = set([SPO(spo) for spo in d['spo_list']]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z pbar.update() pbar.set_description( 'f1: %.5f, precision: %.5f, recall: %.5f' % (f1, precision, recall) ) s = json.dumps({ 'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R), }, ensure_ascii=False, indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall
评估函数,计算f1、precision、recall
20,819
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) The provided code snippet includes necessary dependencies for implementing the `trans_ij2k` function. Write a Python function `def trans_ij2k(seq_len, i, j)` to solve the following problem: 把第i行,第j列转化成上三角flat后的序号 Here is the function: def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i))
把第i行,第j列转化成上三角flat后的序号
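A quick sanity-check sketch for `trans_ij2k`: the closed-form index should agree with simply enumerating the upper-triangle pairs (i <= j) row by row, which is also how the flattened pair dimension of length seq_len*(seq_len+1)//2 is laid out:

def trans_ij2k(seq_len, i, j):
    if (i > seq_len - 1) or (j > seq_len - 1) or (i > j):
        return 0
    return int(0.5 * (2 * seq_len - i + 1) * i + (j - i))

seq_len = 5
flat = [(i, j) for i in range(seq_len) for j in range(seq_len) if j >= i]
assert len(flat) == seq_len * (seq_len + 1) // 2
assert all(trans_ij2k(seq_len, i, j) == k for k, (i, j) in enumerate(flat))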
20,820
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim maxlen = 64 device = 'cuda' if torch.cuda.is_available() else 'cpu'd, id2predicate = {}, {} def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) map_ij2k = {(i, j): trans_ij2k(maxlen, i, j) for i in range(maxlen) for j in range(maxlen) if j >= i} def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) if isinstance(sequence, torch.Tensor): sequence = sequence.cpu().tolist() for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 def collate_fn(batch): pair_len = maxlen * (maxlen+1)//2 # batch_entity_labels: [btz, pair_len] # batch_head_labels: [btz, rel_size, pair_len] # batch_tail_labels: [btz, rel_size, pair_len] batch_entity_labels = torch.zeros((len(batch), pair_len), dtype=torch.long, device=device) batch_head_labels = torch.zeros((len(batch), len(predicate2id), pair_len), dtype=torch.long, device=device) batch_tail_labels = torch.zeros((len(batch), len(predicate2id), pair_len), dtype=torch.long, device=device) batch_token_ids = [] for i, d in enumerate(batch): token_ids = tokenizer.encode(d['text'])[0][1:-1][:maxlen] # 这里要限制取前max_len个 batch_token_ids.append(token_ids) # 整理三元组 {s: [(o, p)]} for s, p, o in d['spo_list']: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] sh = search(s, token_ids) # 这里超过长度就会找不到 oh = search(o, token_ids) if sh != -1 and oh != -1: st, ot = sh+len(s)-1, oh+len(o)-1 batch_entity_labels[i, map_ij2k[sh, st]] = 1 batch_entity_labels[i, map_ij2k[oh, ot]] = 1 if sh <= oh: batch_head_labels[i, p, map_ij2k[sh, oh]] = 1 else: batch_head_labels[i, p, map_ij2k[oh, sh]] = 2 if st <= ot: batch_tail_labels[i, p, map_ij2k[st, ot]] = 1 else: batch_tail_labels[i, p, map_ij2k[ot, st]] = 2 batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, length=maxlen), dtype=torch.long, device=device) return [batch_token_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels]
null
20,821
json from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim d, id2predicate = {}, {} with open('E:/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f: for l in f: l = json.loads(l) if l['predicate'] not in predicate2id: id2predicate[len(predicate2id)] = l['predicate'] predicate2id[l['predicate']] = len(predicate2id) def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) if isinstance(sequence, torch.Tensor): sequence = sequence.cpu().tolist() for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 def extract_spoes(text): """抽取输入text所包含的三元组 """ def get_spots_fr_shaking_tag(shaking_tag): '''解析关系 ''' spots = [] for shaking_inds in shaking_tag.nonzero(): rel_id = shaking_inds[0].item() tag_id = shaking_tag[rel_id][shaking_inds[1]].item() matrix_inds = map_k2ij[shaking_inds[1].item()] # 保证前面是subject,后面是object if tag_id == 1: spot = (rel_id, matrix_inds[0], matrix_inds[1]) elif tag_id == 2: spot = (rel_id, matrix_inds[1], matrix_inds[0]) spots.append(spot) return spots tokens = tokenizer.tokenize(text)[1:-1] mapping = tokenizer.rematch(text, tokens) token_ids = tokenizer.encode(text)[0][1:-1] token_ids_ts = torch.tensor(sequence_padding([token_ids], length=maxlen), dtype=torch.long, device=device) outputs = model.predict([token_ids_ts]) outputs = [o[0].argmax(dim=-1) for o in outputs] # 抽取entity ent_matrix_spots = set() ent_text = set() for shaking_ind in outputs[0].nonzero(): shaking_ind_ = shaking_ind[0].item() # tag_id = outputs[0][shaking_ind_] matrix_inds = map_k2ij[shaking_ind_] spot = (matrix_inds[0], matrix_inds[1]) if (spot[0] < len(mapping)) and (spot[1] < len(mapping)): # 实体起始在mapping范围内 ent_matrix_spots.add(spot) ent_text.add(text[mapping[spot[0]][0]:mapping[spot[1]][-1] + 1]) # 识别对应的predicate head_rel_matrix_spots = get_spots_fr_shaking_tag(outputs[1]) tail_rel_matrix_spots = get_spots_fr_shaking_tag(outputs[2]) spoes = [] for rel_h, sh, oh in head_rel_matrix_spots: for rel_t, st, ot in tail_rel_matrix_spots: # 如果关系相同,且(sh, st)和(oh, ot)都在entity_maxtrix_spots中 if (rel_h == rel_t) and ((sh, st) in ent_matrix_spots) and ((oh, ot) in ent_matrix_spots): spoes.append((text[mapping[sh][0]:mapping[st][-1] + 1], id2predicate[rel_h], text[mapping[oh][0]:mapping[ot][-1] + 1])) return spoes, token_ids, ent_text class SPO(tuple): """用来存三元组的类 表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法, 使得在判断两个三元组是否等价时容错性更好。 """ def __init__(self, spo): self.spox = (tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2]))) def __hash__(self): return self.spox.__hash__() def __eq__(self, spo): return self.spox == spo.spox The provided code snippet includes necessary dependencies for implementing the `evaluate` function. 
Write a Python function `def evaluate(data)` to solve the following problem: 评估函数,计算f1、precision、recall Here is the function: def evaluate(data): """评估函数,计算f1、precision、recall """ X, Y, Z = 0, 1e-10, 1e-10 E1, E2 = 0, 1e-10 f = open('dev_pred.json', 'w', encoding='utf-8') pbar = tqdm() for d in data: spoes, token_ids, ent_text_pred = extract_spoes(d['text']) # spo_list是用来根据maxlen删减的 spo_list = [] for s, p, o in d['spo_list']: s_ = tokenizer.encode(s)[0][1:-1] o_ = tokenizer.encode(o)[0][1:-1] sh = search(s_, token_ids) # 这里超过长度就会找不到 oh = search(o_, token_ids) if sh != -1 and oh != -1: spo_list.append((s, p, o)) # 计算三元组的f1值 R = set([SPO(spo) for spo in spoes]) T = set([SPO(spo) for spo in spo_list]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z # 计算实体的指标 ent_text_truth = set([spo[0] for spo in spo_list] + [spo[-1] for spo in spo_list]) E1 += len(ent_text_pred & ent_text_truth) E2 += len(ent_text_truth) E_acc = E1 / E2 # 计算entity_matrix, head_matrix,tail_matrix的accuracy pbar.update() pbar.set_description('f1: %.5f, precision: %.5f, recall: %.5f, ent_acc: %.5f' % (f1, precision, recall, E_acc)) s = json.dumps({'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R)}, ensure_ascii=False, indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall
评估函数,计算f1、precision、recall
20,822
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.losses import MultilabelCategoricalCrossentropy from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim import numpy as np def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) The provided code snippet includes necessary dependencies for implementing the `trans_ij2k` function. Write a Python function `def trans_ij2k(seq_len, i, j)` to solve the following problem: 把第i行,第j列转化成上三角flat后的序号 Here is the function: def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i))
把第i行,第j列转化成上三角flat后的序号
20,823
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.losses import MultilabelCategoricalCrossentropy from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim import numpy as np def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) tag2id = tran_ent_rel2id() The provided code snippet includes necessary dependencies for implementing the `tran_ent_rel2id` function. Write a Python function `def tran_ent_rel2id()` to solve the following problem: 获取最后一个分类层的的映射关系 Here is the function: def tran_ent_rel2id(): '''获取最后一个分类层的的映射关系 ''' tag2id = {'ent': 0} for p in predicate2id.keys(): for mode in ['sh_oh', 'oh_sh', 'st_ot', 'ot_st']: tag2id[p+'##'+mode] = len(tag2id) return tag2id
获取最后一个分类层的的映射关系
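To illustrate the tag layout that `tran_ent_rel2id` produces, a small sketch with a toy two-relation `predicate2id` (the real mapping is read from the schema file), giving 1 + 4 * len(predicate2id) tags in total:

predicate2id = {'出生地': 0, '毕业院校': 1}  # toy mapping, for illustration only

def tran_ent_rel2id():
    tag2id = {'ent': 0}
    for p in predicate2id.keys():
        for mode in ['sh_oh', 'oh_sh', 'st_ot', 'ot_st']:
            tag2id[p + '##' + mode] = len(tag2id)
    return tag2id

print(len(tran_ent_rel2id()))  # 9 -> 'ent' plus 4 directed tags per relation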
20,824
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.losses import MultilabelCategoricalCrossentropy from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim import numpy as np maxlen = 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' d, id2predicate = {}, {} def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) map_ij2k = {(i, j): trans_ij2k(maxlen, i, j) for i in range(maxlen) for j in range(maxlen) if j >= i} tag2id = tran_ent_rel2id() def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 def collate_fn(batch): pair_len = maxlen * (maxlen+1)//2 # batch_head_labels: [btz, pair_len, tag2id_len] batch_labels = torch.zeros((len(batch), pair_len, len(tag2id)), dtype=torch.long, device=device) batch_token_ids = [] for i, d in enumerate(batch): token_ids = tokenizer.encode(d['text'])[0][1:-1][:maxlen] # 这里要限制取前max_len个 batch_token_ids.append(token_ids) # 整理三元组 {s: [(o, p)]} for s, p, o in d['spo_list']: s = tokenizer.encode(s)[0][1:-1] o = tokenizer.encode(o)[0][1:-1] sh = search(s, token_ids) oh = search(o, token_ids) if sh != -1 and oh != -1: st, ot = sh+len(s)-1, oh+len(o)-1 batch_labels[i, map_ij2k[sh, st], tag2id['ent']] = 1 batch_labels[i, map_ij2k[oh, ot], tag2id['ent']] = 1 if sh <= oh: batch_labels[i, map_ij2k[sh, oh], tag2id[p+'##sh_oh']] = 1 else: batch_labels[i, map_ij2k[oh, sh], tag2id[p+'##oh_sh']] = 1 if st <= ot: batch_labels[i, map_ij2k[st, ot], tag2id[p+'##st_ot']] = 1 else: batch_labels[i, map_ij2k[ot, st], tag2id[p+'##ot_st']] = 1 batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, length=maxlen), dtype=torch.long, device=device) return [batch_token_ids], batch_labels
null
20,825
json from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.losses import MultilabelCategoricalCrossentropy from bert4torch.layers import TplinkerHandshakingKernel from tqdm import tqdm import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.optim as optim import numpy as np d, id2predicate = {}, {} with open('E:/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f: for l in f: l = json.loads(l) if l['predicate'] not in predicate2id: id2predicate[len(predicate2id)] = l['predicate'] predicate2id[l['predicate']] = len(predicate2id) def trans_ij2k(seq_len, i, j): '''把第i行,第j列转化成上三角flat后的序号 ''' if (i > seq_len - 1) or (j > seq_len - 1) or (i > j): return 0 return int(0.5*(2*seq_len-i+1)*i+(j-i)) def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 def extract_spoes(text, threshold=0): """抽取输入text所包含的三元组 """ tokens = tokenizer.tokenize(text)[1:-1] mapping = tokenizer.rematch(text, tokens) token_ids = tokenizer.encode(text)[0][1:-1] token_ids_ = torch.tensor(sequence_padding([token_ids], length=maxlen), dtype=torch.long, device=device) outputs = model.predict([token_ids_])[0].cpu().numpy() # [pair_len, tag_size] # 抽取entity, 识别对应的predicate ent_matrix_spots, ent_text = set(), set() head_rel_matrix_spots, tail_rel_matrix_spots = [], [] for shaking_ind, tag_id in zip(*np.where(outputs > threshold)): matrix_inds = map_k2ij[shaking_ind] spot = (matrix_inds[0], matrix_inds[1]) if (spot[0] < len(mapping)) and (spot[1] < len(mapping)): # 实体起始在mapping范围内 p = id2tag[tag_id].split('##')[0] if id2tag[tag_id] == 'ent': ent_matrix_spots.add(spot) ent_text.add(text[mapping[spot[0]][0]:mapping[spot[1]][-1] + 1]) else: p = predicate2id[p] if id2tag[tag_id].endswith('##sh_oh'): head_rel_matrix_spots.append((p, spot[0], spot[1])) elif id2tag[tag_id].endswith('##oh_sh'): head_rel_matrix_spots.append((p, spot[1], spot[0])) elif id2tag[tag_id].endswith('##st_ot'): tail_rel_matrix_spots.append((p, spot[0], spot[1])) elif id2tag[tag_id].endswith('##ot_st'): tail_rel_matrix_spots.append((p, spot[1], spot[0])) spoes = [] for rel_h, sh, oh in head_rel_matrix_spots: for rel_t, st, ot in tail_rel_matrix_spots: # 如果关系相同,且(sh, st)和(oh, ot)都在entity_maxtrix_spots中 if (rel_h == rel_t) and ((sh, st) in ent_matrix_spots) and ((oh, ot) in ent_matrix_spots): spoes.append((text[mapping[sh][0]:mapping[st][-1] + 1], id2predicate[rel_h], text[mapping[oh][0]:mapping[ot][-1] + 1])) return spoes, token_ids, ent_text class SPO(tuple): """用来存三元组的类 表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法, 使得在判断两个三元组是否等价时容错性更好。 """ def __init__(self, spo): self.spox = (tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2]))) def __hash__(self): return self.spox.__hash__() def __eq__(self, spo): return self.spox == spo.spox The provided code snippet includes necessary dependencies for implementing the `evaluate` function. 
Write a Python function `def evaluate(data)` to solve the following problem: 评估函数,计算f1、precision、recall Here is the function: def evaluate(data): """评估函数,计算f1、precision、recall """ X, Y, Z = 0, 1e-10, 1e-10 E1, E2 = 0, 1e-10 f = open('dev_pred.json', 'w', encoding='utf-8') pbar = tqdm() for d in data: spoes, token_ids, ent_text_pred = extract_spoes(d['text']) # spo_list是用来根据maxlen删减的 spo_list = [] for s, p, o in d['spo_list']: s_ = tokenizer.encode(s)[0][1:-1] o_ = tokenizer.encode(o)[0][1:-1] sh = search(s_, token_ids) # 这里超过长度就会找不到 oh = search(o_, token_ids) if sh != -1 and oh != -1: spo_list.append((s, p, o)) # 计算三元组的f1值 R = set([SPO(spo) for spo in spoes]) T = set([SPO(spo) for spo in spo_list]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z # 计算实体的指标 ent_text_truth = set([spo[0] for spo in spo_list] + [spo[-1] for spo in spo_list]) E1 += len(ent_text_pred & ent_text_truth) E2 += len(ent_text_truth) E_acc = E1 / E2 # 计算entity_matrix, head_matrix,tail_matrix的accuracy pbar.update() pbar.set_description('f1: %.5f, precision: %.5f, recall: %.5f, ent_acc: %.5f' % (f1, precision, recall, E_acc)) s = json.dumps({'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R)}, ensure_ascii=False, indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall
评估函数,计算f1、precision、recall
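The evaluation above boils down to set arithmetic over predicted and gold triples (the SPO wrapper only makes equality robust to tokenization differences). A tiny stand-alone illustration of the f1/precision/recall computation with made-up triples:

def spo_f1(pred_triples, gold_triples):
    R, T = set(pred_triples), set(gold_triples)
    X, Y, Z = len(R & T), max(len(R), 1e-10), max(len(T), 1e-10)
    return 2 * X / (Y + Z), X / Y, X / Z  # f1, precision, recall

pred = {('周杰伦', '歌手', '七里香'), ('周杰伦', '出生地', '台北')}
gold = {('周杰伦', '歌手', '七里香'), ('方文山', '作词', '七里香')}
print(spo_f1(pred, gold))  # (0.5, 0.5, 0.5)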
20,826
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.layers import MultiHeadAttentionLayer, PositionWiseFeedForward from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, seed_everything from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import collections import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from scipy.optimize import linear_sum_assignment maxlen = 128 def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: assert token_ids[s_idx:s_idx + len(s)] == s assert token_ids[o_idx:o_idx + len(o)] == o s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoe for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) import BertIntermediate, BertOutput, BertAttention The provided code snippet includes necessary dependencies for implementing the `get_spoes` function. Write a Python function `def get_spoes(text, spo_list)` to solve the following problem: 单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 Here is the function: def get_spoes(text, spo_list): '''单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 ''' def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) # 整理三元组 {s: [(o, p)]} spoes = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: assert token_ids[s_idx:s_idx + len(s)] == s assert token_ids[o_idx:o_idx + len(o)] == o s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoes
单独抽出来,这样读取数据时候,可以根据spoes来选择跳过
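get_spoes relies on the search helper to locate the tokenized subject/object inside the sentence's token ids, and then stores the match as an inclusive (start, end) span. A tokenizer-free demonstration of that alignment step with made-up ids:

def search(pattern, sequence):
    """Find sub-list `pattern` inside `sequence`; return the first start index or -1."""
    n = len(pattern)
    for i in range(len(sequence)):
        if sequence[i:i + n] == pattern:
            return i
    return -1

token_ids = [101, 2769, 4263, 1266, 776, 102]  # toy ids standing in for a tokenized sentence
entity = [1266, 776]                           # toy ids standing in for a tokenized entity

start = search(entity, token_ids)
if start != -1:
    span = (start, start + len(entity) - 1)    # inclusive span, as used for subject/object ids
    print(span)                                # (3, 4)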
20,827
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.layers import MultiHeadAttentionLayer, PositionWiseFeedForward from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, seed_everything from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import collections import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from scipy.optimize import linear_sum_assignment device = 'cuda' if torch.cuda.is_available() else 'cpu' d, id2predicate = {}, {} token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) = {} return token_ids, segment_ids, spoe import BertIntermediate, BertOutput, BertAttention def collate_fn(batch): batch_token_ids, batch_segment_ids = [], [] targets = [] for d in batch: token_ids, segment_ids, spoes = d['token_ids'], d['segment_ids'], d['spoes'] if spoes: target = {"relation": [], "head_start_index": [], "head_end_index": [], "tail_start_index": [], "tail_end_index": []} for (head_start_index, head_end_index), object_labels in spoes.items(): for tail_start_index, tail_end_index, relation_id in object_labels: target["relation"].append(relation_id) target["head_start_index"].append(head_start_index) target["head_end_index"].append(head_end_index) target["tail_start_index"].append(tail_start_index) target["tail_end_index"].append(tail_end_index) # 构建batch batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) targets.append(target) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) targets = [{k: torch.tensor(v, dtype=torch.long, device=device) for k, v in t.items()} for t in targets] return [batch_token_ids, batch_segment_ids], targets
null
20,828
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.layers import MultiHeadAttentionLayer, PositionWiseFeedForward from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, seed_everything from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import collections import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from scipy.optimize import linear_sum_assignment num_generated_triples = 10 def generate_span(start_logits, end_logits, seq_lens): def generate_relation(pred_rel_logits): _Pred_Triple = collections.namedtuple( "Pred_Triple", ["pred_rel", "rel_prob", "head_start_index", "head_end_index", "head_start_prob", "head_end_prob", "tail_start_index", "tail_end_index", "tail_start_prob", "tail_end_prob"] ) pred_head_ent_dict = generate_span(output["head_start_logits"], output["head_end_logits"], seq_lens) pred_tail_ent_dict = generate_span(output["tail_start_logits"], output["tail_end_logits"], seq_lens) pred_rel_dict = generate_relation(output['pred_rel_logits']) triples = [] for triple_id in range(num_generated_triples): pred_rel = pred_rel_dict[triple_id] pred_head = pred_head_ent_dict[triple_id] pred_tail = pred_tail_ent_dict[triple_id] triple = generate_strategy(pred_rel, pred_head, pred_tail, num_classes, _Pred_Triple) if triple: triples.append(triple) return triple generate_strategy(pred_rel, pred_head, pred_tail, num_classes, _Pred_Triple if pred_rel.pred_rel != num_classes: if pred_head and pred_tail: for ele in pred_head: if ele.start_index != 0: break head = ele for ele in pred_tail: if ele.start_index != 0: break tail = ele return _Pred_Triple(pred_rel=pred_rel.pred_rel, rel_prob=pred_rel.rel_prob, head_start_index=head.start_index, head_end_index=head.end_index, head_start_prob=head.start_prob, head_end_prob=head.end_prob, tail_start_index=tail.start_index, tail_end_index=tail.end_index, tail_start_prob=tail.start_prob, tail_end_prob=tail.end_prob) else: return else: retur import BertIntermediate, BertOutput, BertAttention def generate_triple(output, seq_lens, num_classes): _Pred_Triple = collections.namedtuple( "Pred_Triple", ["pred_rel", "rel_prob", "head_start_index", "head_end_index", "head_start_prob", "head_end_prob", "tail_start_index", "tail_end_index", "tail_start_prob", "tail_end_prob"] ) pred_head_ent_dict = generate_span(output["head_start_logits"], output["head_end_logits"], seq_lens) pred_tail_ent_dict = generate_span(output["tail_start_logits"], output["tail_end_logits"], seq_lens) pred_rel_dict = generate_relation(output['pred_rel_logits']) triples = [] for triple_id in range(num_generated_triples): pred_rel = pred_rel_dict[triple_id] pred_head = pred_head_ent_dict[triple_id] pred_tail = pred_tail_ent_dict[triple_id] triple = generate_strategy(pred_rel, pred_head, pred_tail, num_classes, _Pred_Triple) if triple: triples.append(triple) # print(triples) return triples
null
20,829
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.layers import MultiHeadAttentionLayer, PositionWiseFeedForward from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, seed_everything from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import collections import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from scipy.optimize import linear_sum_assignment d, id2predicate = {}, {} with open('E:/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f: for l in f: l = json.loads(l) if l['predicate'] not in predicate2id: id2predicate[len(predicate2id)] = l['predicate'] predicate2id[l['predicate']] = len(predicate2id) for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: assert token_ids[s_idx:s_idx + len(s)] == s assert token_ids[o_idx:o_idx + len(o)] == o s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) with open(filename, encoding='utf-8') as f: for l in tqdm(f, desc='Loading data'): l = json.loads(l) labels = [(spo['subject'], spo['predicate'], spo['object']) for spo in l['spo_list']] token_ids, segment_ids, spoes = get_spoes(l['text'], labels) if spoes: D.append({'text': l['text'], 'spo_list': labels, 'token_ids': token_ids, 'segment_ids': segment_ids, 'spoes': spoes}) if len(D) > 1000: break import BertIntermediate, BertOutput, BertAttention def extract_spoes(text, threshold=0): """抽取输入text所包含的三元组 """ tokens = tokenizer.tokenize(text, maxlen=maxlen) mapping = tokenizer.rematch(text, tokens) token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) token_ids = torch.tensor([token_ids], dtype=torch.long, device=device) segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device) # 抽取subject preds = model.gen_triples(token_ids, segment_ids) spoes = set() for pred in preds: if (pred.head_start_prob > threshold) and \ (pred.head_end_prob > threshold) and \ (pred.tail_start_prob > threshold) and \ (pred.tail_end_prob > threshold) and \ (pred.rel_prob > threshold): spoes.add(( text[mapping[pred.head_start_index][0]:mapping[pred.head_end_index][-1] + 1], id2predicate[pred.pred_rel], text[mapping[pred.tail_start_index][0]:mapping[pred.tail_end_index][-1] + 1] )) return spoes class SPO(tuple): """用来存三元组的类 表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法, 使得在判断两个三元组是否等价时容错性更好。 """ def __init__(self, spo): self.spox = ( tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2])), ) def __hash__(self): return self.spox.__hash__() def __eq__(self, spo): return self.spox == spo.spox The provided code snippet includes necessary dependencies for implementing the `evaluate` function. 
Write a Python function `def evaluate(data)` to solve the following problem: 评估函数,计算f1、precision、recall Here is the function: def evaluate(data): """评估函数,计算f1、precision、recall """ X, Y, Z = 1e-10, 1e-10, 1e-10 f = open('dev_pred.json', 'w', encoding='utf-8') pbar = tqdm() for d in data: R = set([SPO(spo) for spo in extract_spoes(d['text'])]) T = set([SPO(spo) for spo in d['spo_list']]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z pbar.update() pbar.set_description( 'f1: %.5f, precision: %.5f, recall: %.5f' % (f1, precision, recall) ) s = json.dumps({ 'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R), }, ensure_ascii=False, indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall
评估函数,计算f1、precision、recall
20,830
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn from collections import Counter import random maxlen = 128 def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 tokens = tokenizer.tokenize(text, maxlen=maxlen) token_ids = tokenizer.tokens_to_ids(tokens) = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, tokens, spoe The provided code snippet includes necessary dependencies for implementing the `get_spoes` function. Write a Python function `def get_spoes(text, spo_list)` to solve the following problem: 单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 Here is the function: def get_spoes(text, spo_list): '''单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 ''' def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 tokens = tokenizer.tokenize(text, maxlen=maxlen) token_ids = tokenizer.tokens_to_ids(tokens) # 整理三元组 {s: [(o, p)]} spoes = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, tokens, spoes
单独抽出来,这样读取数据时候,可以根据spoes来选择跳过
20,831
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn from collections import Counter import random maxlen = 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(data): token_ids, corres_tags, seq_tags, rel, rel_tags = map(list, zip(*data)) token_ids = torch.tensor(sequence_padding(token_ids, length=maxlen), dtype=torch.long, device=device) corres_tags = torch.tensor(sequence_padding(corres_tags), dtype=torch.long, device=device) seq_tags = torch.tensor(sequence_padding(seq_tags), dtype=torch.long, device=device) rel = torch.tensor(rel, dtype=torch.long, device=device) rel_tags = torch.tensor(sequence_padding(rel_tags), dtype=torch.long, device=device) attention_mask = (token_ids != tokenizer._token_pad_id).long() return [token_ids, rel], [seq_tags, rel_tags, corres_tags, attention_mask]
null
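The collate_fn above pads every field to a fixed length and derives the attention mask from the pad token id. A tokenizer-free sketch of just that step, with bert4torch's sequence_padding replaced by a small local helper and pad id 0 assumed:

import torch

def pad_to(seqs, length, pad_id=0):
    """Truncate or right-pad each sequence to exactly `length`."""
    return [list(s)[:length] + [pad_id] * max(0, length - len(s)) for s in seqs]

batch = [[101, 34, 56, 102], [101, 78, 102]]        # made-up token ids
token_ids = torch.tensor(pad_to(batch, length=6), dtype=torch.long)
attention_mask = (token_ids != 0).long()            # 1 for real tokens, 0 for padding
print(token_ids.shape, attention_mask.sum(dim=1))   # torch.Size([2, 6]) tensor([4, 3])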
20,832
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn from collections import Counter import random maxlen = 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn_test(data): token_ids, spoes, tokens = map(list, zip(*data)) token_ids = torch.tensor(sequence_padding(token_ids, length=maxlen), dtype=torch.long, device=device) return token_ids, spoes, tokens
null
20,833
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn from collections import Counter import random ensure_rel = False Label2IdxSub = {"B-H": 1, "I-H": 2, "O": 0} Label2IdxObj = {"B-T": 1, "I-T": 2, "O": 0} train_model = Model().to(device) train_model.compile(loss=Loss(), optimizer=optim.Adam(train_model.parameters(), 1e-5), clip_grad_norm=2.0) def get_metrics(correct_num, predict_num, gold_num): p = correct_num / predict_num if predict_num > 0 else 0 r = correct_num / gold_num if gold_num > 0 else 0 f1 = 2 * p * r / (p + r) if (p + r) > 0 else 0 return { 'correct_num': correct_num, 'predict_num': predict_num, 'gold_num': gold_num, 'precision': p, 'recall': r, 'f1': f1 } def tag_mapping_corres(predict_tags, pre_corres, pre_rels=None, label2idx_sub=None, label2idx_obj=None): """ Args: predict_tags: np.array, (xi, 2, max_sen_len) pre_corres: (seq_len, seq_len) pre_rels: (xi,) """ rel_num = predict_tags.shape[0] pre_triples = [] for idx in range(rel_num): heads, tails = [], [] pred_chunks_sub = get_chunks(predict_tags[idx][0], label2idx_sub) pred_chunks_obj = get_chunks(predict_tags[idx][1], label2idx_obj) pred_chunks = pred_chunks_sub + pred_chunks_obj for ch in pred_chunks: if ch[0] == 'H': heads.append(ch) elif ch[0] == 'T': tails.append(ch) retain_hts = [(h, t) for h in heads for t in tails if pre_corres[h[1]][t[1]] == 1] for h_t in retain_hts: if pre_rels is not None: triple = list(h_t) + [pre_rels[idx]] else: triple = list(h_t) + [idx] pre_triples.append(tuple(triple)) return pre_triples def span2str(triples, tokens): def _concat(token_list): result = '' for idx, t in enumerate(token_list): if idx == 0: result = t elif t.startswith('##'): result += t.lstrip('##') else: result += ' ' + t return result output = [] for triple in triples: rel = triple[-1] sub_tokens = tokens[triple[0][1]:triple[0][-1]] obj_tokens = tokens[triple[1][1]:triple[1][-1]] sub = _concat(sub_tokens) obj = _concat(obj_tokens) output.append((sub, obj, rel)) return output The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(data_iterator, mark='Val')` to solve the following problem: Evaluate the model on `steps` batches. 
Here is the function: def evaluate(data_iterator, mark='Val'): """Evaluate the model on `steps` batches.""" # set model to evaluation mode rel_num = len(predicate2id) predictions = [] ground_truths = [] correct_num, predict_num, gold_num = 0, 0, 0 for batch in tqdm(data_iterator, unit='Batch', ascii=True): # to device input_ids, triples, input_tokens = batch bs, seq_len = input_ids.size() # inference pred_seqs, pre_corres, xi, pred_rels = train_model.predict(input_ids) # (sum(x_i), seq_len) pred_seqs = pred_seqs.detach().cpu().numpy() # (bs, seq_len, seq_len) pre_corres = pre_corres.detach().cpu().numpy() if ensure_rel: # (bs,) xi = np.array(xi) # (sum(s_i),) pred_rels = pred_rels.detach().cpu().numpy() # decode by per batch xi_index = np.cumsum(xi).tolist() # (bs+1,) xi_index.insert(0, 0) for idx in range(bs): if ensure_rel: pre_triples = tag_mapping_corres(predict_tags=pred_seqs[xi_index[idx]:xi_index[idx + 1]], pre_corres=pre_corres[idx], pre_rels=pred_rels[xi_index[idx]:xi_index[idx + 1]], label2idx_sub=Label2IdxSub, label2idx_obj=Label2IdxObj) else: pre_triples = tag_mapping_corres(predict_tags=pred_seqs[idx * rel_num:(idx + 1) * rel_num], pre_corres=pre_corres[idx], label2idx_sub=Label2IdxSub, label2idx_obj=Label2IdxObj) gold_triples = span2str(triples[idx], input_tokens[idx]) pre_triples = span2str(pre_triples, input_tokens[idx]) ground_truths.append(list(set(gold_triples))) predictions.append(list(set(pre_triples))) # counter correct_num += len(set(pre_triples) & set(gold_triples)) predict_num += len(set(pre_triples)) gold_num += len(set(gold_triples)) metrics = get_metrics(correct_num, predict_num, gold_num) return metrics['f1'], metrics['precision'], metrics['recall']
Evaluate the model on `steps` batches.
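tag_mapping_corres above calls a get_chunks helper that is not included in the entry. The sketch below is a plausible minimal version that decodes B/I/O tag ids into (type, start, end_exclusive) chunks compatible with how span2str and pre_corres index them; it is an assumption about that helper's behaviour, not the PRGC implementation.

def get_chunks(tag_ids, label2idx):
    """Hypothetical decoder: tag ids under a {'B-H': 1, 'I-H': 2, 'O': 0}-style map -> chunks."""
    idx2label = {v: k for k, v in label2idx.items()}
    chunks, start, ent_type = [], None, None
    for i, t in enumerate(list(tag_ids) + [label2idx['O']]):  # sentinel 'O' flushes the last chunk
        label = idx2label.get(int(t), 'O')
        if label.startswith('B-'):
            if start is not None:
                chunks.append((ent_type, start, i))
            start, ent_type = i, label[2:]
        elif label == 'O' and start is not None:
            chunks.append((ent_type, start, i))
            start, ent_type = None, None
    return chunks

Label2IdxSub = {"B-H": 1, "I-H": 2, "O": 0}
print(get_chunks([0, 1, 2, 2, 0, 1], Label2IdxSub))  # [('H', 1, 4), ('H', 5, 6)]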
20,834
import numpy as np from bert4torch.layers import LayerNorm from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn maxlen = 128 def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoe The provided code snippet includes necessary dependencies for implementing the `get_spoes` function. Write a Python function `def get_spoes(text, spo_list)` to solve the following problem: 单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 Here is the function: def get_spoes(text, spo_list): '''单独抽出来,这样读取数据时候,可以根据spoes来选择跳过 ''' def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) # 整理三元组 {s: [(o, p)]} spoes = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoes
单独抽出来,这样读取数据时候,可以根据spoes来选择跳过
20,835
import numpy as np from bert4torch.layers import LayerNorm from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn device = 'cuda' if torch.cuda.is_available() else 'cpu' d, id2predicate = {}, {} token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) = {} for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) return token_ids, segment_ids, spoe def collate_fn(batch): batch_token_ids, batch_segment_ids = [], [] batch_subject_labels, batch_subject_ids, batch_object_labels = [], [], [] for d in batch: token_ids, segment_ids, spoes = d['token_ids'], d['segment_ids'], d['spoes'] if spoes: # subject标签 subject_labels = np.zeros((len(token_ids), 2)) for s in spoes: subject_labels[s[0], 0] = 1 # subject首 subject_labels[s[1], 1] = 1 # subject尾 # 随机选一个subject(这里没有实现错误!这就是想要的效果!!) start, end = np.array(list(spoes.keys())).T start = np.random.choice(start) end = np.random.choice(end[end >= start]) subject_ids = (start, end) # 对应的object标签 object_labels = np.zeros((len(token_ids), len(predicate2id), 2)) for o in spoes.get(subject_ids, []): object_labels[o[0], o[2], 0] = 1 object_labels[o[1], o[2], 1] = 1 # 构建batch batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_subject_labels.append(subject_labels) batch_subject_ids.append(subject_ids) batch_object_labels.append(object_labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_subject_labels = torch.tensor(sequence_padding(batch_subject_labels), dtype=torch.float, device=device) batch_subject_ids = torch.tensor(batch_subject_ids, dtype=torch.long, device=device) batch_object_labels = torch.tensor(sequence_padding(batch_object_labels), dtype=torch.float, device=device) batch_attention_mask = (batch_token_ids != tokenizer._token_pad_id) return [batch_token_ids, batch_segment_ids, batch_subject_ids], [batch_subject_labels, batch_object_labels, batch_attention_mask]
null
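The key trick in the collate_fn above is that every gold subject is marked in subject_labels, but the object extractor is conditioned on a single randomly assembled (start, end) pair, which can even combine the start of one subject with the end of another (the original comment stresses this is intentional, not a bug). A numpy-only sketch of that labeling and sampling step with made-up spans:

import numpy as np

seq_len = 10
spoes = {(2, 4): [(6, 8, 0)], (5, 5): [(6, 8, 1)]}  # {subject_span: [(obj_start, obj_end, predicate_id)]}

subject_labels = np.zeros((seq_len, 2))
for s in spoes:
    subject_labels[s[0], 0] = 1  # subject start
    subject_labels[s[1], 1] = 1  # subject end

# randomly pair one sampled start with a later (or equal) sampled end
starts, ends = np.array(list(spoes.keys())).T
start = np.random.choice(starts)
end = np.random.choice(ends[ends >= start])
subject_ids = (int(start), int(end))
print(subject_ids, spoes.get(subject_ids, []))  # may be a real subject or a crossed pair with no objects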
20,836
json import numpy as np from bert4torch.layers import LayerNorm from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset from bert4torch.callbacks import Callback from tqdm import tqdm import torch from torch.utils.data import DataLoader, Dataset import torch.optim as optim import torch.nn as nn d, id2predicate = {}, {} with open('E:/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f: for l in f: l = json.loads(l) if l['predicate'] not in predicate2id: id2predicate[len(predicate2id)] = l['predicate'] predicate2id[l['predicate']] = len(predicate2id) for s, p, o in spo_list: s = tokenizer.encode(s)[0][1:-1] p = predicate2id[p] o = tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s = (s_idx, s_idx + len(s) - 1) o = (o_idx, o_idx + len(o) - 1, p) if s not in spoes: spoes[s] = [] spoes[s].append(o) with open(filename, encoding='utf-8') as f: for l in tqdm(f): l = json.loads(l) labels = [(spo['subject'], spo['predicate'], spo['object']) for spo in l['spo_list']] token_ids, segment_ids, spoes = get_spoes(l['text'], labels) if spoes: D.append({'text': l['text'], 'spo_list': labels, 'token_ids': token_ids, 'segment_ids': segment_ids, 'spoes': spoes}) # if len(D) > 10000: # break def extract_spoes(text): """抽取输入text所包含的三元组 """ tokens = tokenizer.tokenize(text, maxlen=maxlen) mapping = tokenizer.rematch(text, tokens) token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) token_ids = torch.tensor([token_ids], dtype=torch.long, device=device) segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device) # 抽取subject seq_output, subject_preds = train_model.predict_subject([token_ids, segment_ids]) subject_preds[:, [0, -1]] *= 0 # 首cls, 尾sep置为0 start = torch.where(subject_preds[0, :, 0] > 0.6)[0] end = torch.where(subject_preds[0, :, 1] > 0.5)[0] subjects = [] for i in start: j = end[end >= i] if len(j) > 0: j = j[0] subjects.append((i.item(), j.item())) if subjects: spoes = [] # token_ids = token_ids.repeat([len(subjects)]+[1]*(len(token_ids.shape)-1)) # segment_ids = segment_ids.repeat([len(subjects)]+[1]*(len(token_ids.shape)-1)) seq_output = seq_output.repeat([len(subjects)]+[1]*(len(seq_output.shape)-1)) subjects = torch.tensor(subjects, dtype=torch.long, device=device) # 传入subject,抽取object和predicate object_preds = train_model.predict_object([seq_output, subjects]) object_preds[:, [0, -1]] *= 0 for subject, object_pred in zip(subjects, object_preds): start = torch.where(object_pred[:, :, 0] > 0.6) end = torch.where(object_pred[:, :, 1] > 0.5) for _start, predicate1 in zip(*start): for _end, predicate2 in zip(*end): if _start <= _end and predicate1 == predicate2: spoes.append( ((mapping[subject[0]][0], mapping[subject[1]][-1]), predicate1.item(), (mapping[_start][0], mapping[_end][-1])) ) break return [(text[s[0]:s[1] + 1], id2predicate[p], text[o[0]:o[1] + 1]) for s, p, o, in spoes] else: return [] class SPO(tuple): """用来存三元组的类 表现跟tuple基本一致,只是重写了 __hash__ 和 __eq__ 方法, 使得在判断两个三元组是否等价时容错性更好。 """ def __init__(self, spo): self.spox = ( tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2])), ) def __hash__(self): return self.spox.__hash__() def __eq__(self, spo): return self.spox == spo.spox The provided code snippet includes necessary dependencies for implementing the `evaluate` function. 
Write a Python function `def evaluate(data)` to solve the following problem: 评估函数,计算f1、precision、recall Here is the function: def evaluate(data): """评估函数,计算f1、precision、recall """ X, Y, Z = 1e-10, 1e-10, 1e-10 f = open('dev_pred.json', 'w', encoding='utf-8') pbar = tqdm() for d in data: R = set([SPO(spo) for spo in extract_spoes(d['text'])]) T = set([SPO(spo) for spo in d['spo_list']]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z pbar.update() pbar.set_description( 'f1: %.5f, precision: %.5f, recall: %.5f' % (f1, precision, recall) ) s = json.dumps({ 'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R), }, ensure_ascii=False, indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall
评估函数,计算f1、precision、recall
20,837
import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr from tqdm import tqdm import argparse import numpy as np task_name = args.task_name maxlen = 64 if task_name != 'PAWSX' else 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token1_ids, batch_token2_ids, batch_labels = [], [], [] for text1, text2, label in batch: label = int(label > 2.5) if task_name == 'STS-B' else label token1_ids, _ = tokenizer.encode(text1, maxlen=maxlen) batch_token1_ids.append(token1_ids) token2_ids, _ = tokenizer.encode(text2, maxlen=maxlen) batch_token2_ids.append(token2_ids) batch_labels.append([label]) batch_token1_ids = torch.tensor(sequence_padding(batch_token1_ids), dtype=torch.long, device=device) batch_token2_ids = torch.tensor(sequence_padding(batch_token2_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return (batch_token1_ids, batch_token2_ids), batch_labels.flatten()
null
20,838
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import torch.nn.functional as F import argparse import jieba def cal_metric(model_outputs, model_labels): scores, prediction_scores, e_labels = model_outputs labels, mlm_labels, attention_mask = model_labels rep = (e_labels == 1) * attention_mask fix = (e_labels == 0) * attention_mask prediction = prediction_scores.argmax(-1) result = {} result['electra_rep_acc'] = float((prediction*rep).sum()/rep.sum()) result['electra_fix_acc'] = float(1.0 - (prediction*fix).sum()/fix.sum()) result['electra_acc'] = float(((prediction == e_labels) * attention_mask).sum()/attention_mask.sum()) return result The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(filenames)` to solve the following problem: 加载数据(带标签) 单条格式:(文本1, 文本2, 标签) Here is the function: def load_data(filenames): """加载数据(带标签) 单条格式:(文本1, 文本2, 标签) """ D = [] for filename in filenames: with open(filename, encoding='utf-8') as f: for l in f: l = l.strip().split('\t') if len(l) == 3: D.append((l[0], l[1], float(l[2]))) return D
加载数据(带标签) 单条格式:(文本1, 文本2, 标签)
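load_data expects tab-separated lines of text1, text2 and a numeric score. A quick self-contained usage check against a throw-away file; the sentences and scores are made up, and the function is repeated only so the snippet runs on its own:

import tempfile, os

def load_data(filenames):
    D = []
    for filename in filenames:
        with open(filename, encoding='utf-8') as f:
            for l in f:
                l = l.strip().split('\t')
                if len(l) == 3:
                    D.append((l[0], l[1], float(l[2])))
    return D

with tempfile.NamedTemporaryFile('w', suffix='.data', delete=False, encoding='utf-8') as f:
    f.write("今天天气不错\t今天天气很好\t4.5\n")
    f.write("我想退款\t怎么申请退货\t2.0\n")
    path = f.name

print(load_data([path]))  # [('今天天气不错', '今天天气很好', 4.5), ('我想退款', '怎么申请退货', 2.0)]
os.remove(path)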
20,839
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import torch.nn.functional as F import argparse import jieba maxlen = 128 if task_name == 'PAWSX' else 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False) tokenizer = Tokenizer(dict_path, do_lower_case=True def mask_tokens(inputs, special_tokens_mask=None): """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ mlm_probability = 0.3 special_tokens = {tokenizer._token_start_id, tokenizer._token_end_id, tokenizer._token_pad_id, tokenizer._token_unk_id, tokenizer._token_mask_id} inputs = inputs.clone() labels = inputs.clone() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = torch.full(labels.shape, mlm_probability) if special_tokens_mask is None: special_tokens_mask = [[val in special_tokens for val in smp] for smp in labels.tolist()] special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = tokenizer._token_mask_id # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(tokenizer._vocab_size, labels.shape, dtype=torch.long, device=device) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels input_ids = [] for text in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] input_ids.append(token_ids) input_ids.extend(input_ids) input_ids = torch.tensor(sequence_padding(input_ids), dtype=torch.long, device=device) labels = torch.arange(len(batch), device=device) mlm_inputs, mlm_labels = mask_tokens(input_ids) attention_mask = input_ids.gt(0).long() return [input_ids, mlm_inputs, attention_mask], [labels, mlm_labels, attention_mask labels = [] labels = torch.tensor(labels, dtype=torch.float, device=device) def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return outpu def cal_metric(model_outputs, model_labels): scores, prediction_scores, e_labels = model_outputs labels, mlm_labels, attention_mask = model_labels rep = (e_labels == 1) * attention_mask fix = (e_labels == 0) * attention_mask prediction = prediction_scores.argmax(-1) result = {} result['electra_rep_acc'] = float((prediction*rep).sum()/rep.sum()) result['electra_fix_acc'] = float(1.0 - 
(prediction*fix).sum()/fix.sum()) result['electra_acc'] = float(((prediction == e_labels) * attention_mask).sum()/attention_mask.sum()) return result def collate_fn(batch): input_ids = [] for text in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] input_ids.append(token_ids) input_ids.extend(input_ids) input_ids = torch.tensor(sequence_padding(input_ids), dtype=torch.long, device=device) labels = torch.arange(len(batch), device=device) # mlm_inputs和mlm_outputs mlm_inputs, mlm_labels = mask_tokens(input_ids) attention_mask = input_ids.gt(0).long() return [input_ids, mlm_inputs, attention_mask], [labels, mlm_labels, attention_mask]
null
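mask_tokens above applies the standard BERT corruption: roughly 30% of positions are selected, of which 80% become [MASK], 10% become a random token and 10% keep the original. A compact, tokenizer-free sketch of the same logic; the vocab size and mask id are made-up toy values:

import torch

def mask_tokens_demo(inputs, vocab_size=100, mask_id=4, mlm_probability=0.3):
    inputs = inputs.clone()
    labels = inputs.clone()
    masked = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked] = -100                                    # loss only on corrupted positions
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    inputs[replaced] = mask_id                                # 80% of masked: [MASK]
    random_pos = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replaced
    inputs[random_pos] = torch.randint(vocab_size, labels.shape)[random_pos]  # 10%: random token
    return inputs, labels                                     # remaining 10%: token kept unchanged

torch.manual_seed(0)
x = torch.randint(5, 100, (2, 8))
corrupted, labels = mask_tokens_demo(x)
print((labels != -100).float().mean())  # fraction of positions that carry an MLM label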
20,840
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import torch.nn.functional as F import argparse import jieba maxlen = 128 if task_name == 'PAWSX' else 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) tokenizer = Tokenizer(dict_path, do_lower_case=True) def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output def cal_metric(model_outputs, model_labels): scores, prediction_scores, e_labels = model_outputs labels, mlm_labels, attention_mask = model_labels rep = (e_labels == 1) * attention_mask fix = (e_labels == 0) * attention_mask prediction = prediction_scores.argmax(-1) result = {} result['electra_rep_acc'] = float((prediction*rep).sum()/rep.sum()) result['electra_fix_acc'] = float(1.0 - (prediction*fix).sum()/fix.sum()) result['electra_acc'] = float(((prediction == e_labels) * attention_mask).sum()/attention_mask.sum()) return result def collate_fn_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels
null
20,841
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import torch.nn.functional as F import argparse import jieba def cal_metric(model_outputs, model_labels): scores, prediction_scores, e_labels = model_outputs labels, mlm_labels, attention_mask = model_labels rep = (e_labels == 1) * attention_mask fix = (e_labels == 0) * attention_mask prediction = prediction_scores.argmax(-1) result = {} result['electra_rep_acc'] = float((prediction*rep).sum()/rep.sum()) result['electra_fix_acc'] = float(1.0 - (prediction*fix).sum()/fix.sum()) result['electra_acc'] = float(((prediction == e_labels) * attention_mask).sum()/attention_mask.sum()) return result
null
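cal_metric scores an ELECTRA-style discriminator: e_labels marks which positions were replaced (1) or kept (0), attention_mask drops padding, and three accuracies are reported. A dummy-tensor walk-through of the same arithmetic:

import torch

e_labels = torch.tensor([[1, 0, 1, 0, 0]])           # 1 = token was replaced, 0 = kept
attention_mask = torch.tensor([[1, 1, 1, 1, 0]])     # last position is padding
prediction = torch.tensor([[1, 0, 0, 0, 1]])         # discriminator argmax

rep = (e_labels == 1) * attention_mask                # replaced & not padding
fix = (e_labels == 0) * attention_mask                # kept & not padding

rep_acc = float((prediction * rep).sum() / rep.sum())        # recall on replaced tokens: 0.5
fix_acc = float(1.0 - (prediction * fix).sum() / fix.sum())  # correctness on kept tokens: 1.0
acc = float(((prediction == e_labels) * attention_mask).sum() / attention_mask.sum())  # overall: 0.75
print(rep_acc, fix_acc, acc)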
20,842
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import torch.nn.functional as F import argparse import jieba model = Model(pool_method=pooling).to(device) model.compile(loss=MyLoss(), optimizer=optim.Adam(model.parameters(), 7e-6), metrics=cal_metric) def evaluate(dataloader): # 模型预测 # 标准化,相似度,相关系数 sims_list, labels = [], [] for (a_token_ids, b_token_ids), label in tqdm(dataloader): a_vecs = model.encode(a_token_ids) b_vecs = model.encode(b_token_ids) a_vecs = torch.nn.functional.normalize(a_vecs, p=2, dim=1).cpu().numpy() b_vecs = torch.nn.functional.normalize(b_vecs, p=2, dim=1).cpu().numpy() sims = (a_vecs * b_vecs).sum(axis=1) sims_list.append(sims) labels.append(label.cpu().numpy()) corrcoef = scipy.stats.spearmanr(np.concatenate(labels), np.concatenate(sims_list)).correlation return corrcoef
null
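The correlation reported by evaluate above is Spearman's rho between gold similarity scores and the cosine similarity of L2-normalised sentence embeddings. A tiny stand-alone version of that computation with random embeddings and made-up gold scores:

import numpy as np
import scipy.stats
import torch
import torch.nn.functional as F

a_vecs = F.normalize(torch.randn(8, 32), p=2, dim=1).numpy()
b_vecs = F.normalize(torch.randn(8, 32), p=2, dim=1).numpy()
sims = (a_vecs * b_vecs).sum(axis=1)                  # row-wise cosine similarity
labels = np.array([0., 1., 2., 3., 4., 5., 1., 3.])   # made-up gold scores
print(scipy.stats.spearmanr(labels, sims).correlation)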
20,843
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(filenames)` to solve the following problem: 加载数据(带标签) 单条格式:(文本1, 文本2, 标签) Here is the function: def load_data(filenames): """加载数据(带标签) 单条格式:(文本1, 文本2, 标签) """ D = [] for filename in filenames: with open(filename, encoding='utf-8') as f: for l in f: l = l.strip().split('\t') if len(l) == 3: D.append((l[0], l[1], float(l[2]))) return D
加载数据(带标签) 单条格式:(文本1, 文本2, 标签)
20,844
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances from scipy.stats import pearsonr, spearmanr import copy import numpy as np from tqdm import tqdm import sys import argparse import jieba jieba.initialize() = argparse.ArgumentParser() parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT']) parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) parser.add_argument('--dropout_rate', default=0.1) args = parser.parse_args() model_type = args.model_type pooling = args.pooling task_name = args.task_name dropout_rate = float(args.dropout_rate) model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type] batch_size = 32 maxlen = 128 if task_name == 'PAWSX' else 64 el_dir = { 'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12', 'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base', 'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base', 'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base', 'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base', }[model_type] config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json' checkpoint_path = f'{model_dir}/pytorch_model.bin' dict_path = f'{model_dir}/vocab.txt' data_path = 'E:/data/corpus/sentence_embedding/' device = 'cuda' if torch.cuda.is_available() else 'cpu' type in ['RoFormer']: tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) else: tokenizer = Tokenizer(dict_path, do_lower_case=True) ames = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']] print(all_names) all_texts = load_data(all_names) train_texts = [j for i in all_texts for j in i[:2]] if task_name != 'PAWSX': np.random.shuffle(train_texts) train_texts = train_texts[:10000] n(batch): texts_list = [[] for _ in range(2)] for text in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) texts_list[0].append(token_ids) texts_list[1].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels train_dataloader = DataLoader(ListDataset(data=train_texts), shuffle=True, batch_size=batch_size, collate_fn=collate_fn) n_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval) 
eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.model1([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output model = Model(pool_method=pooling).to(device) CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), ) e(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator] ) else: model.load_weights('best_model.pt') def collate_fn(batch): texts_list = [[] for _ in range(2)] for text in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) texts_list[0].append(token_ids) texts_list[1].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels
null
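In the collate_fn above each sentence is encoded twice and labels = torch.arange(batch_size), i.e. sample i's positive is its own second (dropout-perturbed) view and all other in-batch sentences act as negatives. How those labels are consumed is not shown in the entry, so the similarity-matrix plus cross-entropy step below, including the temperature value, is an assumption:

import torch
import torch.nn.functional as F

def in_batch_contrastive_loss(emb_a, emb_b, temperature=0.05):
    emb_a = F.normalize(emb_a, p=2, dim=1)
    emb_b = F.normalize(emb_b, p=2, dim=1)
    sims = emb_a @ emb_b.t() / temperature            # [batch, batch] cosine similarity matrix
    labels = torch.arange(sims.size(0), device=sims.device)
    return F.cross_entropy(sims, labels)              # row i should score highest on column i

a = torch.randn(4, 16)                                # stand-in for two "views" of 4 sentences
b = a + 0.01 * torch.randn(4, 16)
print(in_batch_contrastive_loss(a, b))                # small loss: the diagonal dominates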
20,845
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances from scipy.stats import pearsonr, spearmanr import copy import numpy as np from tqdm import tqdm import sys import argparse import jieba jieba.initialize() = argparse.ArgumentParser() parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT']) parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) parser.add_argument('--dropout_rate', default=0.1) args = parser.parse_args() model_type = args.model_type pooling = args.pooling task_name = args.task_name dropout_rate = float(args.dropout_rate) model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type] batch_size = 32 maxlen = 128 if task_name == 'PAWSX' else 64 el_dir = { 'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12', 'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base', 'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base', 'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base', 'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base', }[model_type] config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json' checkpoint_path = f'{model_dir}/pytorch_model.bin' dict_path = f'{model_dir}/vocab.txt' data_path = 'E:/data/corpus/sentence_embedding/' device = 'cuda' if torch.cuda.is_available() else 'cpu' type in ['RoFormer']: tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) else: tokenizer = Tokenizer(dict_path, do_lower_case=True) ames = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']] print(all_names) all_texts = load_data(all_names) train_texts = [j for i in all_texts for j in i[:2]] if task_name != 'PAWSX': np.random.shuffle(train_texts) train_texts = train_texts[:10000] n(batch): texts_list = [[] for _ in range(2)] for text in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) texts_list[0].append(token_ids) texts_list[1].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels train_dataloader = DataLoader(ListDataset(data=train_texts), shuffle=True, batch_size=batch_size, collate_fn=collate_fn) n_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval) 
eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.model1([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output model = Model(pool_method=pooling).to(device) CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), ) e(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator] ) else: model.load_weights('best_model.pt') def collate_fn_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels
null
20,846
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances from scipy.stats import pearsonr, spearmanr import copy import numpy as np from tqdm import tqdm import sys import argparse import jieba jieba.initialize() = argparse.ArgumentParser() parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT']) parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) parser.add_argument('--dropout_rate', default=0.1) args = parser.parse_args() model_type = args.model_type pooling = args.pooling task_name = args.task_name dropout_rate = float(args.dropout_rate) model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type] batch_size = 32 maxlen = 128 if task_name == 'PAWSX' else 64 el_dir = { 'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12', 'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base', 'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base', 'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base', 'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base', }[model_type] config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json' checkpoint_path = f'{model_dir}/pytorch_model.bin' dict_path = f'{model_dir}/vocab.txt' data_path = 'E:/data/corpus/sentence_embedding/' device = 'cuda' if torch.cuda.is_available() else 'cpu' type in ['RoFormer']: tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) else: tokenizer = Tokenizer(dict_path, do_lower_case=True) ames = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']] print(all_names) all_texts = load_data(all_names) train_texts = [j for i in all_texts for j in i[:2]] if task_name != 'PAWSX': np.random.shuffle(train_texts) train_texts = train_texts[:10000] n(batch): texts_list = [[] for _ in range(2)] for text in batch: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) texts_list[0].append(token_ids) texts_list[1].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels train_dataloader = DataLoader(ListDataset(data=train_texts), shuffle=True, batch_size=batch_size, collate_fn=collate_fn) n_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval) 
eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.model1([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output model = Model(pool_method=pooling).to(device) CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), ) e(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator] ) else: model.load_weights('best_model.pt') def evaluate(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine
null
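Note that although the variable in evaluate() is named eval_pearson_cosine, the returned score is a Spearman rank correlation (spearmanr), computed on cosine similarities recovered as 1 - paired_cosine_distances. A minimal self-contained check of that metric pipeline on toy vectors, independent of the model (emb1/emb2/gold are illustrative names only):

# Minimal check of the metric used in evaluate(): row-wise cosine similarity via
# sklearn's paired_cosine_distances, then a Spearman rank correlation.
import numpy as np
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import spearmanr

emb1 = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])  # toy sentence embeddings
emb2 = np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]])
gold = np.array([1.0, 0.5, 0.0])                        # toy similarity labels

cos_sim = 1 - paired_cosine_distances(emb1, emb2)       # [1.0, 0.707, 0.0]
corr, _ = spearmanr(gold, cos_sim)                      # rank correlation, not Pearson
print(cos_sim, corr)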
20,848
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import pearsonr, spearmanr import copy import random from tqdm import tqdm import numpy as np import argparse import jieba jieba.initialize() parser = argparse.ArgumentParser() parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT']) parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) parser.add_argument('--dropout_rate', default=0.1, type=float) args = parser.parse_args() model_type = args.model_type pooling = args.pooling task_name = args.task_name dropout_rate = args.dropout_rate model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type] batch_size = 32 maxlen = 128 if task_name == 'PAWSX' else 64 model_dir = { 'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12', 'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base', 'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base', 'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base', 'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base', }[model_type] config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json' checkpoint_path = f'{model_dir}/pytorch_model.bin' dict_path = f'{model_dir}/vocab.txt' data_path = 'E:/data/corpus/sentence_embedding/' device = 'cuda' if torch.cuda.is_available() else 'cpu' if model_type in ['RoFormer']: tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) else: tokenizer = Tokenizer(dict_path, do_lower_case=True) all_names = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']] print(all_names) all_texts = load_data(all_names) train_texts = [j for i in all_texts for j in i[:2]] if task_name != 'PAWSX': np.random.shuffle(train_texts) train_texts = train_texts[:10000] def collate_fn(batch): texts_list = [[] for _ in range(2)] labels = [] pos_id = random.randint(0, len(batch)-1) pos_token_ids, _ = tokenizer.encode(batch[pos_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(pos_token_ids) labels.append(1) for neg_id in range(len(batch)): if neg_id == pos_id: continue elif random.random() < 0.5: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(neg_token_ids) labels.append(0) else: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(neg_token_ids) texts_list[1].append(pos_token_ids) labels.append(0) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels train_dataloader = DataLoader(ListDataset(data=train_texts), batch_size=batch_size, shuffle=True, collate_fn=collate_fn) def collate_fn_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval) class Model(BaseModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pool_cls = self.model1([token_ids]) output = get_pool_emb(hidden_state, pool_cls, token_ids.gt(0).long(), self.pool_method) return output model = Model(pool_method=pooling).to(device) model.compile( loss=nn.BCEWithLogitsLoss(reduction='mean'), optimizer=optim.Adam(model.parameters(), lr=2e-5), # 用足够小的学习率 ) def evaluate(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator] ) else: model.load_weights('best_model.pt') def collate_fn(batch): texts_list = [[] for _ in range(2)] labels = [] pos_id = random.randint(0, len(batch)-1) pos_token_ids, _ = tokenizer.encode(batch[pos_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(pos_token_ids) labels.append(1) for neg_id in range(len(batch)): if neg_id == pos_id: continue elif random.random() < 0.5: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(neg_token_ids) labels.append(0) else: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(neg_token_ids) texts_list[1].append(pos_token_ids) labels.append(0) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels
null
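The collate_fn above turns each unsupervised batch into one positive pair (a randomly chosen sentence paired with itself) plus one negative pair per remaining sentence, with the left/right position randomized, feeding a BCEWithLogitsLoss head. A toy re-run of the same sampling logic on raw strings (no tokenizer) to make the produced pairs visible; the helper name make_pairs is illustrative, not from the original:

import random

def make_pairs(batch):
    # Same pairing scheme as collate_fn above, but on raw strings for readability.
    pairs, labels = [], []
    pos_id = random.randint(0, len(batch) - 1)
    pairs.append((batch[pos_id], batch[pos_id]))   # one positive: the sentence with itself
    labels.append(1)
    for neg_id in range(len(batch)):
        if neg_id == pos_id:
            continue
        # every other sentence forms a negative pair, randomly ordered left/right
        pair = (batch[pos_id], batch[neg_id]) if random.random() < 0.5 else (batch[neg_id], batch[pos_id])
        pairs.append(pair)
        labels.append(0)
    return pairs, labels

print(make_pairs(['今天天气不错', '花呗怎么还款', '如何申请退款', '天气真好']))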
20,849
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import pearsonr, spearmanr import copy import random from tqdm import tqdm import numpy as np import argparse import jieba jieba.initialize() = argparse.ArgumentParser() parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT']) parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) parser.add_argument('--dropout_rate', default=0.1, type=float) args = parser.parse_args() model_type = args.model_type pooling = args.pooling task_name = args.task_name dropout_rate = args.dropout_rate model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type] batch_size = 32 maxlen = 128 if task_name == 'PAWSX' else 64 el_dir = { 'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12', 'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base', 'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base', 'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base', 'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base', }[model_type] config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json' checkpoint_path = f'{model_dir}/pytorch_model.bin' dict_path = f'{model_dir}/vocab.txt' data_path = 'E:/data/corpus/sentence_embedding/' device = 'cuda' if torch.cuda.is_available() else 'cpu' type in ['RoFormer']: tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) else: tokenizer = Tokenizer(dict_path, do_lower_case=True) ames = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']] print(all_names) all_texts = load_data(all_names) train_texts = [j for i in all_texts for j in i[:2]] if task_name != 'PAWSX': np.random.shuffle(train_texts) train_texts = train_texts[:10000] n(batch): texts_list = [[] for _ in range(2)] labels = [] pos_id = random.randint(0, len(batch)-1) pos_token_ids, _ = tokenizer.encode(batch[pos_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(pos_token_ids) labels.append(1) for neg_id in range(len(batch)): if neg_id == pos_id: continue elif random.random() < 0.5: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(neg_token_ids) labels.append(0) else: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(neg_token_ids) texts_list[1].append(pos_token_ids) labels.append(0) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels train_dataloader = DataLoader(ListDataset(data=train_texts), batch_size=batch_size, shuffle=True, collate_fn=collate_fn) n_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, 
maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval) eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pool_cls = self.model1([token_ids]) output = get_pool_emb(hidden_state, pool_cls, token_ids.gt(0).long(), self.pool_method) return output model = Model(pool_method=pooling).to(device) BCEWithLogitsLoss(reduction='mean'), optimizer=optim.Adam(model.parameters(), lr=2e-5), # 用足够小的学习率 ) e(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator] ) else: model.load_weights('best_model.pt') def collate_fn_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels
null
20,850
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import pearsonr, spearmanr import copy import random from tqdm import tqdm import numpy as np import argparse import jieba jieba.initialize() = argparse.ArgumentParser() parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT']) parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) parser.add_argument('--dropout_rate', default=0.1, type=float) args = parser.parse_args() model_type = args.model_type pooling = args.pooling task_name = args.task_name dropout_rate = args.dropout_rate model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type] batch_size = 32 maxlen = 128 if task_name == 'PAWSX' else 64 el_dir = { 'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12', 'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base', 'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base', 'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base', 'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base', }[model_type] config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json' checkpoint_path = f'{model_dir}/pytorch_model.bin' dict_path = f'{model_dir}/vocab.txt' data_path = 'E:/data/corpus/sentence_embedding/' device = 'cuda' if torch.cuda.is_available() else 'cpu' type in ['RoFormer']: tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) else: tokenizer = Tokenizer(dict_path, do_lower_case=True) ames = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']] print(all_names) all_texts = load_data(all_names) train_texts = [j for i in all_texts for j in i[:2]] if task_name != 'PAWSX': np.random.shuffle(train_texts) train_texts = train_texts[:10000] n(batch): texts_list = [[] for _ in range(2)] labels = [] pos_id = random.randint(0, len(batch)-1) pos_token_ids, _ = tokenizer.encode(batch[pos_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(pos_token_ids) labels.append(1) for neg_id in range(len(batch)): if neg_id == pos_id: continue elif random.random() < 0.5: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(pos_token_ids) texts_list[1].append(neg_token_ids) labels.append(0) else: neg_token_ids, _ = tokenizer.encode(batch[neg_id], maxlen=maxlen) texts_list[0].append(neg_token_ids) texts_list[1].append(pos_token_ids) labels.append(0) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels train_dataloader = DataLoader(ListDataset(data=train_texts), batch_size=batch_size, shuffle=True, collate_fn=collate_fn) n_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, 
maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval) eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pool_cls = self.model1([token_ids]) output = get_pool_emb(hidden_state, pool_cls, token_ids.gt(0).long(), self.pool_method) return output model = Model(pool_method=pooling).to(device) BCEWithLogitsLoss(reduction='mean'), optimizer=optim.Adam(model.parameters(), lr=2e-5), # 用足够小的学习率 ) e(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator] ) else: model.load_weights('best_model.pt') def evaluate(data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), label in tqdm(data): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2)) cosine_scores.append(cosine_score) labels.append(label) cosine_scores = np.concatenate(cosine_scores) labels = torch.cat(labels).cpu().numpy() eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine
null
20,851
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import argparse import jieba The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(filenames)` to solve the following problem: 加载数据(带标签) 单条格式:(文本1, 文本2, 标签) Here is the function: def load_data(filenames): """加载数据(带标签) 单条格式:(文本1, 文本2, 标签) """ D = [] for filename in filenames: with open(filename, encoding='utf-8') as f: for l in f: l = l.strip().split('\t') if len(l) == 3: D.append((l[0], l[1], float(l[2]))) return D
加载数据(带标签) 单条格式:(文本1, 文本2, 标签)
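load_data expects one tab-separated example per line in the form text1\ttext2\tlabel and silently skips malformed lines. A small usage sketch that writes a temporary file in that format and loads it; the file path and sample rows are illustrative only:

# Illustrative only: build a tiny file in the expected "text1\ttext2\tlabel" format.
import tempfile, os

rows = ['怎么更改花呗手机号码\t我的花呗是以前的手机号码\t1',
        '花呗支付怎么用\t如何申请退款\t0']
path = os.path.join(tempfile.gettempdir(), 'demo.train.data')
with open(path, 'w', encoding='utf-8') as f:
    f.write('\n'.join(rows))

print(load_data([path]))  # -> [('怎么更改花呗手机号码', '我的花呗是以前的手机号码', 1.0), ...]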
20,852
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import argparse import jieba maxlen = 128 if task_name == 'PAWSX' else 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) tokenizer = Tokenizer(dict_path, do_lower_case=True) texts_list = [[] for _ in range(2)] for text in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] texts_list[0].append(token_ids) texts_list[1].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels texts_list = [[] for _ in range(2)] labels = [] for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output def collate_fn(batch): texts_list = [[] for _ in range(2)] for text in batch: token_ids = tokenizer.encode(text, maxlen=maxlen)[0] texts_list[0].append(token_ids) texts_list[1].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels
null
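Feeding the same token ids through the encoder twice and labeling positives by position (torch.arange) is the unsupervised SimCSE recipe: the two forward passes differ only by dropout, so each sentence's other dropout view is its positive and all other batch members are negatives. A hedged sketch of the in-batch contrastive loss such labels are typically paired with; the temperature value and tensor names are assumptions, not taken from this repo:

import torch
import torch.nn.functional as F

def simcse_loss(emb_a, emb_b, temperature=0.05):
    # emb_a, emb_b: [batch, hidden], two dropout-perturbed encodings of the same sentences.
    emb_a = F.normalize(emb_a, dim=-1)
    emb_b = F.normalize(emb_b, dim=-1)
    sims = emb_a @ emb_b.T / temperature                      # [batch, batch] cosine similarities
    labels = torch.arange(sims.size(0), device=sims.device)   # positives on the diagonal
    return F.cross_entropy(sims, labels)

loss = simcse_loss(torch.randn(8, 768), torch.randn(8, 768))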
20,853
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import argparse import jieba maxlen = 128 if task_name == 'PAWSX' else 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) tokenizer = Tokenizer(dict_path, do_lower_case=True) texts_list = [[] for _ in range(2)] for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output def collate_fn_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels
null
20,854
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch from bert4torch.snippets import ListDataset import argparse import jieba labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) labels = [] labels = torch.tensor(labels, dtype=torch.float, device=device) def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output model = model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), 1e-5)) def evaluate(dataloader): # 模型预测 # 标准化,相似度,相关系数 sims_list, labels = [], [] for (a_token_ids, b_token_ids), label in tqdm(dataloader): a_vecs = model.encode(a_token_ids) b_vecs = model.encode(b_token_ids) a_vecs = torch.nn.functional.normalize(a_vecs, p=2, dim=1).cpu().numpy() b_vecs = torch.nn.functional.normalize(b_vecs, p=2, dim=1).cpu().numpy() sims = (a_vecs * b_vecs).sum(axis=1) sims_list.append(sims) labels.append(label.cpu().numpy()) corrcoef = scipy.stats.spearmanr(np.concatenate(labels), np.concatenate(sims_list)).correlation return corrcoef
null
20,855
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr from tqdm import tqdm import numpy as np import argparse maxlen = 64 if task_name != 'PAWSX' else 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token_ids, batch_labels = [], [] for text1, text2, label in batch: for text in [text1, text2]: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.float, device=device) return batch_token_ids, batch_labels.flatten()
null
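This collate_fn interleaves text1 and text2 of every pair into one flat token-id batch, so downstream code has to de-interleave the encoder output to get back per-pair structure. A small hedged sketch of that step on an already-encoded batch; the tensor names are illustrative, and whether the labels are stored once per pair or once per sentence depends on the exact indentation of the original loop:

import torch

embeddings = torch.randn(8, 768)                      # encoder output for a flattened batch of 4 pairs
emb1, emb2 = embeddings[::2], embeddings[1::2]        # even rows = text1, odd rows = text2
cosine = torch.cosine_similarity(emb1, emb2, dim=-1)  # [4] similarity score per original pair
# if labels were appended once per sentence, labels[::2] would recover one label per pair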
20,856
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch import random import copy import argparse from bert4torch.snippets import ListDataset import jieba The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(filenames)` to solve the following problem: 加载数据(带标签) 单条格式:(文本1, 文本2, 标签) Here is the function: def load_data(filenames): """加载数据(带标签) 单条格式:(文本1, 文本2, 标签) """ D = [] for filename in filenames: with open(filename, encoding='utf-8') as f: for l in f: l = l.strip().split('\t') if len(l) == 3: D.append((l[0], l[1], float(l[2]))) return D
加载数据(带标签) 单条格式:(文本1, 文本2, 标签)
20,857
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch import random import copy import argparse from bert4torch.snippets import ListDataset import jieba maxlen = 128 if task_name == 'PAWSX' else 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) tokenizer = Tokenizer(dict_path, do_lower_case=True) def encode(self, token_ids): hidden_state, pooler = self.encoder([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output def collate_fn_eval(batch): texts_list = [[] for _ in range(2)] labels = [] for text1, text2, label in batch: texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0]) texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.float, device=device) return texts_list, labels
null
20,858
from bert4torch.snippets import sequence_padding from tqdm import tqdm import numpy as np import scipy.stats from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, get_pool_emb from torch.utils.data import DataLoader from torch import optim, nn import torch import random import copy import argparse from bert4torch.snippets import ListDataset import jieba def encode(self, token_ids): hidden_state, pooler = self.encoder([token_ids]) output = get_pool_emb(hidden_state, pooler, token_ids.gt(0).long(), self.pool_method) return output model = model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), 1e-5), scheduler=Momentum(gamma=0.95)) def evaluate(dataloader): # 模型预测 # 标准化,相似度,相关系数 model.eval() sims_list, labels = [], [] for (a_token_ids, b_token_ids), label in tqdm(dataloader): a_vecs = model.encode(a_token_ids) b_vecs = model.encode(b_token_ids) a_vecs = torch.nn.functional.normalize(a_vecs, p=2, dim=1).cpu().numpy() b_vecs = torch.nn.functional.normalize(b_vecs, p=2, dim=1).cpu().numpy() sims = (a_vecs * b_vecs).sum(axis=1) sims_list.append(sims) labels.append(label.cpu().numpy()) corrcoef = scipy.stats.spearmanr(np.concatenate(labels), np.concatenate(sims_list)).correlation return corrcoef
null
20,859
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb from bert4torch.layers import BERT_WHITENING from tqdm import tqdm import torch from torch.utils.data import DataLoader import scipy.stats import argparse import jieba maxlen = 128 if task_name == 'PAWSX' else 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False)) tokenizer = Tokenizer(dict_path, do_lower_case=True) def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pool_cls = self.bert([token_ids]) attention_mask = token_ids.gt(0).long() output = get_pool_emb(hidden_state, pool_cls, attention_mask, self.pool_method) return output torch.cat(all_sims, dim=0) def collate_fn(batch): batch_token1_ids, batch_token2_ids, batch_labels = [], [], [] for text1, text2, label in batch: token1_ids, _ = tokenizer.encode(text1, maxlen=maxlen) batch_token1_ids.append(token1_ids) token2_ids, _ = tokenizer.encode(text2, maxlen=maxlen) batch_token2_ids.append(token2_ids) batch_labels.append([label]) batch_token1_ids = torch.tensor(sequence_padding(batch_token1_ids), dtype=torch.long, device=device) batch_token2_ids = torch.tensor(sequence_padding(batch_token2_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.float, device=device) return (batch_token1_ids, batch_token2_ids), batch_labels.flatten()
null
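This record relies on bert4torch's BERT_WHITENING layer. For reference, the whitening transform itself is just a mean shift plus a linear map derived from the covariance of the pooled sentence vectors, so that the transformed vectors have roughly isotropic covariance. A hedged numpy sketch of that computation; it is not the library's API, and the dimension-truncation choice is an assumption:

import numpy as np

def compute_whitening(vecs, n_components=256):
    # vecs: [num_sentences, hidden]; returns (kernel, bias) such that
    # (vecs + bias) @ kernel has approximately identity covariance.
    mu = vecs.mean(axis=0, keepdims=True)
    cov = np.cov(vecs.T)
    u, s, _ = np.linalg.svd(cov)
    kernel = u @ np.diag(1.0 / np.sqrt(s))
    return kernel[:, :n_components], -mu

vecs = np.random.randn(1000, 768)
kernel, bias = compute_whitening(vecs)
whitened = (vecs + bias) @ kernel   # whitened, optionally dimension-reduced embeddings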
20,860
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm from bert4torch.tokenizers import Tokenizer, load_vocab from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import ListDataset, sequence_padding from bert4torch.callbacks import Callback from torch.utils.data import DataLoader from scipy.stats import pearsonr, spearmanr import numpy as np import argparse import jieba template_len = 15 maxlen = template_len + (128 if task_name == 'PAWSX' else 64) tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False), add_special_tokens='[X]' tokenizer = Tokenizer(dict_path, do_lower_case=True, add_special_tokens='[X]') replace_token = prompt_templates = ['"{}" 的意思为[MASK]'.format(replace_token), '"{}"这句话的意思是[MASK]'.format(replace_token)] D = [] for filename in filenames: with open(filename, 'r', encoding='utf-8') as f: for line in tqdm(f.readlines(), desc='Load data'): cache = line.split('\t') text1, text2, label = cache[0][:maxlen-template_len], cache[1][:maxlen-template_len], cache[-1] for text in [text1, text2]: sentence_pair = [] for template in prompt_templates: sent_num = len(tokenizer.tokenize(text)) prompt_sent = template.replace(replace_token, text) template_sent = template.replace(replace_token, replace_token * sent_num) sentence_pair.extend([prompt_sent, template_sent]) D.append((sentence_pair, int(label))) return for text1, text2 in batch: label = text1[-1] text1, text2 = text1[0][0], text2[0][0] text1_ids.append(tokenizer.encode(text1, maxlen=maxlen)[0]) text2_ids.append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) def load_data(filenames): D = [] for filename in filenames: with open(filename, 'r', encoding='utf-8') as f: for line in tqdm(f.readlines(), desc='Load data'): cache = line.split('\t') text1, text2, label = cache[0][:maxlen-template_len], cache[1][:maxlen-template_len], cache[-1] for text in [text1, text2]: sentence_pair = [] for template in prompt_templates: sent_num = len(tokenizer.tokenize(text)) prompt_sent = template.replace(replace_token, text) template_sent = template.replace(replace_token, replace_token * sent_num) sentence_pair.extend([prompt_sent, template_sent]) D.append((sentence_pair, int(label))) return D
null
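load_data wraps every sentence in each prompt template twice: once with the real text (prompt_sent) and once with the same number of placeholder tokens (template_sent), giving four sentences per input. A tiny trace of that string manipulation for one sentence, assuming the placeholder replace_token is '[X]' (the tokenizer above registers '[X]' as a special token; the exact value is not shown in this record) and with an illustrative token count instead of a real tokenizer call:

text = '今天天气不错'
template = '"{}" 的意思为[MASK]'.format('[X]')           # '"[X]" 的意思为[MASK]'
prompt_sent = template.replace('[X]', text)              # '"今天天气不错" 的意思为[MASK]'
sent_num = 6  # illustrative: len(tokenizer.tokenize(text)) in the real code
template_sent = template.replace('[X]', '[X]' * sent_num)  # '"[X][X][X][X][X][X]" 的意思为[MASK]'
print(prompt_sent, template_sent, sep='\n')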
20,861
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm from bert4torch.tokenizers import Tokenizer, load_vocab from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import ListDataset, sequence_padding from bert4torch.callbacks import Callback from torch.utils.data import DataLoader from scipy.stats import pearsonr, spearmanr import numpy as np import argparse import jieba maxlen = template_len + (128 if task_name == 'PAWSX' else 64) device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False), add_special_tokens='[X]') tokenizer = Tokenizer(dict_path, do_lower_case=True, add_special_tokens='[X]') batch_tensor = [[] for _ in range(4)] for prompt_data, _ in batch: for i, item in enumerate(prompt_data): batch_tensor[i].append(tokenizer.encode(item, maxlen=maxlen)[0]) for i, item in enumerate(batch_tensor): batch_tensor[i] = torch.tensor(sequence_padding(item, maxlen), dtype=torch.long, device=device) labels = torch.arange(batch_tensor[0].size(0), device=device) return batch_tensor, labels labels = torch.tensor(labels, dtype=torch.long, device=device) def collate_fn(batch): batch_tensor = [[] for _ in range(4)] for prompt_data, _ in batch: for i, item in enumerate(prompt_data): batch_tensor[i].append(tokenizer.encode(item, maxlen=maxlen)[0]) for i, item in enumerate(batch_tensor): batch_tensor[i] = torch.tensor(sequence_padding(item, maxlen), dtype=torch.long, device=device) labels = torch.arange(batch_tensor[0].size(0), device=device) return batch_tensor, labels
null
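The four tensors per batch are the (prompt, template) encodings under the two templates, and the arange labels again mark in-batch positives. In the PromptBERT formulation this layout is usually consumed by taking the [MASK]-position hidden state of the filled prompt minus that of the placeholder-only template, which removes the template's own bias, before the contrastive loss. A hedged sketch of that idea only; the model code of this record is not shown here, and all tensor names and the temperature are assumptions:

import torch
import torch.nn.functional as F

def template_denoised_embedding(mask_hidden_prompt, mask_hidden_template):
    # [MASK]-position hidden states for the filled prompt and for the
    # placeholder-only template; subtracting removes the template bias.
    return mask_hidden_prompt - mask_hidden_template

emb_view1 = template_denoised_embedding(torch.randn(8, 768), torch.randn(8, 768))
emb_view2 = template_denoised_embedding(torch.randn(8, 768), torch.randn(8, 768))
sims = F.normalize(emb_view1, dim=-1) @ F.normalize(emb_view2, dim=-1).T / 0.05
loss = F.cross_entropy(sims, torch.arange(8))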
20,862
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm from bert4torch.tokenizers import Tokenizer, load_vocab from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import ListDataset, sequence_padding from bert4torch.callbacks import Callback from torch.utils.data import DataLoader from scipy.stats import pearsonr, spearmanr import numpy as np import argparse import jieba maxlen = template_len + (128 if task_name == 'PAWSX' else 64) device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False), add_special_tokens='[X]') tokenizer = Tokenizer(dict_path, do_lower_case=True, add_special_tokens='[X]') labels = torch.arange(batch_tensor[0].size(0), device=device) text1_ids, text2_ids, labels = [], [], [] for text1, text2 in batch: label = text1[-1] text1, text2 = text1[0][0], text2[0][0] text1_ids.append(tokenizer.encode(text1, maxlen=maxlen)[0]) text2_ids.append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) text1_ids = torch.tensor(sequence_padding(text1_ids), dtype=torch.long, device=device) text2_ids = torch.tensor(sequence_padding(text2_ids), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.long, device=device) return [text1_ids, text2_ids], labels def collate_fn_test(batch): text1_ids, text2_ids, labels = [], [], [] for text1, text2 in batch: label = text1[-1] text1, text2 = text1[0][0], text2[0][0] text1_ids.append(tokenizer.encode(text1, maxlen=maxlen)[0]) text2_ids.append(tokenizer.encode(text2, maxlen=maxlen)[0]) labels.append(label) text1_ids = torch.tensor(sequence_padding(text1_ids), dtype=torch.long, device=device) text2_ids = torch.tensor(sequence_padding(text2_ids), dtype=torch.long, device=device) labels = torch.tensor(labels, dtype=torch.long, device=device) return [text1_ids, text2_ids], labels
null
20,863
from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything from bert4torch.losses import ContrastiveLoss import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr from tqdm import tqdm import argparse import numpy as np task_name = args.task_name maxlen = 64 if task_name != 'PAWSX' else 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn(batch): batch_token1_ids, batch_token2_ids, batch_labels = [], [], [] for text1, text2, label in batch: token1_ids, _ = tokenizer.encode(text1, maxlen=maxlen) batch_token1_ids.append(token1_ids) token2_ids, _ = tokenizer.encode(text2, maxlen=maxlen) batch_token2_ids.append(token2_ids) batch_labels.append([int(label>2.5) if task_name == 'STS-B' else label]) batch_token1_ids = torch.tensor(sequence_padding(batch_token1_ids), dtype=torch.long, device=device) batch_token2_ids = torch.tensor(sequence_padding(batch_token2_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.float, device=device) return (batch_token1_ids, batch_token2_ids), batch_labels.flatten()
null
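The pairs built by this collate_fn (with STS-B scores binarized via label > 2.5) feed bert4torch's ContrastiveLoss. As a reference point only, a generic siamese contrastive loss on the distance between the two sentence embeddings looks like the sketch below; the margin value, the choice of 1 - cosine as the distance, and the function name are assumptions, not the library's implementation:

import torch
import torch.nn.functional as F

def contrastive_loss(emb1, emb2, labels, margin=0.5):
    # labels: 1 for similar pairs, 0 for dissimilar; distance here is 1 - cosine.
    distance = 1 - F.cosine_similarity(emb1, emb2, dim=-1)
    positive = labels * distance.pow(2)                       # pull similar pairs together
    negative = (1 - labels) * F.relu(margin - distance).pow(2)  # push dissimilar pairs apart
    return 0.5 * (positive + negative).mean()

loss = contrastive_loss(torch.randn(8, 768), torch.randn(8, 768), torch.randint(0, 2, (8,)).float())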
20,864
om bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader, Dataset from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr from tqdm import tqdm import argparse import numpy as np = argparse.ArgumentParser() parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) args = parser.parse_args() pooling = args.pooling task_name = args.task_name maxlen = 64 if task_name != 'PAWSX' else 128 batch_size = 32 config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json' checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin' dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt' device = 'cuda' if torch.cuda.is_available() else 'cpu' seed_everything(42) = Tokenizer(dict_path, do_lower_case=True) aloader = DataLoader(MyDataset(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.train.data'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn) valid_dataloader = DataLoader(MyDataset(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.valid.data'), batch_size=batch_size, collate_fn=collate_fn) test_dataloader = DataLoader(MyDataset(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.test.data'), batch_size=batch_size, collate_fn=collate_fn) eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) attention_mask = token_ids.gt(0).long() output = get_pool_emb(hidden_state, pooler, attention_mask, self.pool_method) return output model = Model().to(device) MSELoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5) ) e(model, data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), batch_labels in tqdm(data, desc='Evaluate'): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - paired_cosine_distances(embeddings1, embeddings2) cosine_scores.append(cosine_score) labels.append(batch_labels.cpu().numpy()) labels = np.concatenate(labels) cosine_scores = np.concatenate(cosine_scores) eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator]) def collate_fn(batch): batch_token1_ids, batch_token2_ids, batch_labels = [], [], [] for text1, text2, label in batch: token1_ids, _ = tokenizer.encode(text1, maxlen=maxlen) batch_token1_ids.append(token1_ids) token2_ids, _ = tokenizer.encode(text2, maxlen=maxlen) batch_token2_ids.append(token2_ids) batch_labels.append([label]) batch_token1_ids = torch.tensor(sequence_padding(batch_token1_ids), dtype=torch.long, device=device) batch_token2_ids = torch.tensor(sequence_padding(batch_token2_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.float, device=device) return (batch_token1_ids, batch_token2_ids), batch_labels.flatten()
null
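With nn.MSELoss and a two-tower encode, the usual training signal for this layout is the cosine similarity of the two pooled embeddings regressed against the gold score (the sentence-BERT CosineSimilarityLoss setup). A hedged sketch of that single step; the full forward of the Model class is not shown in this record, so the tensor names are illustrative:

import torch
import torch.nn as nn

mse = nn.MSELoss()
emb1, emb2 = torch.randn(8, 768), torch.randn(8, 768)   # pooled embeddings of the two sentences
gold = torch.rand(8)                                    # gold similarity scores
pred = torch.cosine_similarity(emb1, emb2, dim=-1)      # model prediction in [-1, 1]
loss = mse(pred, gold)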
20,865
om bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader, Dataset from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr from tqdm import tqdm import argparse import numpy as np = argparse.ArgumentParser() parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler']) parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B']) args = parser.parse_args() pooling = args.pooling task_name = args.task_name maxlen = 64 if task_name != 'PAWSX' else 128 batch_size = 32 config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json' checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin' dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt' device = 'cuda' if torch.cuda.is_available() else 'cpu' seed_everything(42) = Tokenizer(dict_path, do_lower_case=True) aloader = DataLoader(MyDataset(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.train.data'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn) valid_dataloader = DataLoader(MyDataset(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.valid.data'), batch_size=batch_size, collate_fn=collate_fn) test_dataloader = DataLoader(MyDataset(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.test.data'), batch_size=batch_size, collate_fn=collate_fn) eModel): def encode(self, token_ids): self.eval() with torch.no_grad(): hidden_state, pooler = self.bert([token_ids]) attention_mask = token_ids.gt(0).long() output = get_pool_emb(hidden_state, pooler, attention_mask, self.pool_method) return output model = Model().to(device) MSELoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5) ) e(model, data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), batch_labels in tqdm(data, desc='Evaluate'): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - paired_cosine_distances(embeddings1, embeddings2) cosine_scores.append(cosine_score) labels.append(batch_labels.cpu().numpy()) labels = np.concatenate(labels) cosine_scores = np.concatenate(cosine_scores) eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine class Evaluator(Callback): """评估与保存 """ if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator]) def evaluate(model, data): cosine_scores, labels = [], [] for (batch_token1_ids, batch_token2_ids), batch_labels in tqdm(data, desc='Evaluate'): embeddings1 = model.encode(batch_token1_ids).cpu().numpy() embeddings2 = model.encode(batch_token2_ids).cpu().numpy() cosine_score = 1 - paired_cosine_distances(embeddings1, embeddings2) cosine_scores.append(cosine_score) labels.append(batch_labels.cpu().numpy()) labels = np.concatenate(labels) cosine_scores = np.concatenate(cosine_scores) eval_pearson_cosine, _ = spearmanr(labels, cosine_scores) return eval_pearson_cosine
null
20,866
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader, TensorDataset from tqdm import tqdm from sentence_transformers import evaluation from config import config_path, checkpoint_path, dict_path, fst_train_file, fst_dev_file, ir_path import numpy as np import pandas as pd maxlen = 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' print('step1 model'.center(60, '-')) tokenizer = Tokenizer(dict_path, do_lower_case=True) if choice in {'raw', 'mul_ce'}: # 原始模式,可能同一个batch中会出现重复标问 elif choice == 'random': def encode(self, texts, **kwargs): token_ids_list = [] for text in texts: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) token_ids_list.append(token_ids) token_ids_tensor = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long) valid_dataloader = DataLoader(TensorDataset(token_ids_tensor), batch_size=batch_size) valid_sen_emb = [] self.eval() for token_ids in tqdm(valid_dataloader, desc='Evaluate'): token_ids = token_ids[0].to(device) output = self.predict(token_ids) valid_sen_emb.append(output.cpu()) valid_sen_emb = torch.cat(valid_sen_emb, dim=0) return valid_sen_emb def collate_fn(batch): if choice == 'raw': labels = torch.arange(len(batch), device=device) else: labels = torch.eye(len(batch), dtype=torch.long, device=device) # 定位相同元素 for i, (q_std1, _) in enumerate(batch): for j, (q_std2, _) in enumerate(batch[i+1:], start=i+1): if q_std1 == q_std2: labels[i, j] = 1 labels[j, i] = 1 texts_list = [[] for _ in range(2)] for texts in batch: for i, text in enumerate(texts): token_ids, _ = tokenizer.encode(text, maxlen=maxlen) texts_list[i].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) return texts_list, labels
null
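In the non-'raw' branch the labels are a full batch-by-batch matrix: the identity plus extra 1s wherever two batch items share the same standard question, so repeated q_std entries count as mutual positives. A tiny trace of that construction on strings; the sample questions are illustrative only:

import torch

batch = [('如何开通花呗', 'q1'), ('如何还款', 'q2'), ('如何开通花呗', 'q3')]  # (q_std, q_sim) pairs
labels = torch.eye(len(batch), dtype=torch.long)
for i, (q_std1, _) in enumerate(batch):
    for j, (q_std2, _) in enumerate(batch[i + 1:], start=i + 1):
        if q_std1 == q_std2:              # items 0 and 2 share a standard question
            labels[i, j] = labels[j, i] = 1
print(labels)   # tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]])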
20,867
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.tokenizers import Tokenizer from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader, TensorDataset from tqdm import tqdm from sentence_transformers import evaluation from config import config_path, checkpoint_path, dict_path, fst_train_file, fst_dev_file, ir_path import numpy as np import pandas as pd maxlen = 64 device = 'cuda' if torch.cuda.is_available() else 'cpu' print('step1 model'.center(60, '-')) tokenizer = Tokenizer(dict_path, do_lower_case=True) if choice in {'raw', 'mul_ce'}: # 原始模式,可能同一个batch中会出现重复标问 elif choice == 'random': def encode(self, texts, **kwargs): token_ids_list = [] for text in texts: token_ids, _ = tokenizer.encode(text, maxlen=maxlen) token_ids_list.append(token_ids) token_ids_tensor = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long) valid_dataloader = DataLoader(TensorDataset(token_ids_tensor), batch_size=batch_size) valid_sen_emb = [] self.eval() for token_ids in tqdm(valid_dataloader, desc='Evaluate'): token_ids = token_ids[0].to(device) output = self.predict(token_ids) valid_sen_emb.append(output.cpu()) valid_sen_emb = torch.cat(valid_sen_emb, dim=0) return valid_sen_emb def collate_fn(batch): texts_list = [[] for _ in range(2)] for text_list in batch: # q_std有0.5的概率被抽样到 p = [0.5] + [0.5/(len(text_list)-1)] * (len(text_list)-1) texts = np.random.choice(text_list, 2, replace=False, p=p) for i, text in enumerate(texts): token_ids, _ = tokenizer.encode(text, maxlen=maxlen) texts_list[i].append(token_ids) for i, texts in enumerate(texts_list): texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device) labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device) return texts_list, labels
null
20,868
import torch from torch import Tensor import numpy as np import pandas as pd The provided code snippet includes necessary dependencies for implementing the `cos_sim` function. Write a Python function `def cos_sim(vector_a, vector_b)` to solve the following problem: 计算两个向量之间的余弦相似度 :param vector_a: 向量 a :param vector_b: 向量 b :return: sim Here is the function: def cos_sim(vector_a, vector_b): """ 计算两个向量之间的余弦相似度 :param vector_a: 向量 a :param vector_b: 向量 b :return: sim """ vector_a = np.mat(vector_a) vector_b = np.mat(vector_b) num = float(vector_a * vector_b.T) denom = np.linalg.norm(vector_a) * np.linalg.norm(vector_b) cos = num / denom sim = 0.5 + 0.5 * cos return sim
计算两个向量之间的余弦相似度 :param vector_a: 向量 a :param vector_b: 向量 b :return: sim
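cos_sim rescales cosine similarity from [-1, 1] into [0, 1], so identical directions score 1.0, orthogonal vectors score 0.5, and opposite directions score 0.0. A quick numeric check using the function defined above:

print(cos_sim([1, 0], [1, 0]))   # 1.0   (same direction)
print(cos_sim([1, 0], [0, 1]))   # 0.5   (orthogonal -> cosine 0 -> rescaled to 0.5)
print(cos_sim([1, 0], [-1, 0]))  # 0.0   (opposite direction)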
20,869
import torch from torch import Tensor import numpy as np import pandas as pd def cos_sim4matrix(arr, brr): return 0.5 + 0.5 * (arr.dot(brr.T) / (np.sqrt(np.sum(arr * arr)) * np.sqrt(np.sum(brr * brr, axis = 1))))
null
20,870
import torch from torch import Tensor import numpy as np import pandas as pd def cos_sim4matrix_2(arr, brr): return (arr.dot(brr.T) / (np.sqrt(np.sum(arr * arr)) * np.sqrt(np.sum(brr * brr, axis=1))))
null
20,871
import torch from torch import Tensor import numpy as np import pandas as pd The provided code snippet includes necessary dependencies for implementing the `read_q_std_q_corpus` function. Write a Python function `def read_q_std_q_corpus(q_std_file, q_std_vectors_file, q_corpus_file, q_corpus_vectors_file)` to solve the following problem: 读取q_std、q_corpus语料和向量 Here is the function: def read_q_std_q_corpus(q_std_file, q_std_vectors_file, q_corpus_file, q_corpus_vectors_file): '''读取q_std、q_corpus语料和向量 ''' print('读取标准问及其向量'.center(60, '-')) q_std_list = pd.read_csv(q_std_file, sep="\t", names=['c']).c.tolist() q_std_sentence_embeddings = np.load(q_std_vectors_file) print('标准问shape:', q_std_sentence_embeddings.shape, len(q_std_list)) print('读取所有语料及其向量'.center(60, '-')) q_all = pd.read_csv(q_corpus_file, sep="\t", names=['c']).c.tolist() q_all_sentence_embeddings = np.load(q_corpus_vectors_file) q_all_sentence_embeddings_dict = {q_all[i]: q_all_sentence_embeddings[i] for i in range(0, len(q_all))} print('所有语料shape', q_all_sentence_embeddings.shape, len(q_all)) return q_std_list, q_std_sentence_embeddings, q_all, q_all_sentence_embeddings_dict
读取q_std、q_corpus语料和向量
20,872
import torch from torch import Tensor import numpy as np import pandas as pd def pytorch_cos_sim(a: Tensor, b: Tensor): if not isinstance(a, torch.Tensor): a = torch.tensor(a) if not isinstance(b, torch.Tensor): b = torch.tensor(b) if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) a_norm = a / a.norm(dim=1)[:, None] b_norm = b / b.norm(dim=1)[:, None] return torch.mm(a_norm, b_norm.transpose(0, 1)) def cos_sim_1(vector_a, vector_b): """ 计算两个向量之间的余弦相似度 :param vector_a: 向量 a :param vector_b: 向量 b :return: sim """ vector_a = np.mat(vector_a) vector_b = np.mat(vector_b) num = float(vector_a * vector_b.T) denom = np.linalg.norm(vector_a) * np.linalg.norm(vector_b) cos = num / denom return cos The provided code snippet includes necessary dependencies for implementing the `cal_performance` function. Write a Python function `def cal_performance(model, q_all_sentence_embeddings_dict, q_std_sentence_embeddings, q_std_list, df_eval, K=20)` to solve the following problem: 计算召回topK的指标 Here is the function: def cal_performance(model, q_all_sentence_embeddings_dict, q_std_sentence_embeddings, q_std_list, df_eval, K=20): '''计算召回topK的指标 ''' texts = df_eval.q_sim.tolist() texts_in = [v for v in texts if v in q_all_sentence_embeddings_dict.keys()] texts_out = [v for v in texts if v not in q_all_sentence_embeddings_dict.keys()] texts_out_embeddings = model.encode(texts_out) if texts_out else [] texts_embeddings_dict_1 = {texts_in[i]: q_all_sentence_embeddings_dict[texts_in[i]] for i in range(0, len(texts_in))} texts_embeddings_dict_2 = {texts_out[i]: texts_out_embeddings[i] for i in range(0, len(texts_out))} texts_embeddings_dict = {**texts_embeddings_dict_1, **texts_embeddings_dict_2} print(f'计算相似度 K= {K}'.center(60, '-')) df_eval['ifin'] = df_eval.q_std.apply(lambda v: 1 if v in q_std_list else 0) print("目标语料标问是否存在:——>", df_eval.groupby("ifin")["ifin"].count()) print('----计算所有query和q_std的相似度') x_texts_embeddings = np.array([texts_embeddings_dict[x_text] for x_text in texts]) cos_scores = pytorch_cos_sim(x_texts_embeddings, q_std_sentence_embeddings).cpu() print('shape: ', x_texts_embeddings.shape, q_std_sentence_embeddings.shape, cos_scores.shape) print(f'----为每条相似问找到相似度最大的{K}条标问'.center(60, '-')) cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(cos_scores, K, dim=1, largest=True, sorted=False) cos_scores_top_k_values = cos_scores_top_k_values.tolist() cos_scores_top_k_idx = cos_scores_top_k_idx.tolist() cos_q_corpus_sort = [[q_std_list[v] for v in vlist] for vlist in cos_scores_top_k_idx] # 最相似的TopK个标问 result = [list(zip(cos_q_corpus_sort[i], cos_scores_top_k_values[i])) for i in range(0, len(texts))] texts_topk_dict = {texts[i]: result[i] for i in range(0, len(texts))} # 拿到每个相似问的预测结果,topK的预测标问和对应的相似度 df_eval['q_std_pred_list'] = df_eval.q_sim.map(texts_topk_dict) # 计算q_sim和q_std之间的相似度 df_eval['prob_with_std'] = df_eval.apply(lambda row: cos_sim_1(texts_embeddings_dict[row['q_sim']], q_std_sentence_embeddings[q_std_list.index(row['q_std'])]), axis=1) df_eval.loc[:, 'q_std_pred'] = df_eval.q_std_pred_list.apply(lambda v: v[0][0]) df_eval.loc[:, 'prob'] = df_eval.q_std_pred_list.apply(lambda v: v[0][1]) # df_eval.loc[:,'q_std_pred_list_pair']=df_eval.apply(lambda row: [(row['q_std'],row['q_sim'],v[0],v[1]) for v in row['q_std_pred_list']],axis=1) df_eval['q_std_pred_list_v1'] = df_eval.q_std_pred_list.apply(lambda v: [k[0] for k in v]) # 只保留预测的标准问句 df_eval['q_std_pred_list_v2'] = df_eval.q_std_pred_list.apply(lambda v: [k[1] for k in v]) # 只保留预测的概率 df_eval['t1'] = 
df_eval.apply(lambda row: 1 if row['q_std'] in row['q_std_pred_list_v1'][0:1] else 0, axis=1) df_eval['t3'] = df_eval.apply(lambda row: 1 if row['q_std'] in row['q_std_pred_list_v1'][0:3] else 0, axis=1) df_eval['t5'] = df_eval.apply(lambda row: 1 if row['q_std'] in row['q_std_pred_list_v1'][0:5] else 0, axis=1) df_eval['t10'] = df_eval.apply(lambda row: 1 if row['q_std'] in row['q_std_pred_list_v1'][0:10] else 0, axis=1) print('----模型准确率: ', df_eval.t1.sum() / df_eval.shape[0], df_eval.t3.sum() / df_eval.shape[0], df_eval.t5.sum() / df_eval.shape[0], df_eval.t10.sum() / df_eval.shape[0]) df_eval_need = df_eval[df_eval.ifin == 1] print('----模型准确率:[有效标问]:', df_eval_need.t1.sum() / df_eval_need.shape[0], df_eval_need.t3.sum() / df_eval_need.shape[0], df_eval_need.t5.sum() / df_eval_need.shape[0], df_eval_need.t10.sum() / df_eval_need.shape[0]) return df_eval
计算召回topK的指标
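The `pytorch_cos_sim` helper above is self-contained, so its behaviour can be checked in isolation. The toy check below (array values are made up for illustration, and it assumes the `pytorch_cos_sim` defined above is in scope) shows that it accepts numpy arrays or tensors, row-normalises them, and returns an m×n similarity matrix, which is exactly what `cal_performance` feeds to `torch.topk`.

import numpy as np
import torch

# Toy embeddings: 2 query vectors vs 3 candidate vectors (arbitrary values).
queries = np.array([[1.0, 0.0], [0.0, 1.0]])
candidates = np.array([[1.0, 0.0], [0.7, 0.7], [0.0, 1.0]])

sims = pytorch_cos_sim(queries, candidates)   # shape (2, 3)
print(sims.shape)                             # torch.Size([2, 3])

# Top-1 candidate per query, mirroring the torch.topk call in cal_performance.
top_values, top_idx = torch.topk(sims, k=1, dim=1)
print(top_idx.flatten().tolist())             # [0, 2] for these toy vectors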
20,873
from bert4torch.losses import ContrastiveLoss
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from config import config_path, checkpoint_path, dict_path, sec_train_file, sec_dev_file
import numpy as np
from sklearn.metrics.pairwise import paired_cosine_distances
from sklearn.metrics import roc_auc_score

maxlen = 64
device = 'cuda' if torch.cuda.is_available() else 'cpu'

def encode(self, texts):
    token_ids_list = []
    for text in texts:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
        token_ids_list.append(token_ids)
    token_ids_tensor = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long)
    valid_dataloader = DataLoader(TensorDataset(token_ids_tensor), batch_size=batch_size)

    valid_sen_emb = []
    for token_ids in tqdm(valid_dataloader, desc='Evaluate'):
        token_ids = token_ids[0].to(device)
        output = self.predict(token_ids)
        valid_sen_emb.append(output.cpu())
    valid_sen_emb = torch.cat(valid_sen_emb, dim=0)
    return valid_sen_emb

def collate_fn(batch):
    tokens_ids_list = [[] for _ in range(2)]
    labels = []
    for text1, text2, label in batch:
        tokens_ids_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0])
        tokens_ids_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0])
        labels.append(label)
    for i, token_ids in enumerate(tokens_ids_list):
        tokens_ids_list[i] = torch.tensor(sequence_padding(token_ids), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.long, device=device)
    return tokens_ids_list, labels
null
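Almost every collate function in these records funnels variable-length token-id lists through `sequence_padding` before building a tensor. The snippet below is a simplified stand-in written only to illustrate that behaviour (the real bert4torch helper has additional options such as a fixed target length); it pads to the batch maximum with a fill value, after the tokens by default or before them with mode='pre' as used in the reward-model record later in this file.

import numpy as np

def sequence_padding_demo(sequences, value=0, mode='post'):
    # Simplified stand-in: pad every sequence to the batch max length so the
    # batch can become one rectangular array / tensor.
    max_len = max(len(s) for s in sequences)
    padded = []
    for s in sequences:
        pad = [value] * (max_len - len(s))
        padded.append(list(s) + pad if mode == 'post' else pad + list(s))
    return np.array(padded)

print(sequence_padding_demo([[101, 8, 9, 102], [101, 7, 102]]))
# [[101   8   9 102]
#  [101   7 102   0]]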
20,875
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import pearsonr, spearmanr
import numpy as np
import re
from tqdm import tqdm
import argparse
import jieba
jieba.initialize()

parser = argparse.ArgumentParser()
parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT'])
parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler'])
parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B'])
parser.add_argument('--dropout_rate', default=0.1, type=float)
args = parser.parse_args()
model_type = args.model_type
pooling = args.pooling
task_name = args.task_name
dropout_rate = args.dropout_rate

model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type]
batch_size = 32
maxlen = 128 if task_name == 'PAWSX' else 64
model_dir = {
    'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12',
    'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base',
    'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base',
    'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base',
    'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base',
}[model_type]
config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json'
checkpoint_path = f'{model_dir}/pytorch_model.bin'
dict_path = f'{model_dir}/vocab.txt'
data_path = 'E:/data/corpus/sentence_embedding/'
device = 'cuda' if torch.cuda.is_available() else 'cpu'

if model_type in ['RoFormer']:
    tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False))
else:
    tokenizer = Tokenizer(dict_path, do_lower_case=True)

all_names = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']]
print(all_names)
all_texts = load_data(all_names)
train_texts = [j for i in all_texts for j in i[:2]]

if task_name != 'PAWSX':
    np.random.shuffle(train_texts)
    train_texts = train_texts[:10000]

def collate_fn(batch):
    def add_noise(token_ids, del_ratio=0.6):
        n = len(token_ids)
        keep_or_not = np.random.rand(n) > del_ratio
        if sum(keep_or_not) == 0:
            keep_or_not[np.random.choice(n)] = True  # guarantee that at least one word remains
        return list(np.array(token_ids)[keep_or_not])

    texts_list = [[] for _ in range(3)]
    for text in batch:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
        texts_list[0].append([tokenizer._token_start_id] + add_noise(token_ids[1:-1]) + [tokenizer._token_end_id])
        texts_list[1].append(token_ids[:-1])
        texts_list[2].append(token_ids[1:])
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    return texts_list[:2], texts_list[2].flatten()

train_dataloader = DataLoader(ListDataset(data=train_texts), shuffle=True, batch_size=batch_size, collate_fn=collate_fn)

def collate_fn_eval(batch):
    texts_list = [[] for _ in range(2)]
    labels = []
    for text1, text2, label in batch:
        texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0])
        texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0])
        labels.append(label)
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.float, device=device)
    return texts_list, labels

valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval)

class Model(BaseModel):
    def encode(self, token_ids):
        self.eval()
        with torch.no_grad():
            hidden_state, pool_cls = self.encoder([token_ids])
            output = get_pool_emb(hidden_state, pool_cls, token_ids.gt(0).long(), self.pool_method)
        return output

model = Model(pool_method=pooling).to(device)

model.compile(
    loss=nn.CrossEntropyLoss(ignore_index=0),
    optimizer=optim.Adam(model.parameters(), lr=2e-4),
)

def evaluate(data):
    cosine_scores, labels = [], []
    for (batch_token1_ids, batch_token2_ids), label in tqdm(data):
        embeddings1 = model.encode(batch_token1_ids).cpu().numpy()
        embeddings2 = model.encode(batch_token2_ids).cpu().numpy()
        cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2))
        cosine_scores.append(cosine_score)
        labels.append(label)
    cosine_scores = np.concatenate(cosine_scores)
    labels = torch.cat(labels).cpu().numpy()
    eval_pearson_cosine, _ = spearmanr(labels, cosine_scores)
    return eval_pearson_cosine

class Evaluator(Callback):
    """评估与保存
    """

if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator])
else:
    model.load_weights('best_model.pt')

def collate_fn(batch):
    def add_noise(token_ids, del_ratio=0.6):
        n = len(token_ids)
        keep_or_not = np.random.rand(n) > del_ratio
        if sum(keep_or_not) == 0:
            keep_or_not[np.random.choice(n)] = True  # guarantee that at least one word remains
        return list(np.array(token_ids)[keep_or_not])

    texts_list = [[] for _ in range(3)]
    for text in batch:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
        texts_list[0].append([tokenizer._token_start_id] + add_noise(token_ids[1:-1]) + [tokenizer._token_end_id])
        texts_list[1].append(token_ids[:-1])
        texts_list[2].append(token_ids[1:])
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    return texts_list[:2], texts_list[2].flatten()
null
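The denoising collate function above feeds the encoder a corrupted sentence (roughly 60% of the tokens deleted at random) while the decoder targets are the original ids shifted by one position. The standalone copy of the deletion rule below shows what that corruption looks like on a toy id list; the specific output depends on the random seed.

import numpy as np

def add_noise(token_ids, del_ratio=0.6):
    # Same rule as in the collate_fn above: keep each token with probability
    # 1 - del_ratio, but never delete every token.
    n = len(token_ids)
    keep_or_not = np.random.rand(n) > del_ratio
    if sum(keep_or_not) == 0:
        keep_or_not[np.random.choice(n)] = True
    return list(np.array(token_ids)[keep_or_not])

np.random.seed(0)
tokens = [11, 12, 13, 14, 15, 16, 17, 18]
print(add_noise(tokens))   # e.g. [12, 13, 16, 18] with this seed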
20,876
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import pearsonr, spearmanr
import numpy as np
import re
from tqdm import tqdm
import argparse
import jieba
jieba.initialize()

parser = argparse.ArgumentParser()
parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT'])
parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler'])
parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B'])
parser.add_argument('--dropout_rate', default=0.1, type=float)
args = parser.parse_args()
model_type = args.model_type
pooling = args.pooling
task_name = args.task_name
dropout_rate = args.dropout_rate

model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type]
batch_size = 32
maxlen = 128 if task_name == 'PAWSX' else 64
model_dir = {
    'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12',
    'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base',
    'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base',
    'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base',
    'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base',
}[model_type]
config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json'
checkpoint_path = f'{model_dir}/pytorch_model.bin'
dict_path = f'{model_dir}/vocab.txt'
data_path = 'E:/data/corpus/sentence_embedding/'
device = 'cuda' if torch.cuda.is_available() else 'cpu'

if model_type in ['RoFormer']:
    tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False))
else:
    tokenizer = Tokenizer(dict_path, do_lower_case=True)

all_names = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']]
print(all_names)
all_texts = load_data(all_names)
train_texts = [j for i in all_texts for j in i[:2]]

if task_name != 'PAWSX':
    np.random.shuffle(train_texts)
    train_texts = train_texts[:10000]

def collate_fn(batch):
    texts_list = [[] for _ in range(3)]
    for text in batch:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
        texts_list[0].append([tokenizer._token_start_id] + add_noise(token_ids[1:-1]) + [tokenizer._token_end_id])
        texts_list[1].append(token_ids[:-1])
        texts_list[2].append(token_ids[1:])
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    return texts_list[:2], texts_list[2].flatten()

train_dataloader = DataLoader(ListDataset(data=train_texts), shuffle=True, batch_size=batch_size, collate_fn=collate_fn)

def collate_fn_eval(batch):
    texts_list = [[] for _ in range(2)]
    labels = []
    for text1, text2, label in batch:
        texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0])
        texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0])
        labels.append(label)
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.float, device=device)
    return texts_list, labels

valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval)

class Model(BaseModel):
    def encode(self, token_ids):
        self.eval()
        with torch.no_grad():
            hidden_state, pool_cls = self.encoder([token_ids])
            output = get_pool_emb(hidden_state, pool_cls, token_ids.gt(0).long(), self.pool_method)
        return output

model = Model(pool_method=pooling).to(device)

model.compile(
    loss=nn.CrossEntropyLoss(ignore_index=0),
    optimizer=optim.Adam(model.parameters(), lr=2e-4),
)

def evaluate(data):
    cosine_scores, labels = [], []
    for (batch_token1_ids, batch_token2_ids), label in tqdm(data):
        embeddings1 = model.encode(batch_token1_ids).cpu().numpy()
        embeddings2 = model.encode(batch_token2_ids).cpu().numpy()
        cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2))
        cosine_scores.append(cosine_score)
        labels.append(label)
    cosine_scores = np.concatenate(cosine_scores)
    labels = torch.cat(labels).cpu().numpy()
    eval_pearson_cosine, _ = spearmanr(labels, cosine_scores)
    return eval_pearson_cosine

class Evaluator(Callback):
    """评估与保存
    """

if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator])
else:
    model.load_weights('best_model.pt')

def collate_fn_eval(batch):
    texts_list = [[] for _ in range(2)]
    labels = []
    for text1, text2, label in batch:
        texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0])
        texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0])
        labels.append(label)
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.float, device=device)
    return texts_list, labels
null
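Evaluation in these snippets always follows the same recipe: score each sentence pair by the cosine similarity of its two embeddings, then report the Spearman correlation against the gold labels. The toy run below uses random arrays in place of `model.encode` output purely to show the library calls involved.

import numpy as np
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import spearmanr

# Hypothetical embeddings for 4 sentence pairs and their gold labels.
emb1 = np.random.rand(4, 8)
emb2 = np.random.rand(4, 8)
labels = np.array([1, 0, 1, 0])

cosine_scores = 1 - paired_cosine_distances(emb1, emb2)   # one score per pair
corr, _ = spearmanr(labels, cosine_scores)
print(round(float(corr), 4))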
20,877
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import pearsonr, spearmanr
import numpy as np
import re
from tqdm import tqdm
import argparse
import jieba
jieba.initialize()

parser = argparse.ArgumentParser()
parser.add_argument('--model_type', default='BERT', choices=['BERT', 'RoBERTa', 'NEZHA', 'RoFormer', 'SimBERT'])
parser.add_argument('--pooling', default='cls', choices=['first-last-avg', 'last-avg', 'cls', 'pooler'])
parser.add_argument('--task_name', default='ATEC', choices=['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STS-B'])
parser.add_argument('--dropout_rate', default=0.1, type=float)
args = parser.parse_args()
model_type = args.model_type
pooling = args.pooling
task_name = args.task_name
dropout_rate = args.dropout_rate

model_name = {'BERT': 'bert', 'RoBERTa': 'bert', 'SimBERT': 'bert', 'RoFormer': 'roformer', 'NEZHA': 'nezha'}[model_type]
batch_size = 32
maxlen = 128 if task_name == 'PAWSX' else 64
model_dir = {
    'BERT': 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12',
    'RoBERTa': 'E:/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base',
    'NEZHA': 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base',
    'RoFormer': 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base',
    'SimBERT': 'E:/pretrain_ckpt/simbert/sushen@simbert_chinese_base',
}[model_type]
config_path = f'{model_dir}/bert4torch_config.json' if model_type == 'BERT' else f'{model_dir}/config.json'
checkpoint_path = f'{model_dir}/pytorch_model.bin'
dict_path = f'{model_dir}/vocab.txt'
data_path = 'E:/data/corpus/sentence_embedding/'
device = 'cuda' if torch.cuda.is_available() else 'cpu'

if model_type in ['RoFormer']:
    tokenizer = Tokenizer(dict_path, do_lower_case=True, pre_tokenize=lambda s: jieba.lcut(s, HMM=False))
else:
    tokenizer = Tokenizer(dict_path, do_lower_case=True)

all_names = [f'{data_path}{task_name}/{task_name}.{f}.data' for f in ['train', 'valid', 'test']]
print(all_names)
all_texts = load_data(all_names)
train_texts = [j for i in all_texts for j in i[:2]]

if task_name != 'PAWSX':
    np.random.shuffle(train_texts)
    train_texts = train_texts[:10000]

def collate_fn(batch):
    texts_list = [[] for _ in range(3)]
    for text in batch:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
        texts_list[0].append([tokenizer._token_start_id] + add_noise(token_ids[1:-1]) + [tokenizer._token_end_id])
        texts_list[1].append(token_ids[:-1])
        texts_list[2].append(token_ids[1:])
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    return texts_list[:2], texts_list[2].flatten()

train_dataloader = DataLoader(ListDataset(data=train_texts), shuffle=True, batch_size=batch_size, collate_fn=collate_fn)

def collate_fn_eval(batch):
    texts_list = [[] for _ in range(2)]
    labels = []
    for text1, text2, label in batch:
        texts_list[0].append(tokenizer.encode(text1, maxlen=maxlen)[0])
        texts_list[1].append(tokenizer.encode(text2, maxlen=maxlen)[0])
        labels.append(label)
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.float, device=device)
    return texts_list, labels

valid_dataloader = DataLoader(ListDataset(data=all_texts), batch_size=batch_size, collate_fn=collate_fn_eval)

class Model(BaseModel):
    def encode(self, token_ids):
        self.eval()
        with torch.no_grad():
            hidden_state, pool_cls = self.encoder([token_ids])
            output = get_pool_emb(hidden_state, pool_cls, token_ids.gt(0).long(), self.pool_method)
        return output

model = Model(pool_method=pooling).to(device)

model.compile(
    loss=nn.CrossEntropyLoss(ignore_index=0),
    optimizer=optim.Adam(model.parameters(), lr=2e-4),
)

def evaluate(data):
    cosine_scores, labels = [], []
    for (batch_token1_ids, batch_token2_ids), label in tqdm(data):
        embeddings1 = model.encode(batch_token1_ids).cpu().numpy()
        embeddings2 = model.encode(batch_token2_ids).cpu().numpy()
        cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2))
        cosine_scores.append(cosine_score)
        labels.append(label)
    cosine_scores = np.concatenate(cosine_scores)
    labels = torch.cat(labels).cpu().numpy()
    eval_pearson_cosine, _ = spearmanr(labels, cosine_scores)
    return eval_pearson_cosine

class Evaluator(Callback):
    """评估与保存
    """

if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=5, steps_per_epoch=None, callbacks=[evaluator])
else:
    model.load_weights('best_model.pt')

def evaluate(data):
    cosine_scores, labels = [], []
    for (batch_token1_ids, batch_token2_ids), label in tqdm(data):
        embeddings1 = model.encode(batch_token1_ids).cpu().numpy()
        embeddings2 = model.encode(batch_token2_ids).cpu().numpy()
        cosine_score = 1 - (paired_cosine_distances(embeddings1, embeddings2))
        cosine_scores.append(cosine_score)
        labels.append(label)
    cosine_scores = np.concatenate(cosine_scores)
    labels = torch.cat(labels).cpu().numpy()
    eval_pearson_cosine, _ = spearmanr(labels, cosine_scores)
    return eval_pearson_cosine
null
20,878
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import paired_cosine_distances
from scipy.stats import spearmanr
import random
from tqdm import tqdm
import argparse
import numpy as np

maxlen = 64 if task_name != 'PAWSX' else 128
device = 'cuda' if torch.cuda.is_available() else 'cpu'

def collate_fn(batch):
    texts_list = [[] for _ in range(3)]
    for texts in batch:
        for i, text in enumerate(texts):
            token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
            texts_list[i].append(token_ids)
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device)
    return texts_list, labels

def collate_fn(batch):
    texts_list = [[] for _ in range(3)]
    for texts in batch:
        for i, text in enumerate(texts):
            token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
            texts_list[i].append(token_ids)
    for i, texts in enumerate(texts_list):
        texts_list[i] = torch.tensor(sequence_padding(texts), dtype=torch.long, device=device)
    labels = torch.arange(texts_list[0].size(0), device=texts_list[0].device)
    return texts_list, labels
null
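The `torch.arange` labels produced above mean that each anchor's positive sits at the same row index of the batch, the classic in-batch-negatives setup. The sketch below is only an illustration of how such labels are typically consumed (the temperature 0.05 and the plain cross-entropy form are assumptions, not necessarily the loss used by the original training script).

import torch
import torch.nn.functional as F

# Hypothetical normalized embeddings for 4 anchors and their 4 positives.
anchor = F.normalize(torch.randn(4, 16), dim=-1)
positive = F.normalize(torch.randn(4, 16), dim=-1)

sim = anchor @ positive.T / 0.05      # (4, 4) similarity matrix with a temperature
labels = torch.arange(sim.size(0))    # same construction as in the collate_fn
loss = F.cross_entropy(sim, labels)   # diagonal entries are treated as the positives
print(loss.item())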
20,879
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr import random from tqdm import tqdm import argparse import numpy as np task_name = args.task_name with open(filename, encoding='utf-8') as f: for l in f: l = l.strip().split('\t') if len(l) != 3: continue text1, text2, label = l label = str(int(int(label) > 2.5)) if task_name == 'STS-B' else label if text1 not in train_data: train_data[text1] = {'0': set(), '1': set()} train_data[text1][label].add(text2) if text2 not in train_data: train_data[text2] = {'0': set(), '1': set()} train_data[text2][label].add(text1) all_texts.extend([text1, text2]) train_samples = [] for sent1, others in train_data.items(): if len(others['1']) == 0: others['1'] = [sent1] # 没有正样本,使用自身作为正阳本,这里其实就是无监督 elif len(others['0']) == 0: others['0'] = [random.choice(all_texts)] # 没有负样本,随机挑选一个负样本 # sentence bert的逻辑是下面两个都加进去,这样的问题是如果shuffle=False,处于同一个batch中,相似句可能label给的负样本 if random.random() < 0.5: train_samples.append((sent1, random.choice(list(others['1'])), random.choice(list(others['0'])))) else: train_samples.append((random.choice(list(others['1'])), sent1, random.choice(list(others['0'])))) return train_samples train_data = get_data(f'E:/data/corpus/sentence_embedding/{task_name}/{task_name}.train.data') def get_data(filename): train_data, all_texts = {}, [] with open(filename, encoding='utf-8') as f: for l in f: l = l.strip().split('\t') if len(l) != 3: continue text1, text2, label = l label = str(int(int(label) > 2.5)) if task_name == 'STS-B' else label if text1 not in train_data: train_data[text1] = {'0': set(), '1': set()} train_data[text1][label].add(text2) if text2 not in train_data: train_data[text2] = {'0': set(), '1': set()} train_data[text2][label].add(text1) all_texts.extend([text1, text2]) train_samples = [] for sent1, others in train_data.items(): if len(others['1']) == 0: others['1'] = [sent1] # 没有正样本,使用自身作为正阳本,这里其实就是无监督 elif len(others['0']) == 0: others['0'] = [random.choice(all_texts)] # 没有负样本,随机挑选一个负样本 # sentence bert的逻辑是下面两个都加进去,这样的问题是如果shuffle=False,处于同一个batch中,相似句可能label给的负样本 if random.random() < 0.5: train_samples.append((sent1, random.choice(list(others['1'])), random.choice(list(others['0'])))) else: train_samples.append((random.choice(list(others['1'])), sent1, random.choice(list(others['0'])))) return train_samples
null
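`get_data` expects a tab-separated file with one "text1\ttext2\tlabel" triple per line (STS-B scores are binarised at 2.5) and turns it into (anchor, positive, negative) training triples, falling back to the sentence itself when no positive exists and to a random sentence when no negative exists. The toy run below writes a two-line hypothetical file to a temp path; it assumes `get_data` is in scope and that the script-level `task_name` is not 'STS-B'.

import os, random, tempfile

rows = [
    "怎么开通花呗\t花呗如何开通\t1",
    "怎么开通花呗\t如何还信用卡\t0",
]
tmp = tempfile.NamedTemporaryFile('w', suffix='.data', delete=False, encoding='utf-8')
tmp.write("\n".join(rows))
tmp.close()

random.seed(0)
print(get_data(tmp.name))   # a list of three (anchor, positive, negative) string triples
os.remove(tmp.name)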
20,880
from bert4torch.models import build_transformer_model, BaseModel from bert4torch.callbacks import Callback from bert4torch.snippets import sequence_padding, ListDataset, get_pool_emb, seed_everything import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from sklearn.metrics.pairwise import paired_cosine_distances from scipy.stats import spearmanr import random from tqdm import tqdm import argparse import numpy as np maxlen = 64 if task_name != 'PAWSX' else 128 device = 'cuda' if torch.cuda.is_available() else 'cpu' def collate_fn_eval(batch): batch_token1_ids, batch_token2_ids, batch_labels = [], [], [] for text1, text2, label in batch: token1_ids, _ = tokenizer.encode(text1, maxlen=maxlen) batch_token1_ids.append(token1_ids) token2_ids, _ = tokenizer.encode(text2, maxlen=maxlen) batch_token2_ids.append(token2_ids) batch_labels.append([label]) batch_token1_ids = torch.tensor(sequence_padding(batch_token1_ids), dtype=torch.long, device=device) batch_token2_ids = torch.tensor(sequence_padding(batch_token2_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return (batch_token1_ids, batch_token2_ids), batch_labels.flatten()
null
20,881
from torch.nn import Module from basic_language_model_chatglm import cli_demo def auto_configure_device_map(num_gpus: int) -> Dict[str, int]: # embeddings.word_embeddings 占用1层 # LayerNormFinal 和 lm_head 占用1层 # transformer.layers 占用 28 层 # 总共30层分配到num_gpus张卡上 num_trans_layers = 28 per_gpu_layers = 30 / num_gpus # bugfix: 在linux中调用torch.embedding传入的weight,input不在同一device上,导致RuntimeError # windows下 model.device 会被设置成 embeddings.word_embeddings.device # linux下 model.device 会被设置成 lm_head.device # 在调用chat或者stream_chat时,input_ids会被放到model.device上 # 如果embeddings.word_embeddings.device和model.device不同,则会导致RuntimeError # 因此这里将embeddings.word_embeddings,LayerNormFinal,lm_head都放到第一张卡上 device_map = {'embeddings.word_embeddings': 0, 'LayerNormFinal': 0, 'lm_head': 0} used = 2 gpu_target = 0 for i in range(num_trans_layers): if used >= per_gpu_layers: gpu_target += 1 used = 0 assert gpu_target < num_gpus device_map[f'encoderLayer.{i}'] = gpu_target used += 1 return device_map def load_model_on_gpus(model, num_gpus: int = 2, device_map: Optional[Dict[str, int]] = None, **kwargs) -> Module: if num_gpus < 2 and device_map is None: return model else: from accelerate import dispatch_model if device_map is None: device_map = auto_configure_device_map(num_gpus) model = dispatch_model(model, device_map=device_map) return model
null
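`auto_configure_device_map` pins the word embeddings, the final LayerNorm and the lm_head to GPU 0 and spreads the 28 transformer layers over the requested number of cards. A quick check of the mapping it returns for two GPUs (assuming the function above is in scope):

device_map = auto_configure_device_map(num_gpus=2)

print(device_map['embeddings.word_embeddings'], device_map['lm_head'])   # 0 0
# With 2 GPUs the 28 encoder layers split roughly in half:
print(device_map['encoderLayer.0'], device_map['encoderLayer.12'])       # 0 0
print(device_map['encoderLayer.13'], device_map['encoderLayer.27'])      # 1 1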
20,882
import ChatGlm2OpenaiApi from bert4torch.pipelines import ChatOpenaiClient, ChatOpenaiClientSseclient def call_openai(stream=True): url = 'http://127.0.0.1:8000' messages = [ {"content": "你好", "role": "user"}, {"content": "你好,我是法律大模型", "role": "assistant"}, {"content": "基金从业可以购买股票吗", "role": "user"} ] client = ChatOpenaiClient(url) if stream: for token in client.stream_chat(messages): print(token, end='', flush=True) else: print(client.chat(messages))
null
20,883
import ChatGlm2OpenaiApi from bert4torch.pipelines import ChatOpenaiClient, ChatOpenaiClientSseclient def call_sseclient(): url = 'http://127.0.0.1:8000/chat/completions' body = { "messages": [ {"content": "你好", "role": "user"}, {"content": "你好,我是法律大模型", "role": "assistant"}, {"content": "基金从业可以购买股票吗", "role": "user"}], "model": "default", "stream": True } client = ChatOpenaiClientSseclient(url) # 测试打印 client.stream_chat_cli(body) # 测试返回 print('\n-------------------------------------------') for token in client.stream_chat(body): print(token, end='', flush=True)
null
20,884
cli_demo = ChatGlmCli(dir_path, generation_config=generation_config, quantization_config=quantization_config) async def create_item(request: Request): json_post_raw = await request.json() json_post = json.dumps(json_post_raw) json_post_list = json.loads(json_post) prompt = json_post_list.get('prompt') history = json_post_list.get('history') response = cli_demo.chat(prompt, history=history) history.append((prompt, response)) now = datetime.datetime.now() time = now.strftime("%Y-%m-%d %H:%M:%S") answer = { "response": response, "history": history, "status": 200, "time": time } log = "[" + time + "] " + '", prompt:"' + prompt + '", response:"' + repr(response) + '"' print(log) cuda_empty_cache(CUDA_DEVICE) return answer
null
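The handler above reads `prompt` and `history` from the request JSON and returns `response`, the updated `history`, a `status` code and a timestamp. A client-side sketch is shown below; note that the route decorator and port are not visible in the snippet, so the URL used here is purely a placeholder assumption.

import requests

# Hypothetical endpoint: path and port are assumptions for illustration only.
resp = requests.post(
    "http://127.0.0.1:8000",
    json={"prompt": "基金从业可以购买股票吗", "history": []},
    timeout=60,
)
data = resp.json()
print(data["response"])   # model answer
print(data["history"])    # [[prompt, response]] ready to send back on the next turn
print(data["status"])     # 200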
20,885
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
import platform
from bert4torch.quantization import quantize_cpm_kernels

def clear():
    os.system('cls' if platform.system() == 'Windows' else 'clear')
null
20,886
import numpy as np from bert4torch.models import build_transformer_model from bert4torch.tokenizers import SpTokenizer from bert4torch.generation import AutoRegressiveDecoder import torch import jieba jieba.initialize() The provided code snippet includes necessary dependencies for implementing the `pre_tokenize` function. Write a Python function `def pre_tokenize(text)` to solve the following problem: 分词前处理函数,'\n'替换成'▃', ' '替换成'▂' Here is the function: def pre_tokenize(text): """分词前处理函数,'\n'替换成'▃', ' '替换成'▂' """ return [ w.replace(' ', u'\u2582').replace('\n', u'\u2583') for w in jieba.cut(text, cut_all=False) ]
分词前处理函数,'\n'替换成'▃', ' '替换成'▂'
20,887
import torch from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, get_pool_emb from bert4torch.generation import AutoRegressiveDecoder from bert4torch.tokenizers import Tokenizer, load_vocab synonyms_generator = SynonymsGenerator(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=maxlen, device=device) def cal_sen_emb(text_list): '''输入text的list,计算sentence的embedding ''' X, S = [], [] for t in text_list: x, s = tokenizer.encode(t) X.append(x) S.append(s) X = torch.tensor(sequence_padding(X), dtype=torch.long, device=device) S = torch.tensor(sequence_padding(S), dtype=torch.long, device=device) _, Z = model.predict([X, S]) return Z The provided code snippet includes necessary dependencies for implementing the `gen_synonyms` function. Write a Python function `def gen_synonyms(text, n=100, k=20)` to solve the following problem: 含义: 产生sent的n个相似句,然后返回最相似的k个。 做法:用seq2seq生成,并用encoder算相似度并排序。 效果: >>> gen_synonyms(u'微信和支付宝哪个好?') [ u'微信和支付宝,哪个好?', u'微信和支付宝哪个好', u'支付宝和微信哪个好', u'支付宝和微信哪个好啊', u'微信和支付宝那个好用?', u'微信和支付宝哪个好用', u'支付宝和微信那个更好', u'支付宝和微信哪个好用', u'微信和支付宝用起来哪个好?', u'微信和支付宝选哪个好', ] Here is the function: def gen_synonyms(text, n=100, k=20): """"含义: 产生sent的n个相似句,然后返回最相似的k个。 做法:用seq2seq生成,并用encoder算相似度并排序。 效果: >>> gen_synonyms(u'微信和支付宝哪个好?') [ u'微信和支付宝,哪个好?', u'微信和支付宝哪个好', u'支付宝和微信哪个好', u'支付宝和微信哪个好啊', u'微信和支付宝那个好用?', u'微信和支付宝哪个好用', u'支付宝和微信那个更好', u'支付宝和微信哪个好用', u'微信和支付宝用起来哪个好?', u'微信和支付宝选哪个好', ] """ r = synonyms_generator.generate(text, n) r = [i for i in set(r) if i != text] # 不和原文相同 r = [text] + r Z = cal_sen_emb(r) Z /= (Z**2).sum(dim=1, keepdims=True)**0.5 argsort = torch.matmul(Z[1:], -Z[0]).argsort() return [r[i + 1] for i in argsort[:k]]
含义: 产生sent的n个相似句,然后返回最相似的k个。 做法:用seq2seq生成,并用encoder算相似度并排序。 效果: >>> gen_synonyms(u'微信和支付宝哪个好?') [ u'微信和支付宝,哪个好?', u'微信和支付宝哪个好', u'支付宝和微信哪个好', u'支付宝和微信哪个好啊', u'微信和支付宝那个好用?', u'微信和支付宝哪个好用', u'支付宝和微信那个更好', u'支付宝和微信哪个好用', u'微信和支付宝用起来哪个好?', u'微信和支付宝选哪个好', ]
20,888
import build_transformer_model from bert4torch.snippets import sequence_padding, text_segmentate import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model, DeepSpeedTrainer from bert4torch.snippets import ListDataset from bert4torch.generation import SeqGeneration from bert4torch.callbacks import Callback, Logger from bert4torch.optimizers import get_linear_schedule_with_warmup from transformers import AutoTokenizer import json import jieba from rouge_chinese import Rouge from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction import numpy as np from tqdm import tqdm import pandas as pd from peft import LoraConfig, prepare_model_for_kbit_training import os max_source_length = 256 prefix = '' device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = AutoTokenizer.from_pretrained(dir_path, use_fast=False) tokenizer.pad_token_id = 0 def build_prompt(query, answer=None, history=[]): def collate_train_fn(batch): def collate_dev_fn(batch): def collate_train_fn(batch): batch_token_ids = [] for query, answer, history in batch: prompt = prefix + build_prompt(query, answer, history) token_ids = tokenizer(text_target=prompt, max_length=max_source_length, truncation=True)['input_ids'] batch_token_ids.append(token_ids) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device) return [batch_token_ids], batch_token_ids
null
20,889
import build_transformer_model from bert4torch.snippets import sequence_padding, text_segmentate import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model, DeepSpeedTrainer from bert4torch.snippets import ListDataset from bert4torch.generation import SeqGeneration from bert4torch.callbacks import Callback, Logger from bert4torch.optimizers import get_linear_schedule_with_warmup from transformers import AutoTokenizer import json import jieba from rouge_chinese import Rouge from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction import numpy as np from tqdm import tqdm import pandas as pd from peft import LoraConfig, prepare_model_for_kbit_training import os max_target_length = 256 prefix = '' tokenizer = AutoTokenizer.from_pretrained(dir_path, use_fast=False) tokenizer.pad_token_id = 0 def build_prompt(query, answer=None, history=[]): prompt = "" for old_query, old_answer in history: prompt += "<s>Human: {}\n</s><s>Assistant: {}\n</s>".format(old_query, old_answer) prompt += "<s>Human: {}\n</s><s>Assistant: ".format(query) if answer is not None: prompt += answer + "\n</s>" return prompt def collate_train_fn(batch): batch_token_ids = [] for query, answer, history in batch: prompt = prefix + build_prompt(query, answer, history) token_ids = tokenizer(text_target=prompt, max_length=max_source_length, truncation=True)['input_ids'] batch_token_ids.append(token_ids) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device) return [batch_token_ids], batch_token_ids def collate_dev_fn(batch): batch_prompt, batch_labels = [], [] for query, labels, history in batch: batch_prompt.append(prefix + build_prompt(query, None, history)) label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids'] batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True)) return batch_prompt, batch_labels def collate_dev_fn(batch): batch_prompt, batch_labels = [], [] for query, labels, history in batch: batch_prompt.append(prefix + build_prompt(query, None, history)) label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids'] batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True)) return batch_prompt, batch_labels
null
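The Llama-style `build_prompt` above wraps every turn in "<s>Human: ...</s><s>Assistant: ...</s>" markers and leaves the prompt open after the final "Assistant:" so the model continues from there. A quick look at the string it produces (assuming the function above is in scope):

history = [("你好", "你好,我是法律大模型")]
print(build_prompt("基金从业可以购买股票吗", answer=None, history=history))
# <s>Human: 你好
# </s><s>Assistant: 你好,我是法律大模型
# </s><s>Human: 基金从业可以购买股票吗
# </s><s>Assistant: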
20,890
from bert4torch.models import build_transformer_model from bert4torch.snippets import sequence_padding, text_segmentate import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import ListDataset from bert4torch.generation import SeqGeneration from bert4torch.callbacks import Callback, Logger from bert4torch.optimizers import get_linear_schedule_with_warmup from transformers import AutoTokenizer import json import jieba from rouge_chinese import Rouge from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction import numpy as np from tqdm import tqdm from peft import LoraConfig, prepare_model_for_kbit_training ort os max_source_length = 64 max_target_length = 64 prefix = '' device = 'cuda' if torch.cuda.is_available() else 'cpu' tokenizer = AutoTokenizer.from_pretrained(dir_path, trust_remote_code=True) def build_prompt(query, history): if history_column is None: prompt = query else: prompt = "" for i, (old_query, answer) in enumerate(history): prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, answer) prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) return prompt def collate_train_fn(batch): batch_token_ids, batch_labels = [], [] for query, answer, history in batch: prompt = build_prompt(query, history) prompt = prefix + prompt a_ids = tokenizer.encode(text=prompt, add_special_tokens=False) b_ids = tokenizer.encode(text=answer, add_special_tokens=False) if len(a_ids) > max_source_length - 1: a_ids = a_ids[:max_source_length - 1] if len(b_ids) > max_target_length - 2: b_ids = b_ids[:max_target_length - 2] input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids) context_length = input_ids.index(tokenizer.bos_token_id) mask_position = context_length - 1 labels = [-100] * context_length + input_ids[mask_position+1:] batch_token_ids.append(input_ids) batch_labels.append(labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device) batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device) return [batch_token_ids], batch_labels def collate_dev_fn(batch): batch_prompt, batch_labels = [], [] for query, labels, history in batch: batch_prompt.append(prefix + build_prompt(query, history)) label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids'] batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True)) return batch_prompt, batch_labels from transformers import BitsAndBytesConfig def collate_train_fn(batch): batch_token_ids, batch_labels = [], [] for query, answer, history in batch: prompt = build_prompt(query, history) prompt = prefix + prompt a_ids = tokenizer.encode(text=prompt, add_special_tokens=False) b_ids = tokenizer.encode(text=answer, add_special_tokens=False) if len(a_ids) > max_source_length - 1: a_ids = a_ids[:max_source_length - 1] if len(b_ids) > max_target_length - 2: b_ids = b_ids[:max_target_length - 2] input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids) context_length = input_ids.index(tokenizer.bos_token_id) mask_position = context_length - 1 labels = [-100] * context_length + input_ids[mask_position+1:] batch_token_ids.append(input_ids) batch_labels.append(labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device) 
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device) return [batch_token_ids], batch_labels
null
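The ChatGLM training collate above concatenates prompt and answer ids and then masks the prompt span of the labels with -100 so the loss only covers the answer tokens. The toy walk-through below reproduces just that masking rule; all token ids, including the stand-ins for the gMASK/bos/eos special tokens, are made up for illustration and are not taken from the real tokenizer.

# Hypothetical token ids, only to show how the label mask is built.
prompt_ids = [11, 12, 13]      # a_ids: the "[Round 0]\n问:...\n答:" part
answer_ids = [21, 22]          # b_ids: the reference answer
bos_id = 130004                # stand-in for tokenizer.bos_token_id

# Rough stand-in for tokenizer.build_inputs_with_special_tokens(a_ids, b_ids).
input_ids = prompt_ids + [130001, bos_id] + answer_ids + [130005]

context_length = input_ids.index(bos_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position + 1:]
print(labels)   # [-100, -100, -100, -100, 130004, 21, 22, 130005]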
20,891
from bert4torch.models import build_transformer_model from bert4torch.snippets import sequence_padding, text_segmentate import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import ListDataset from bert4torch.generation import SeqGeneration from bert4torch.callbacks import Callback, Logger from bert4torch.optimizers import get_linear_schedule_with_warmup from transformers import AutoTokenizer import json import jieba from rouge_chinese import Rouge from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction import numpy as np from tqdm import tqdm from peft import LoraConfig, prepare_model_for_kbit_training ort os max_target_length = 64 prefix = '' tokenizer = AutoTokenizer.from_pretrained(dir_path, trust_remote_code=True) def build_prompt(query, history): if history_column is None: prompt = query else: prompt = "" for i, (old_query, answer) in enumerate(history): prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, answer) prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) return prompt def collate_train_fn(batch): batch_token_ids, batch_labels = [], [] for query, answer, history in batch: prompt = build_prompt(query, history) prompt = prefix + prompt a_ids = tokenizer.encode(text=prompt, add_special_tokens=False) b_ids = tokenizer.encode(text=answer, add_special_tokens=False) if len(a_ids) > max_source_length - 1: a_ids = a_ids[:max_source_length - 1] if len(b_ids) > max_target_length - 2: b_ids = b_ids[:max_target_length - 2] input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids) context_length = input_ids.index(tokenizer.bos_token_id) mask_position = context_length - 1 labels = [-100] * context_length + input_ids[mask_position+1:] batch_token_ids.append(input_ids) batch_labels.append(labels) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device) batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device) return [batch_token_ids], batch_labels def collate_dev_fn(batch): batch_prompt, batch_labels = [], [] for query, labels, history in batch: batch_prompt.append(prefix + build_prompt(query, history)) label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids'] batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True)) return batch_prompt, batch_labels from transformers import BitsAndBytesConfig def collate_dev_fn(batch): batch_prompt, batch_labels = [], [] for query, labels, history in batch: batch_prompt.append(prefix + build_prompt(query, history)) label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids'] batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True)) return batch_prompt, batch_labels
null
20,892
from dataclasses import dataclass, field from typing import List, Optional, Dict, Sequence from bert4torch.snippets import log_warn import torch from torch import nn import os def get_model_config(model): if model == 'bloom': model_type = 'bloom' dir_path = 'E:/pretrain_ckpt/bloom/bloomz-560m' config_path = dir_path + '/bert4torch_config.json' checkpoint_path = [os.path.join(dir_path, i) for i in os.listdir(dir_path) if i.endswith('.bin')] elif model == 'llama2': model_type = 'llama' dir_path = 'E:/pretrain_ckpt/llama/llama-2-7b-chat' config_path = dir_path + '/bert4torch_config.json' checkpoint_path = [os.path.join(dir_path, i) for i in os.listdir(dir_path) if i.endswith('.bin')] else: raise ValueError(f'illegal model_choice={model}') return model_type, dir_path, config_path, checkpoint_path
null
20,893
from dataclasses import dataclass, field from typing import List, Optional, Dict, Sequence from bert4torch.snippets import log_warn import torch from torch import nn import os def get_nbit_lora_model(model, load_in_nbit=None, use_lora=False): # 量化 if load_in_nbit == 8: model.gradient_checkpointing_enable() model.enable_input_require_grads() class CastOutputToFloat(nn.Sequential): def forward(self, x): return super().forward(x).to(torch.float32) model = model.quantize(quantization_method='load_in_8bit', llm_int8_skip_modules=['model.embeddings.word_embeddings', 'lm_head']) model.lm_head = CastOutputToFloat(model.lm_head) elif load_in_nbit == 4: from transformers import BitsAndBytesConfig from peft import prepare_model_for_kbit_training q_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.float16, # 可选 torch.float32, torch.float16, torch.bfloat16 llm_int8_skip_modules=['model.embeddings.word_embeddings', 'lm_head'] ) model = model.quantize(quantization_method='load_in_4bit', quantization_config=q_config) model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True) # lora if use_lora: from peft import LoraConfig peft_config = LoraConfig( inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, target_modules=['q', 'k', 'v'] ) model = model.get_peft_model(peft_config) return model
null
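`get_nbit_lora_model` optionally quantizes the bert4torch model to 8 or 4 bit (skipping the embeddings and lm_head) and wraps it with a LoRA adapter on the q/k/v projections. A hedged usage sketch, assuming `model` was already created with `build_transformer_model(...)` as in the surrounding scripts:

model = get_nbit_lora_model(model, load_in_nbit=4, use_lora=True)

# With use_lora=True the peft wrapper typically leaves only the adapter
# weights trainable, which can be sanity-checked like this:
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable params: {trainable} / {total}")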
20,894
from dataclasses import dataclass, field from typing import List, Optional, Dict, Sequence from bert4torch.snippets import log_warn import torch from torch import nn import os class Conversation: """A class that manages prompt templates and keeps all conversation history.""" # The name of this template name: str # The system prompt system_prompt: str # All messages. format: list of [question, answer] messages: Optional[List[Sequence[str]]] # The roles of the speakers roles: Optional[Sequence[str]] # Conversation prompt prompt: str # Separator sep: str def get_prompt( self, messages: Optional[List[Sequence[str]]] = None, system_prompt: Optional[str] = "" ) -> str: """ Returns a string containing prompt without response. """ return "".join(self._format_example(messages, system_prompt)) def get_dialog( self, messages: Optional[List[Sequence[str]]] = None, system_prompt: Optional[str] = "" ) -> List[str]: """ Returns a list containing 2 * n elements where the 2k-th is a query and the (2k+1)-th is a response. """ return self._format_example(messages, system_prompt) def _format_example( self, messages: Optional[List[Sequence[str]]] = None, system_prompt: Optional[str] = "" ) -> List[str]: system_prompt = system_prompt or self.system_prompt system_prompt = system_prompt + self.sep if system_prompt else "" # add separator for non-empty system prompt messages = messages or self.messages convs = [] for turn_idx, [user_query, bot_resp] in enumerate(messages): if turn_idx == 0: convs.append(system_prompt + self.prompt.format(query=user_query)) convs.append(bot_resp) else: convs.append(self.sep + self.prompt.format(query=user_query)) convs.append(bot_resp) return convs def append_message(self, query: str, answer: str): """Append a new message.""" self.messages.append([query, answer]) conv_templates: Dict[str, Conversation] = {} The provided code snippet includes necessary dependencies for implementing the `register_conv_template` function. Write a Python function `def register_conv_template(template: Conversation)` to solve the following problem: Register a new conversation template. Here is the function: def register_conv_template(template: Conversation): """Register a new conversation template.""" conv_templates[template.name] = template
Register a new conversation template.
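`register_conv_template` simply stores a template in the module-level `conv_templates` dict keyed by its name. The sketch below registers a hypothetical template and renders a prompt from it; it assumes `Conversation` accepts keyword arguments as a dataclass (the decorator itself is not shown in the snippet, although the dataclasses import suggests it).

demo_template = Conversation(
    name="demo",                         # hypothetical template, not one of the built-ins
    system_prompt="You are a helpful assistant.",
    messages=[],
    roles=("USER", "ASSISTANT"),
    prompt="USER: {query} ASSISTANT: ",
    sep="</s>",
)
register_conv_template(demo_template)

tpl = conv_templates["demo"]
print(tpl.get_prompt(messages=[["你好", ""]]))
# You are a helpful assistant.</s>USER: 你好 ASSISTANT: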
20,895
from glob import glob import torch import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader from bert4torch.optimizers import get_linear_schedule_with_warmup from bert4torch.snippets import DottableDict, ListDataset, sequence_padding, seed_everything from bert4torch.models import BaseModel, build_transformer_model from bert4torch.trainer import DPOTrainer from bert4torch.callbacks import Callback, Logger from bert4torch.losses import DPOLoss from utils import get_model_config, get_nbit_lora_model from transformers import AutoTokenizer import json import copy args.steps_per_epoch = None args.epochs = 1 args.data_path = 'E:/Github/MedicalGPT/data/reward/**/*.json' args.device = "cuda" if torch.cuda.is_available() else "cpu" args.use_fast_tokenizer = False args.seed = 1234 args.lr = 1e-5 args.batch_size = 2 args.max_src_length = 128 args.max_tgt_length = 128 args.full_max_length = args.max_src_length + args.max_tgt_length args.grad_accumulation_steps = 1 args.trust_remote_code = True args.use_lora = False args.load_in_nbit = None args.model_type, args.model_name_or_path, args.config_path, args.checkpoint_path = get_model_config('bloom') args.model_type == 'bloom': args.use_fast_tokenizer = Tru { "use_fast": args.use_fast_tokenizer, "trust_remote_code": args.trust_remote_code, } if args.model_type == "llama" and tokenizer.pad_token is None: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) pad_token_id = tokenizer.pad_token_id or -100 def collate_fn(batch): def collate_fn(batch): chosen_ids, chosen_labels, rejected_ids, rejected_labels = [], [], [], [] for prompt_id, chosen_id, rejected_id in batch: chosen_ids.append(prompt_id+chosen_id) chosen_labels.append([pad_token_id]*len(prompt_id) + chosen_id) # prompt部分用padding位 rejected_ids.append(prompt_id+rejected_id) rejected_labels.append([pad_token_id]*len(prompt_id) + rejected_id) # 这里是把chosen和rejected放到同一个batch中,前半部分是chosen,后半部分是rejected input_ids = torch.tensor(sequence_padding(chosen_ids+rejected_ids, value=pad_token_id), dtype=torch.long, device=args.device) input_labels = torch.tensor(sequence_padding(chosen_labels+rejected_labels, value=pad_token_id), dtype=torch.long, device=args.device) return input_ids, input_labels
null
20,896
import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import ListDataset, sequence_padding, DottableDict from bert4torch.callbacks import Callback, Logger from bert4torch.optimizers import get_linear_schedule_with_warmup from transformers import AutoTokenizer from tqdm import tqdm from glob import glob import json from sklearn.metrics import mean_squared_error, mean_absolute_error from utils import get_model_config, get_nbit_lora_model args.lr = 1e-5 args.batch_size = 4 args.eval_batch_size = 4 args.grad_accumulation_steps = 1 args.max_seq_length = 512 args.epochs = 1 args.steps_per_epoch = 100 args.use_lora = False args.load_in_nbit = None args.data_path = 'E:/Github/MedicalGPT/data/reward/**/*.json' args.device = 'cuda' if torch.cuda.is_available() else 'cpu' args.model_type, args.dir_path, args.config_path, args.checkpoint_path = get_model_config('bloom') pad_token_id = tokenizer.pad_token_id or -100 def collate_fn(batch): input_ids_chosen, input_ids_rejected = [], [] for input_ids_chosen_i, input_ids_rejected_i in batch: input_ids_chosen.append(input_ids_chosen_i) input_ids_rejected.append(input_ids_rejected_i) # padding在左侧 input_ids_chosen = torch.tensor(sequence_padding(input_ids_chosen, value=pad_token_id, mode='pre'), dtype=torch.long, device=args.device) input_ids_rejected = torch.tensor(sequence_padding(input_ids_rejected, value=pad_token_id, mode='pre'), dtype=torch.long, device=args.device) return [input_ids_chosen, input_ids_rejected], None def collate_fn(batch): input_ids_chosen, input_ids_rejected = [], [] for input_ids_chosen_i, input_ids_rejected_i in batch: input_ids_chosen.append(input_ids_chosen_i) input_ids_rejected.append(input_ids_rejected_i) # padding在左侧 input_ids_chosen = torch.tensor(sequence_padding(input_ids_chosen, value=pad_token_id, mode='pre'), dtype=torch.long, device=args.device) input_ids_rejected = torch.tensor(sequence_padding(input_ids_rejected, value=pad_token_id, mode='pre'), dtype=torch.long, device=args.device) return [input_ids_chosen, input_ids_rejected], None
null
20,897
from bert4torch.models import build_transformer_model from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, DottableDict from bert4torch.callbacks import Callback, Logger import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model import json from glob import glob from transformers import AutoTokenizer from tqdm import tqdm from utils import get_model_config, get_conv_template, get_nbit_lora_model args.max_source_length = 256 args.max_target_length = 256 args.max_length = args.max_source_length + args.max_target_length args.batch_size = 2 args.grad_accumulation_steps = 4 args.lr = 5e-5 args.epochs = 1 args.use_lora = False args.load_in_nbit = None args.data_path = 'E:/Github/MedicalGPT/data/finetune/**/*.jsonl' args.device = 'cuda' if torch.cuda.is_available() else 'cpu' args.model_name = 'bloom' args.model_type, args.dir_path, args.config_path, args.checkpoint_path = get_model_config(args.model_name) tokenizer = AutoTokenizer.from_pretrained(args.dir_path, trust_remote_code=True) pad_token_id = tokenizer.pad_token_id or -100 def collate_fn(batch): batch_token_ids, batch_labels = [], [] for token_ids, label_ids in batch: batch_token_ids.append(token_ids) batch_labels.append(label_ids) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=pad_token_id), dtype=torch.long, device=args.device) batch_labels = torch.tensor(sequence_padding(batch_labels, value=pad_token_id), dtype=torch.long, device=args.device) return [batch_token_ids], batch_labels def get_conv_template(name: str) -> Conversation: """Get a conversation template.""" if name in conv_templates: return conv_templates[name] else: log_warn('No template found and use `vicuna` instead') return conv_templates['vicuna'] The provided code snippet includes necessary dependencies for implementing the `preprocess_function` function. Write a Python function `def preprocess_function(examples)` to solve the following problem: Preprocessing the datasets. part of code modified from https://github.com/lm-sys/FastChat Here is the function: def preprocess_function(examples): """ Preprocessing the datasets. part of code modified from https://github.com/lm-sys/FastChat """ input_ids_list = [] targets_list = [] roles = ["human", "gpt"] prompt_template = get_conv_template(args.model_name) def get_dialog(examples): for i, source in enumerate(examples): if len(source) < 2: continue data_role = source[0].get("from", "") if data_role not in roles or data_role != roles[0]: # Skip the first one if it is not from human source = source[1:] if len(source) < 2: continue messages = [] for j, sentence in enumerate(source): data_role = sentence.get("from", "") if data_role not in roles: logger.warning(f"unknown role: {data_role}, {i}. 
(ignored)") break if data_role == roles[j % 2]: messages.append(sentence["value"]) if len(messages) < 2 or len(messages) % 2 != 0: continue # Convert the list to pairs of elements history_messages = [[messages[k], messages[k + 1]] for k in range(0, len(messages), 2)] dialog = prompt_template.get_dialog(history_messages) yield dialog for dialog in get_dialog(examples): input_ids, labels = [], [] for i in range(len(dialog) // 2): source_ids = tokenizer.encode(text=dialog[2 * i], add_special_tokens=(i == 0)) target_ids = tokenizer.encode(text=dialog[2 * i + 1], add_special_tokens=False) if len(source_ids) > args.max_source_length: source_ids = source_ids[:args.max_source_length] if len(target_ids) > args.max_target_length - 1: # eos token target_ids = target_ids[:args.max_target_length - 1] if len(source_ids) > 0 and source_ids[0] == tokenizer.eos_token_id: source_ids = source_ids[1:] if len(target_ids) > 0 and target_ids[-1] == tokenizer.eos_token_id: target_ids = target_ids[:-1] if len(input_ids) + len(source_ids) + len(target_ids) + 1 > args.max_length: break input_ids += source_ids + target_ids + [tokenizer.eos_token_id] # add eos token for each turn labels += [pad_token_id] * len(source_ids) + target_ids + [tokenizer.eos_token_id] input_ids_list.append(input_ids) targets_list.append(labels) return list(zip(input_ids_list, targets_list))
Preprocessing the datasets. part of code modified from https://github.com/lm-sys/FastChat
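`preprocess_function` expects ShareGPT/FastChat-style data: each example is a list of alternating {"from": "human"/"gpt", "value": ...} turns, starting with a human turn. It tokenizes every dialog into an (input_ids, labels) pair in which the prompt span of the labels is filled with pad_token_id so only answer tokens contribute to the loss. The example below only illustrates the expected input structure (the dialog text is made up); actually calling it requires the tokenizer and prompt template from this script to be loaded.

# Hypothetical examples in the structure preprocess_function expects.
examples = [
    [
        {"from": "human", "value": "一岁宝宝发烧能吃啥药?"},
        {"from": "gpt", "value": "建议先物理降温,并及时就医,遵医嘱用药。"},
    ],
]

pairs = preprocess_function(examples)
print(len(pairs), len(pairs[0]))   # 1 2  -> one (input_ids, labels) pair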
20,898
from bert4torch.models import build_transformer_model from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, DottableDict from bert4torch.callbacks import Callback, Logger import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model import json from glob import glob from transformers import AutoTokenizer from tqdm import tqdm from utils import get_model_config, get_conv_template, get_nbit_lora_model args.max_source_length = 256 args.max_target_length = 256 args.max_length = args.max_source_length + args.max_target_length args.batch_size = 2 args.grad_accumulation_steps = 4 args.lr = 5e-5 args.epochs = 1 args.use_lora = False args.load_in_nbit = None args.data_path = 'E:/Github/MedicalGPT/data/finetune/**/*.jsonl' args.device = 'cuda' if torch.cuda.is_available() else 'cpu' args.model_name = 'bloom' args.model_type, args.dir_path, args.config_path, args.checkpoint_path = get_model_config(args.model_name) pad_token_id = tokenizer.pad_token_id or -100 def collate_fn(batch): def collate_fn(batch): batch_token_ids, batch_labels = [], [] for token_ids, label_ids in batch: batch_token_ids.append(token_ids) batch_labels.append(label_ids) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=pad_token_id), dtype=torch.long, device=args.device) batch_labels = torch.tensor(sequence_padding(batch_labels, value=pad_token_id), dtype=torch.long, device=args.device) return [batch_token_ids], batch_labels
null
20,899
from bert4torch.models import build_transformer_model from bert4torch.snippets import sequence_padding import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader import torch from bert4torch.models import build_transformer_model from bert4torch.snippets import IterDataset, DottableDict from bert4torch.callbacks import Callback, Logger from bert4torch.optimizers import get_linear_schedule_with_warmup from transformers import AutoTokenizer from tqdm import tqdm from glob import glob from utils import get_model_config, get_nbit_lora_model args.lr = 5e-5 args.batch_size = 1 args.eval_batch_size = 4 args.grad_accumulation_steps = 4 args.max_seq_length = 512 args.epochs = 1 args.steps_per_epoch = 500 args.data_path = 'E:/Github/MedicalGPT/data/pretrain/**/*.txt' args.device = 'cuda' if torch.cuda.is_available() else 'cpu' args.use_lora = False args.load_in_nbit = None args.model_type, args.dir_path, args.config_path, args.checkpoint_path = get_model_config('bloom') tokenizer = AutoTokenizer.from_pretrained(args.dir_path, trust_remote_code=True) def collate_fn(batch_token_ids): batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=args.device) return [batch_token_ids], batch_token_ids def collate_fn(batch_token_ids): batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=args.device) return [batch_token_ids], batch_token_ids
null