id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
20,694 | import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import jieba
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
model = Model().to(device)
The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem:
单条样本推理
Here is the function:
def inference(texts):
'''单条样本推理
'''
for text in texts:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
token_ids = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
segment_ids = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
logit = model.predict([token_ids, segment_ids])
y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
print(text, ' ----> ', y_pred) | 单条样本推理 |
20,695 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base/config.json'
checkpoint_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten() | null |
20,696 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base/config.json'
checkpoint_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v1_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total | null |
20,697 | import torch
import torch.nn as nn
import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from torch.optim import Adam
import torch.nn.functional as F
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from torch.utils.data import DataLoader
def load_data(filename):
D = []
with open(filename, encoding='utf-8') as f:
for l in f:
text, label = l.strip().split('\t')
D.append((text, int(label)))
return D | null |
20,698 | import torch
import torch.nn as nn
import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from torch.optim import Adam
import torch.nn.functional as F
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from torch.utils.data import DataLoader
choice = 'semi-sup'
if choice == 'zero-shot2':
train_data = unlabeled_data # 仅使用无监督数据继续mlm预训练
elif choice == 'few-shot':
train_data = train_data[:num_labeled] # 仅使用少量监督数据
elif choice == 'semi-sup': # 少量监督数据和全量无监督数据做半监督
train_data = train_data[:num_labeled]
train_data = train_data + unlabeled_data
The provided code snippet includes necessary dependencies for implementing the `random_masking` function. Write a Python function `def random_masking(token_ids)` to solve the following problem:
对输入进行随机mask
Here is the function:
def random_masking(token_ids):
"""对输入进行随机mask
"""
rands = np.random.random(len(token_ids))
source, target = [], []
for r, t in zip(rands, token_ids):
if r < 0.15 * 0.8:
source.append(tokenizer._token_mask_id)
target.append(t)
elif r < 0.15 * 0.9:
source.append(t)
target.append(t)
elif r < 0.15:
source.append(np.random.choice(tokenizer._vocab_size - 1) + 1)
target.append(t)
else:
source.append(t)
target.append(0)
return source, target | 对输入进行随机mask |
20,699 | ers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
maxlen = 1024
batch_size = 3
config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten() | null |
20,700 | ers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
maxlen = 1024
batch_size = 3
config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total | null |
20,701 | import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
writer = SummaryWriter(log_dir='./summary')
seed_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
aloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.train.data'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data'), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.test.data'), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
# def on_batch_end(self, global_step, local_step, logs=None):
# if global_step % 10 == 0:
# writer.add_scalar(f"train/loss", logs['loss'], global_step)
# val_acc = evaluate(valid_dataloader)
# writer.add_scalar(f"valid/acc", val_acc, global_step)
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten() | null |
20,702 | import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
writer = SummaryWriter(log_dir='./summary')
seed_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.train.data'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data'), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.test.data'), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
# def on_batch_end(self, global_step, local_step, logs=None):
# if global_step % 10 == 0:
# writer.add_scalar(f"train/loss", logs['loss'], global_step)
# val_acc = evaluate(valid_dataloader)
# writer.add_scalar(f"valid/acc", val_acc, global_step)
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total | null |
20,703 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v2_char_base/config.json'
checkpoint_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v2_char_base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v2_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten() | null |
20,704 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v2_char_base/config.json'
checkpoint_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v2_char_base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/roformer/sushen@roformer_v2_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total | null |
20,705 | import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from bert4torch.models import build_transformer_model
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.snippets import ListDataset, sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
device = 'cuda' if torch.cuda.is_available() else 'cpu'
maxlen = 128
batch_token_ids, batch_segment_ids = [], []
for texts in batch:
token_ids, segment_ids = [tokenizer._token_start_id], [0]
for i, text in enumerate(texts):
ids = tokenizer.encode(text)[0][1:]
# 这里做了截断
if len(token_ids) + len(ids) <= maxlen:
token_ids.extend(ids)
segment_ids.extend([i % 2] * len(ids))
else:
break
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
return [[batch_token_ids, batch_segment_ids]], [batch_token_ids, batch_segment_ids
def __init__(self, **kwargs):
super().__init__(**kwargs
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
[ ['哈哈', '哦', '你是猪', '不是'] ] [CLS]text1[SEP]text2[SEP]
Here is the function:
def collate_fn(batch):
"""
[
['哈哈', '哦', '你是猪', '不是']
]
[CLS]text1[SEP]text2[SEP]
"""
batch_token_ids, batch_segment_ids = [], []
for texts in batch:
token_ids, segment_ids = [tokenizer._token_start_id], [0]
for i, text in enumerate(texts):
ids = tokenizer.encode(text)[0][1:]
# 这里做了截断
if len(token_ids) + len(ids) <= maxlen:
token_ids.extend(ids)
segment_ids.extend([i % 2] * len(ids))
else:
break
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
return [[batch_token_ids, batch_segment_ids]], [batch_token_ids, batch_segment_ids] | [ ['哈哈', '哦', '你是猪', '不是'] ] [CLS]text1[SEP]text2[SEP] |
20,706 | import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from bert4torch.models import build_transformer_model
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.snippets import ListDataset, sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
for texts in batch:
token_ids, segment_ids = [tokenizer._token_start_id], [0]
for i, text in enumerate(texts):
ids = tokenizer.encode(text)[0][1:]
# 这里做了截断
if len(token_ids) + len(ids) <= maxlen:
token_ids.extend(ids)
segment_ids.extend([i % 2] * len(ids))
else:
break
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
def __init__(self, **kwargs):
super().__init__(**kwargs
def response(self, texts, topk=5):
token_ids, segment_ids = [tokenizer._token_start_id], [0]
for i, text in enumerate(texts):
ids = tokenizer.encode(text)[0][1:]
token_ids.extend(ids)
segment_ids.extend([i % 2] * len(ids))
results = self.random_sample([token_ids, segment_ids], n=1, topk=topk)
return tokenizer.decode(results[0].cpu().numpy())
chatbot =
def just_show():
texts = ["你什么时候开始实习"]
print('just show {0}'.format(chatbot.response(texts))) | null |
20,707 | import torch.optim as optim
import json
from torch.utils.data import DataLoader
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import SpTokenizer
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
pad_token_id = -100
max_i_len = 512
max_t_len = 128
device = 'cuda' if torch.cuda.is_available() else 'cpu'
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
格式为:
Here is the function:
def collate_fn(batch):
"""
格式为:
"""
batch_input_ids, batch_target_ids = [], []
for input_text, target_text in batch:
token_ids, _ = tokenizer.encode(input_text, maxlen=max_i_len)
batch_input_ids.append(token_ids)
token_ids, _ = tokenizer.encode(target_text, maxlen=max_t_len)
batch_target_ids.append([0] + token_ids)
batch_input_ids = torch.tensor(sequence_padding(batch_input_ids, value=pad_token_id),
dtype=torch.long,
device=device)
batch_target_ids = torch.tensor(sequence_padding(batch_target_ids, value=pad_token_id),
dtype=torch.long,
device=device)
return [[batch_input_ids], [batch_target_ids[:, :-1]]], batch_target_ids[:, 1:].flatten() | 格式为: |
20,708 | import torch.optim as optim
import json
from torch.utils.data import DataLoader
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import SpTokenizer
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
autogen =
def just_show():
input_text = """
假设”就在这个时候,王琪瑶已经坐在了蒋丽丽的床边“我们可以推断“蒋丽丽没有床”?选项:是的,不是,或也许?\n答案:
"""
print(input_text + autogen.generate(text=input_text)) | null |
20,709 |
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:content:[CLS]文章[SEP] tgt: [CLS]标题[SEP]
Here is the function:
def collate_fn(batch):
"""单条样本格式:content:[CLS]文章[SEP] tgt: [CLS]标题[SEP]
"""
batch_content_ids, batch_titile_ids = [], []
for title, content in batch:
token_ids, _ = tokenizer.encode(content, maxlen=max_c_len)
batch_content_ids.append(token_ids)
token_ids, _ = tokenizer.encode(title, maxlen=max_t_len)
batch_titile_ids.append(token_ids)
batch_content_ids = torch.tensor(sequence_padding(batch_content_ids), dtype=torch.long, device=device)
batch_titile_ids = torch.tensor(sequence_padding(batch_titile_ids), dtype=torch.long, device=device)
return [[batch_content_ids], [batch_titile_ids[:, :-1]]], batch_titile_ids[:, 1:].flatten() | 单条样本格式:content:[CLS]文章[SEP] tgt: [CLS]标题[SEP] |
20,710 |
def just_show():
    """Print generated titles for two fixed sample paper abstracts."""
    samples = [
        u'抽象了一种基于中心的战术应用场景与业务,并将网络编码技术应用于此类场景的实时数据多播业务中。在分析基于中心网络与Many-to-all业务模式特性的基础上,提出了仅在中心节点进行编码操作的传输策略以及相应的贪心算法。分析了网络编码多播策略的理论增益上界,仿真试验表明该贪心算法能够获得与理论相近的性能增益。最后的分析与仿真试验表明,在这种有中心网络的实时数据多播应用中,所提出的多播策略的实时性能要明显优于传统传输策略。',
        u'普适计算环境中未知移动节点的位置信息是定位服务要解决的关键技术。在普适计算二维空间定位过程中,通过对三角形定位单元区域的误差分析,提出了定位单元布局(LUD)定理。在此基础上,对多个定位单元布局进行了研究,定义了一个新的描述定位单元中定位参考点覆盖效能的物理量——覆盖基,提出了在误差最小情况下定位单元布局的覆盖基定理。仿真实验表明定位单元布局定理能更好地满足对普适终端实时定位的需求,且具有较高的精度和最大覆盖效能。',
    ]
    for sample in samples:
        print(u'生成标题:', autotitle.generate(sample))
20,711 | h.models import build_transformer_model, BaseModel
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, seed_everything, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from tqdm import tqdm
import json
from rouge import Rouge
en = 256
max_t_len = 32
batch_size = 16
epochs = 50
steps_per_epoch = None
fig_path = 'E:/pretrain_ckpt/bart/fnlp@bart-base-chinese/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bart/fnlp@bart-base-chinese/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bart/fnlp@bart-base-chinese/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)
load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
train_dataloader = DataLoader(MyDataset('E:/data/corpus/seq2seq/summary/csl_title_public/csl_title_train.json'),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataset = MyDataset('E:/data/corpus/seq2seq/summary/csl_title_public/csl_title_dev.json')
test_dataset = MyDataset('E:/data/corpus/seq2seq/summary/csl_title_public/csl_title_test.json')
model = build_transformer_model(config_path, checkpoint_path, keep_tokens=keep_tokens, add_trainer=True).to(device)
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1.5e-5))
autotitle = AutoTitle(bos_token_id=tokenizer._token_end_id, eos_token_id=tokenizer._token_end_id, max_new_tokens=max_t_len, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.rouge = Rouge()
self.smooth = SmoothingFunction().method1
self.best_bleu = 0.
def on_epoch_end(self, steps, epoch, logs=None):
just_show()
metrics = self.evaluate(valid_dataset.data) # 评测模型
metrics_test = self.evaluate(test_dataset.data) # 评测模型
if metrics['bleu'] > self.best_bleu:
self.best_bleu = metrics['bleu']
# model.save_weights('./best_model.pt') # 保存模型
metrics['best_bleu'] = self.best_bleu
print('valid_data:', metrics)
print('test_data:', metrics_test)
def evaluate(self, data, topk=1):
total = 0
rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0
for title, content in tqdm(data):
total += 1
title = ' '.join(title).lower()
pred_title = ' '.join(autotitle.generate(content, topk)).lower()
if pred_title.strip():
scores = self.rouge.get_scores(hyps=pred_title, refs=title)
rouge_1 += scores[0]['rouge-1']['f']
rouge_2 += scores[0]['rouge-2']['f']
rouge_l += scores[0]['rouge-l']['f']
bleu += sentence_bleu(references=[title.split(' ')], hypothesis=pred_title.split(' '),
smoothing_function=self.smooth)
rouge_1, rouge_2, rouge_l, bleu = rouge_1/total, rouge_2/total, rouge_l/total, bleu/total
return {'rouge-1': rouge_1, 'rouge-2': rouge_2, 'rouge-l': rouge_l, 'bleu': bleu}
if __name__ == '__main__':
evaluator = Evaluator()
just_show()
model.fit(
train_dataloader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:content:[CLS]文章[SEP] tgt: [CLS]标题[SEP]
Here is the function:
def collate_fn(batch):
    """Batch encoder: content "[CLS]文章[SEP]", tgt "[CLS]标题[SEP]".

    The decoder target is prefixed with [SEP]: the pretrained BART decoder
    starts its sequences with that token.
    """
    src_ids, tgt_ids = [], []
    for title, content in batch:
        src_ids.append(tokenizer.encode(content, maxlen=max_c_len)[0])
        t_ids, _ = tokenizer.encode(title, maxlen=max_t_len)
        tgt_ids.append([tokenizer._token_end_id] + t_ids)  # 预训练时候是用[SEP]开头的
    src_ids = torch.tensor(sequence_padding(src_ids), dtype=torch.long, device=device)
    tgt_ids = torch.tensor(sequence_padding(tgt_ids), dtype=torch.long, device=device)
    return [[src_ids], [tgt_ids[:, :-1]]], tgt_ids[:, 1:].flatten()
20,712 | h.models import build_transformer_model, BaseModel
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, seed_everything, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from tqdm import tqdm
import json
from rouge import Rouge
en = 256
max_t_len = 32
batch_size = 16
epochs = 50
steps_per_epoch = None
fig_path = 'E:/pretrain_ckpt/bart/fnlp@bart-base-chinese/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bart/fnlp@bart-base-chinese/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bart/fnlp@bart-base-chinese/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)
load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
train_dataloader = DataLoader(MyDataset('E:/data/corpus/seq2seq/summary/csl_title_public/csl_title_train.json'),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataset = MyDataset('E:/data/corpus/seq2seq/summary/csl_title_public/csl_title_dev.json')
test_dataset = MyDataset('E:/data/corpus/seq2seq/summary/csl_title_public/csl_title_test.json')
model = build_transformer_model(config_path, checkpoint_path, keep_tokens=keep_tokens, add_trainer=True).to(device)
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1.5e-5))
autotitle = AutoTitle(bos_token_id=tokenizer._token_end_id, eos_token_id=tokenizer._token_end_id, max_new_tokens=max_t_len, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.rouge = Rouge()
self.smooth = SmoothingFunction().method1
self.best_bleu = 0.
def on_epoch_end(self, steps, epoch, logs=None):
just_show()
metrics = self.evaluate(valid_dataset.data) # 评测模型
metrics_test = self.evaluate(test_dataset.data) # 评测模型
if metrics['bleu'] > self.best_bleu:
self.best_bleu = metrics['bleu']
# model.save_weights('./best_model.pt') # 保存模型
metrics['best_bleu'] = self.best_bleu
print('valid_data:', metrics)
print('test_data:', metrics_test)
def evaluate(self, data, topk=1):
total = 0
rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0
for title, content in tqdm(data):
total += 1
title = ' '.join(title).lower()
pred_title = ' '.join(autotitle.generate(content, topk)).lower()
if pred_title.strip():
scores = self.rouge.get_scores(hyps=pred_title, refs=title)
rouge_1 += scores[0]['rouge-1']['f']
rouge_2 += scores[0]['rouge-2']['f']
rouge_l += scores[0]['rouge-l']['f']
bleu += sentence_bleu(references=[title.split(' ')], hypothesis=pred_title.split(' '),
smoothing_function=self.smooth)
rouge_1, rouge_2, rouge_l, bleu = rouge_1/total, rouge_2/total, rouge_l/total, bleu/total
return {'rouge-1': rouge_1, 'rouge-2': rouge_2, 'rouge-l': rouge_l, 'bleu': bleu}
if __name__ == '__main__':
evaluator = Evaluator()
just_show()
model.fit(
train_dataloader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
def just_show():
    """Show title-generation output on two held-out abstracts."""
    s1 = u'抽象了一种基于中心的战术应用场景与业务,并将网络编码技术应用于此类场景的实时数据多播业务中。在分析基于中心网络与Many-to-all业务模式特性的基础上,提出了仅在中心节点进行编码操作的传输策略以及相应的贪心算法。分析了网络编码多播策略的理论增益上界,仿真试验表明该贪心算法能够获得与理论相近的性能增益。最后的分析与仿真试验表明,在这种有中心网络的实时数据多播应用中,所提出的多播策略的实时性能要明显优于传统传输策略。'
    s2 = u'普适计算环境中未知移动节点的位置信息是定位服务要解决的关键技术。在普适计算二维空间定位过程中,通过对三角形定位单元区域的误差分析,提出了定位单元布局(LUD)定理。在此基础上,对多个定位单元布局进行了研究,定义了一个新的描述定位单元中定位参考点覆盖效能的物理量——覆盖基,提出了在误差最小情况下定位单元布局的覆盖基定理。仿真实验表明定位单元布局定理能更好地满足对普适终端实时定位的需求,且具有较高的精度和最大覆盖效能。'
    for text in (s1, s2):
        print(u'生成标题:', autotitle.generate(text))
20,713 |
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:[CLS]文章[SEP]标题[SEP]
Here is the function:
def collate_fn(batch):
    """Pack each sample as one UniLM sequence: [CLS]文章[SEP]标题[SEP]."""
    token_ids_list, segment_ids_list = [], []
    for title, content in batch:
        ids, segs = tokenizer.encode(content, title, maxlen=maxlen)
        token_ids_list.append(ids)
        segment_ids_list.append(segs)
    token_ids = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_ids_list), dtype=torch.long, device=device)
    # UniLM computes the LM loss from the inputs themselves.
    return [token_ids, segment_ids], [token_ids, segment_ids]
20,715 | os
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding
from bert4torch.snippets import ListDataset
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import re
import torch.nn.functional as F
def process_data():
    """Build train/valid splits from SogouQA + WebQA; no-op if already cached.

    Fix: all files were opened bare (`json.load(open(...))`, `json.dump(...,
    open(..., 'w'))`) and never closed — the write handles in particular
    risked unflushed output. Every open now uses a `with` block.
    """
    if os.path.exists('E:/data/corpus/qa/CIPS-SOGOU/train_data.json'):
        return
    # 标注数据
    with open('E:/data/corpus/qa/WebQA.json', encoding='utf-8') as f:
        webqa_data = json.load(f)
    with open('E:/data/corpus/qa/SogouQA.json', encoding='utf-8') as f:
        sogou_data = json.load(f)
    # Fixed-seed shuffle so the valid split is reproducible across runs.
    random_order = list(range(len(sogou_data)))
    np.random.seed(2022)
    np.random.shuffle(random_order)
    # 划分valid
    train_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 != 0]
    valid_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 == 0]
    train_data.extend(train_data)
    train_data.extend(webqa_data)  # 将SogouQA和WebQA按2:1的比例混合
    with open('E:/data/corpus/qa/CIPS-SOGOU/train_data.json', 'w', encoding='utf-8') as f:
        json.dump(train_data, f, indent=4)
    with open('E:/data/corpus/qa/CIPS-SOGOU/valid_data.json', 'w', encoding='utf-8') as f:
        json.dump(valid_data, f, indent=4)
20,716 | from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding
from bert4torch.snippets import ListDataset
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import re
import torch.nn.functional as F
max_q_len = 64
max_a_len = 32
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(token_dict, do_lower_case=True)
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式为 输入: [CLS][MASK][MASK][SEP]问题[SEP]篇章[SEP] 输出: 答案
Here is the function:
def collate_fn(batch):
    """Build MLM-style QA batches.

    Input : [CLS][MASK]*max_a_len[SEP]问题[SEP]篇章[SEP]
    Output: answer token ids (targets for the [MASK] span).
    """
    token_ids_list, segment_ids_list, answer_ids_list = [], [], []
    for sample in batch:
        question = sample['question']
        answers = [p['answer'] for p in sample['passages'] if p['answer']]
        passage = np.random.choice(sample['passages'])['passage']
        passage = re.sub(u' |、|;|,', ',', passage)
        # Keep the first answer fully contained in the truncated passage.
        final_answer = ''
        for answer in answers:
            if all(a in passage[:max_p_len - 2] for a in answer.split(' ')):
                final_answer = answer.replace(' ', ',')
                break
        a_ids, _ = tokenizer.encode(final_answer, maxlen=max_a_len + 1)
        q_ids, _ = tokenizer.encode(question, maxlen=max_q_len + 1)
        p_ids, _ = tokenizer.encode(passage, maxlen=max_p_len + 1)
        ids = ([tokenizer._token_start_id]
               + [tokenizer._token_mask_id] * max_a_len
               + [tokenizer._token_end_id]
               + q_ids[1:] + p_ids[1:])
        token_ids_list.append(ids)
        segment_ids_list.append([0] * len(ids))
        answer_ids_list.append(a_ids[1:])  # drop [CLS] from the answer target
    batch_token_ids = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(segment_ids_list), dtype=torch.long, device=device)
    batch_a_token_ids = torch.tensor(sequence_padding(answer_ids_list, max_a_len), dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_a_token_ids
20,717 | from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding
from bert4torch.snippets import ListDataset
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import re
import torch.nn.functional as F
def gen_answer(question, passages):
    """Decode one candidate answer per passage via the [MASK] positions.

    Because the model is MLM-style, each masked slot is argmax-decoded
    directly. Returns {answer_text: aggregated_score} over all passages.
    """
    all_p_token_ids, token_ids, segment_ids = [], [], []
    for passage in passages:
        passage = re.sub(u' |、|;|,', ',', passage)
        p_token_ids, _ = tokenizer.encode(passage, maxlen=max_p_len + 1)
        q_token_ids, _ = tokenizer.encode(question, maxlen=max_q_len + 1)
        all_p_token_ids.append(p_token_ids[1:])
        ids = [tokenizer._token_start_id]
        ids += [tokenizer._token_mask_id] * max_a_len
        ids += [tokenizer._token_end_id]
        ids += q_token_ids[1:] + p_token_ids[1:]
        token_ids.append(ids)
        segment_ids.append([0] * len(ids))
    token_ids = torch.tensor(sequence_padding(token_ids), device=device)
    segment_ids = torch.tensor(sequence_padding(segment_ids), device=device)
    # Only the max_a_len masked positions (right after [CLS]) are decoded.
    logit = model.predict([token_ids, segment_ids])[-1][:, 1:max_a_len + 1, :]
    probas = F.softmax(logit, dim=-1)
    results = {}
    for p_ids, prob in zip(all_p_token_ids, probas):
        ans, score = tuple(), 0.
        for i in range(max_a_len):
            # Restrict each step to n-grams occurring in the passage
            # (plus [SEP] so decoding can stop early).
            idxs = list(get_ngram_set(p_ids, i + 1)[ans])
            if tokenizer._token_end_id not in idxs:
                idxs.append(tokenizer._token_end_id)
            # Zero out probabilities of tokens outside the passage.
            masked = torch.zeros_like(prob[i])
            masked[idxs] = prob[i, idxs]
            ans = ans + (masked.argmax().item(),)
            score += masked.max().item()
            if ans[-1] == tokenizer._token_end_id:
                break
        score = score / (i + 1)
        text = tokenizer.decode(ans)
        if text:
            results[text] = results.get(text, []) + [score]
    # Squared-sum over (sum + 1): rewards answers supported by many passages.
    return {k: (np.array(v) ** 2).sum() / (sum(v) + 1) for k, v in results.items()}
def max_in_dict(d):
    """Return the key with the largest value, or None for an empty dict."""
    if not d:
        return None
    # max() keeps the first of equal-valued keys, matching a stable
    # descending sort on the values.
    return max(d.items(), key=lambda kv: kv[1])[0]
The provided code snippet includes necessary dependencies for implementing the `predict_to_file` function. Write a Python function `def predict_to_file(data, filename)` to solve the following problem:
将预测结果输出到文件,方便评估
Here is the function:
def predict_to_file(data, filename):
    """Write "id<TAB>answer" predictions to `filename` for offline scoring."""
    with open(filename, 'w', encoding='utf-8') as f:
        for d in tqdm(iter(data), desc=u'正在预测(共%s条样本)' % len(data)):
            question = d['question']
            passages = [p['passage'] for p in d['passages']]
            answer = max_in_dict(gen_answer(question, passages))
            if answer:
                f.write(u'%s\t%s\n' % (d['id'], answer))
            else:
                f.write(u'%s\t\n' % (d['id']))
            # Flush per sample so partial results survive an interruption.
            f.flush()
20,718 | t4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import glob
= 256
batch_size = 16
epochs = 10000
fig_path = 'E:/pretrain_ckpt/roberta/ethanyt@guwenbert-base/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/roberta/ethanyt@guwenbert-base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/roberta/ethanyt@guwenbert-base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(dict_path, do_lower_case=True)
train_dataloader = DataLoader(ListDataset(glob.glob('E:/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
model = build_transformer_model(
config_path,
checkpoint_path,
with_mlm=True,
application='unilm',
add_trainer=True,
pad_token_id=tokenizer._token_pad_id,
use_segment_embedding=False,
custom_position_ids='start_at_padding'
).to(device)
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
autotitle = AutoTitle(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=32, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
just_show()
evaluator = Evaluator()
model.fit(
train_dataloader,
steps_per_epoch=None,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:[CLS]篇章[SEP]答案[SEP]问题[SEP]
Here is the function:
def collate_fn(batch):
    """Read THUCNews text files and encode "[CLS]正文[SEP]标题[SEP]" pairs.

    Each batch item is a file path whose first line is the title and the
    remainder the body. Fix: the file was opened via bare
    `open(path).read()` and never closed (handle leak); it now uses `with`.
    """
    token_ids_list, segment_ids_list = [], []
    for path in batch:
        with open(path, encoding='utf-8') as f:
            lines = f.read().split('\n')
        if len(lines) > 1:
            title = lines[0]
            content = '\n'.join(lines[1:])
            ids, segs = tokenizer.encode(content, title, maxlen=maxlen)
            token_ids_list.append(ids)
            segment_ids_list.append(segs)
    token_ids = torch.tensor(sequence_padding(token_ids_list, value=tokenizer._token_pad_id), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_ids_list, value=tokenizer._token_pad_id), dtype=torch.long, device=device)
    return [token_ids, segment_ids], [token_ids, segment_ids]
20,719 | t4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import glob
= 256
batch_size = 16
epochs = 10000
fig_path = 'E:/pretrain_ckpt/roberta/ethanyt@guwenbert-base/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/roberta/ethanyt@guwenbert-base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/roberta/ethanyt@guwenbert-base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(dict_path, do_lower_case=True)
train_dataloader = DataLoader(ListDataset(glob.glob('E:/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
model = build_transformer_model(
config_path,
checkpoint_path,
with_mlm=True,
application='unilm',
add_trainer=True,
pad_token_id=tokenizer._token_pad_id,
use_segment_embedding=False,
custom_position_ids='start_at_padding'
).to(device)
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
autotitle = AutoTitle(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=32, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
just_show()
evaluator = Evaluator()
model.fit(
train_dataloader,
steps_per_epoch=None,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
def just_show():
    """Print generated titles for two fixed news snippets."""
    samples = [
        u'夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及 时 就医 。',
        u'8月28日,网络爆料称,华住集团旗下连锁酒店用户数据疑似发生泄露。从卖家发布的内容看,数据包含华住旗下汉庭、禧玥、桔子、宜必思等10余个品牌酒店的住客信息。泄露的信息包括华住官网注册资料、酒店入住登记的身份信息及酒店开房记录,住客姓名、手机号、邮箱、身份证号、登录账号密码等。卖家对这个约5亿条数据打包出售。第三方安全平台威胁猎人对信息出售者提供的三万条数据进行验证,认为数据真实性非常高。当天下午 ,华 住集 团发声明称,已在内部迅速开展核查,并第一时间报警。当晚,上海警方消息称,接到华住集团报案,警方已经介入调查。',
    ]
    for sample in samples:
        print(u'生成标题:', autotitle.generate(sample))
20,720 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, get_pool_emb
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer, load_vocab
tokenizer = Tokenizer(token_dict, do_lower_case=True)
def truncate(text):
def collate_fn(batch):
    """Pick two synonymous texts per sample and encode both orderings.

    Encoding each pair in both directions doubles the data and makes the
    seq2seq objective symmetric.
    """
    token_ids_list, segment_ids_list = [], []
    for d in batch:
        candidates = [d['text']] + d['synonyms']
        np.random.shuffle(candidates)
        first, second = candidates[0], candidates[1]
        first, second = truncate(first), truncate(second)
        for a, b in ((first, second), (second, first)):
            ids, segs = tokenizer.encode(a, b, maxlen=maxlen * 2)
            token_ids_list.append(ids)
            segment_ids_list.append(segs)
    token_ids = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_ids_list), dtype=torch.long, device=device)
    return [token_ids, segment_ids], [token_ids, segment_ids]
20,721 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, get_pool_emb
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer, load_vocab
def gen_synonyms(text, n=100, k=20):
    """Generate n paraphrase candidates for `text`; return the k closest.

    Candidates come from the seq2seq generator; ranking uses cosine
    similarity of L2-normalised encoder sentence embeddings.

    Example:
    >>> gen_synonyms(u'微信和支付宝哪个好?')
    [u'微信和支付宝,哪个好?', u'微信和支付宝哪个好', ...]
    """
    candidates = [c for c in set(synonyms_generator.generate(text, n)) if c != text]
    candidates = [text] + candidates
    emb = cal_sen_emb(candidates)
    emb /= (emb**2).sum(dim=1, keepdims=True)**0.5  # L2-normalise each row
    # argsort of negated dot products == descending cosine similarity.
    order = torch.matmul(emb[1:], -emb[0]).argsort()
    return [candidates[i + 1] for i in order[:k]]
The provided code snippet includes necessary dependencies for implementing the `just_show` function. Write a Python function `def just_show(some_samples)` to solve the following problem:
随机观察一些样本的效果
Here is the function:
def just_show(some_samples):
    """Print paraphrases for three randomly chosen samples (best effort)."""
    picks = [np.random.choice(some_samples) for _ in range(3)]
    for text in picks:
        try:
            print(u'原句子:%s' % text)
            print(u'同义句子:', gen_synonyms(text, 10, 10))
            print()
        except:  # demo only: swallow generation errors, as in the original
            pass
20,722 | import division
import json, re
from tqdm import tqdm
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from torch import nn, optim
import torch
from torch.utils.data import DataLoader
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from sympy import Integer
import warnings
def is_equal(a, b):
    """Compare two numeric results with 6-decimal rounding tolerance."""
    return round(float(a), 6) == round(float(b), 6)
def collate_fn(batch):
    """Encode (question, equation) pairs as one UniLM sequence."""
    ids_list, segs_list = [], []
    for question, equation, answer in batch:
        ids, segs = tokenizer.encode(question, equation, maxlen=maxlen)
        ids_list.append(ids)
        segs_list.append(segs)
    ids = torch.tensor(sequence_padding(ids_list), dtype=torch.long, device=device)
    segs = torch.tensor(sequence_padding(segs_list), dtype=torch.long, device=device)
    # UniLM: inputs double as language-model targets.
    return [ids, segs], [ids, segs]
The provided code snippet includes necessary dependencies for implementing the `remove_bucket` function. Write a Python function `def remove_bucket(equation)` to solve the following problem:
去掉冗余的括号
Here is the function:
def remove_bucket(equation):
    """Strip redundant parentheses from `equation`, preserving its value.

    Each paren pair is tentatively replaced by spaces (length-preserving,
    so later pair indices stay valid) and kept only if the value matches.
    NOTE: relies on eval() — only safe for trusted dataset equations.
    """
    open_stack, pairs = [], []
    for i, ch in enumerate(equation):
        if ch == '(':
            open_stack.append(i)
        elif ch == ')':
            pairs.append((open_stack.pop(), i))
    target_value = eval(equation)
    for l, r in pairs:
        candidate = '%s %s %s' % (equation[:l], equation[l + 1:r], equation[r + 1:])
        try:
            if is_equal(eval(candidate.replace(' ', '')), target_value):
                equation = candidate
        except:
            pass
    return equation.replace(' ', '')
20,723 | import division
import json, re
from tqdm import tqdm
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from torch import nn, optim
import torch
from torch.utils.data import DataLoader
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from sympy import Integer
import warnings
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(token_dict, do_lower_case=True)
def collate_fn(batch):
    """Encode (question, equation) pairs into one UniLM training sequence."""
    all_token_ids, all_segment_ids = [], []
    for question, equation, answer in batch:
        token_ids, segment_ids = tokenizer.encode(question, equation, maxlen=maxlen)
        all_token_ids.append(token_ids)
        all_segment_ids.append(segment_ids)
    all_token_ids = torch.tensor(sequence_padding(all_token_ids), dtype=torch.long, device=device)
    all_segment_ids = torch.tensor(sequence_padding(all_segment_ids), dtype=torch.long, device=device)
    return [all_token_ids, all_segment_ids], [all_token_ids, all_segment_ids]
def collate_fn(batch):
    """Encode each [CLS]问题[SEP]算式[SEP] sample for UniLM seq2seq."""
    encoded = [tokenizer.encode(q, eq, maxlen=maxlen) for q, eq, _ in batch]
    token_ids = torch.tensor(sequence_padding([e[0] for e in encoded]), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding([e[1] for e in encoded]), dtype=torch.long, device=device)
    return [token_ids, segment_ids], [token_ids, segment_ids]
20,724 | import numpy as np
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
import torch.optim as optim
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from bert4torch.snippets import ListDataset, sequence_padding
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
from collections import defaultdic
The provided code snippet includes necessary dependencies for implementing the `lcs` function. Write a Python function `def lcs(source, target)` to solve the following problem:
最长公共子序列(source和target的最长非连续子序列) 返回:子序列长度, 映射关系(映射对组成的list) 注意:最长公共子序列可能不止一个,所返回的映射只代表其中一个。
Here is the function:
def lcs(source, target):
    """Longest common (non-contiguous) subsequence of `source` and `target`.

    Returns (length, mapping): `mapping` is a list of (i, j) index pairs
    realising one LCS. The LCS need not be unique; only one is returned.
    """
    # DP table, 1-indexed; defaultdict(int) supplies the zero borders.
    table = defaultdict(int)
    for i, s_ch in enumerate(source, 1):
        for j, t_ch in enumerate(target, 1):
            if s_ch == t_ch:
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                table[i, j] = max(table[i, j - 1], table[i - 1, j])
    length = table[len(source), len(target)]
    # Backtrack from the bottom-right corner (0-indexed positions).
    mapping = []
    i, j = len(source) - 1, len(target) - 1
    while len(mapping) < length:
        if source[i] == target[j]:
            mapping.append((i, j))
            i -= 1
            j -= 1
        elif table[i + 1, j] > table[i, j + 1]:
            j -= 1
        else:
            i -= 1
    return length, mapping[::-1]
20,725 | s, json
import numpy as np
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
import torch.optim as optim
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from bert4torch.snippets import ListDataset, sequence_padding
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
from collections import defaultdict
def subject_split(s):
    """Split an entity string into (name, sense).

    If the subject carries a disambiguation suffix like "名称(义项)",
    return ('名称', '义项'); otherwise return (s, '').

    Fix: the original returned a truncated `return s,` (one-element tuple),
    but the caller unpacks two values (`s, m = subject_split(s)`); it now
    returns both name and sense. Also guards the empty string.
    """
    m = ''
    if s and s[-1] == u')':
        i = s.index(u'(')
        m = s[i + 1:-1]
        s = s[:i]
    return s, m
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(filename)` to solve the following problem:
读取数据集
Here is the function:
def load_data(filename):
    """Load a KgCLUE file (one JSON object per line).

    Each line becomes (question, (subject, predicate, sense, object)).
    """
    samples = []
    with open(filename, encoding='utf-8') as f:
        for line in f:
            item = json.loads(line)
            s, p, o = item['answer'].split(' ||| ')
            s, m = subject_split(s)
            samples.append((item['question'], (s, p, m, ' '.join(o.split()))))
    return samples
20,726 | import numpy as np
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
import torch.optim as optim
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from bert4torch.snippets import ListDataset, sequence_padding
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
from collections import defaultdic
device = 'cuda' if torch.cuda.is_available() else 'cpu'
a = load_data('E:/data/corpus/kg/KgCLUE/train.json')
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
数据生成器 单条样本:[CLS] Q [SEP] S [SEP] P [SEP] M [SEP]
Here is the function:
def collate_fn(batch):
    """Batch builder; single sample: [CLS] Q [SEP] S [SEP] P [SEP] M [SEP]."""
    token_ids_list, segment_ids_list = [], []
    for q, a in batch:
        q_ids = tokenizer.encode(q, maxlen=maxlen // 2 + 1)[0]
        # Answer part: subject + predicate, then sense without its [CLS].
        a_ids = tokenizer.encode(a[0], a[1])[0]
        a_ids += tokenizer.encode(a[2])[0][1:]
        token_ids = (q_ids + a_ids[1:])[:maxlen]
        segment_ids = [0] * len(q_ids) + [1] * (len(token_ids) - len(q_ids))
        token_ids_list.append(token_ids)
        segment_ids_list.append(segment_ids)
    token_ids = torch.tensor(sequence_padding(token_ids_list), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_ids_list), dtype=torch.long, device=device)
    return [token_ids, segment_ids], [token_ids, segment_ids]
20,727 | s, json
import numpy as np
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
import torch.optim as optim
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from bert4torch.snippets import ListDataset, sequence_padding
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
from collections import defaultdic
autoqa = AutoQA(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=maxlen, device=device)
The provided code snippet includes necessary dependencies for implementing the `test_predict` function. Write a Python function `def test_predict(in_file, out_file, topk=1)` to solve the following problem:
输出测试结果到文件 结果文件可以提交到 https://www.cluebenchmarks.com 评测。
Here is the function:
def test_predict(in_file, out_file, topk=1):
    """Run QA inference over *in_file* and write predictions to *out_file*.

    The result file can be submitted to https://www.cluebenchmarks.com
    for evaluation.

    Args:
        in_file: jsonl file with one {'question': ...} object per line.
        out_file: jsonl output; each line gains an 'answer' field
            formatted as "S ||| P ||| O".
        topk: beam width forwarded to the decoder.
    """
    # Context managers guarantee both handles are closed even if decoding
    # raises (the original left `fw` open on any exception); explicit utf-8
    # avoids depending on the platform default codec for Chinese text.
    with open(in_file, encoding='utf-8') as fr, open(out_file, 'w', encoding='utf-8') as fw:
        for l in tqdm(fr):
            l = json.loads(l)
            s, p, m, o = autoqa.generate(l['question'], topk=topk)
            if m:
                s += u'(%s)' % m
            l['answer'] = '%s ||| %s ||| %s' % (s, p, o.split('\t')[0])
            fw.write(json.dumps(l, ensure_ascii=False) + '\n')
20,730 | os
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import re
def process_data():
    """Build the CIPS-SOGOU train/valid splits from WebQA + SogouQA.

    Holds out every third SogouQA sample (under a fixed shuffle) for
    validation and mixes SogouQA with WebQA at roughly a 2:1 ratio for
    training. No-op when the train split already exists.
    """
    if os.path.exists('E:/data/corpus/qa/CIPS-SOGOU/train_data.json'):
        return
    # Annotated corpora; `with` blocks fix the file-handle leaks caused by
    # the original inline open() calls.
    with open('E:/data/corpus/qa/WebQA.json', encoding='utf-8') as f:
        webqa_data = json.load(f)
    with open('E:/data/corpus/qa/SogouQA.json', encoding='utf-8') as f:
        sogou_data = json.load(f)
    # Fixed seed so the valid split is reproducible across runs.
    random_order = list(range(len(sogou_data)))
    np.random.seed(2022)
    np.random.shuffle(random_order)
    # Every third shuffled SogouQA sample goes to valid.
    train_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 != 0]
    valid_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 == 0]
    train_data.extend(train_data)  # duplicate SogouQA, then
    train_data.extend(webqa_data)  # mix SogouQA and WebQA at a 2:1 ratio
    with open('E:/data/corpus/qa/CIPS-SOGOU/train_data.json', 'w', encoding='utf-8') as f:
        json.dump(train_data, f, indent=4)
    with open('E:/data/corpus/qa/CIPS-SOGOU/valid_data.json', 'w', encoding='utf-8') as f:
        json.dump(valid_data, f, indent=4)
20,731 | from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import re
max_qa_len = max_q_len + max_a_len
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(token_dict, do_lower_case=True)
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式: [CLS]篇章[SEP]问题[SEP]答案[SEP]
Here is the function:
def collate_fn(batch):
    """Build a batch laid out as [CLS]passage[SEP]question[SEP]answer[SEP]
    (original: 单条样本格式: [CLS]篇章[SEP]问题[SEP]答案[SEP]).
    """
    batch_token_ids, batch_segment_ids = [], []
    for D in batch:
        question = D['question']
        # All gold answers across this sample's passages.
        answers = [p['answer'] for p in D['passages'] if p['answer']]
        # Train on one randomly chosen passage per sample (data augmentation).
        passage = np.random.choice(D['passages'])['passage']
        # Normalize separator punctuation to a plain comma.
        passage = re.sub(u' |、|;|,', ',', passage)
        final_answer = ''
        for answer in answers:
            # Keep the first answer whose every space-separated piece occurs
            # inside the truncated passage; spaces become commas to match the
            # normalization above.
            if all([a in passage[:max_p_len - 2] for a in answer.split(' ')]):
                final_answer = answer.replace(' ', ',')
                break
        qa_token_ids, qa_segment_ids = tokenizer.encode(question, final_answer, maxlen=max_qa_len + 1)
        p_token_ids, p_segment_ids = tokenizer.encode(passage, maxlen=max_p_len + 1)
        # Drop the [CLS] of the QA part before appending it to the passage.
        token_ids = p_token_ids + qa_token_ids[1:]
        segment_ids = p_segment_ids + qa_segment_ids[1:]
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    # UniLM-style training: the model input doubles as the target.
    return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids]
20,732 | from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import re
reader = ReadingComprehension(
start_id=None,
end_id=tokenizer._token_end_id,
maxlen=max_a_len,
mode='extractive',
device=device
)
The provided code snippet includes necessary dependencies for implementing the `predict_to_file` function. Write a Python function `def predict_to_file(data, filename, topk=1)` to solve the following problem:
将预测结果输出到文件,方便评估
Here is the function:
def predict_to_file(data, filename, topk=1):
    """Write one tab-separated "id<TAB>answer" line per sample, for offline
    evaluation of the extractive reader.
    """
    with open(filename, 'w', encoding='utf-8') as out:
        progress = tqdm(iter(data), desc=u'正在预测(共%s条样本)' % len(data))
        for sample in progress:
            question = sample['question']
            passages = [p['passage'] for p in sample['passages']]
            answer = reader.answer(question, passages, topk)
            # Samples without an extracted answer still emit an (empty) row.
            line = u'%s\t%s\n' % (sample['id'], answer) if answer else u'%s\t\n' % (sample['id'])
            out.write(line)
            out.flush()
20,733 | t4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import glob
= 256
batch_size = 16
epochs = 10000
fig_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
train_dataloader = DataLoader(ListDataset(glob.glob('E:/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
model = build_transformer_model(
config_path,
checkpoint_path,
with_mlm=True,
application='unilm',
keep_tokens=keep_tokens, # 只保留keep_tokens中的字,精简原字表
add_trainer=True
).to(device)
summary(model, input_data=[next(iter(train_dataloader))[0]])
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
autotitle = AutoTitle(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=32, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
just_show()
evaluator = Evaluator()
model.fit(
train_dataloader,
steps_per_epoch=None,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:[CLS]篇章[SEP]答案[SEP]问题[SEP]
Here is the function:
def collate_fn(batch):
    """Build a title-generation batch laid out as [CLS]content[SEP]title[SEP].

    Each THUCNews file stores the title on line 1 and the article body on the
    remaining lines; files without a body line are skipped.
    """
    batch_token_ids, batch_segment_ids = [], []
    for txt in batch:
        # `with` fixes the file-handle leak of the original bare open().read().
        with open(txt, encoding='utf-8') as f:
            text = f.read()
        text = text.split('\n')
        if len(text) > 1:
            title = text[0]
            content = '\n'.join(text[1:])
            token_ids, segment_ids = tokenizer.encode(content, title, maxlen=maxlen)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    # UniLM-style training: the model input doubles as the target.
    return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids]
20,734 | t4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import glob
= 256
batch_size = 16
epochs = 10000
fig_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
train_dataloader = DataLoader(ListDataset(glob.glob('E:/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
model = build_transformer_model(
config_path,
checkpoint_path,
with_mlm=True,
application='unilm',
keep_tokens=keep_tokens, # 只保留keep_tokens中的字,精简原字表
add_trainer=True
).to(device)
summary(model, input_data=[next(iter(train_dataloader))[0]])
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
autotitle = AutoTitle(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=32, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
just_show()
evaluator = Evaluator()
model.fit(
train_dataloader,
steps_per_epoch=None,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
def just_show():
    """Print generated titles for two fixed demo passages (quick sanity check
    run after each epoch by the Evaluator callback)."""
    s1 = u'夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及 时 就医 。'
    s2 = u'8月28日,网络爆料称,华住集团旗下连锁酒店用户数据疑似发生泄露。从卖家发布的内容看,数据包含华住旗下汉庭、禧玥、桔子、宜必思等10余个品牌酒店的住客信息。泄露的信息包括华住官网注册资料、酒店入住登记的身份信息及酒店开房记录,住客姓名、手机号、邮箱、身份证号、登录账号密码等。卖家对这个约5亿条数据打包出售。第三方安全平台威胁猎人对信息出售者提供的三万条数据进行验证,认为数据真实性非常高。当天下午 ,华 住集 团发声明称,已在内部迅速开展核查,并第一时间报警。当晚,上海警方消息称,接到华住集团报案,警方已经介入调查。'
    # "生成标题:" = "generated title:"
    for s in [s1, s2]:
        print(u'生成标题:', autotitle.generate(s))
20,735 | json, os
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
def process_data():
    """Slice (passage-fragment, question, answer) triples out of WebQA +
    SogouQA and write reproducible train/valid list-format splits for
    CIPS-SOGOU. No-op when the train split already exists.
    """
    if os.path.exists('E:/data/corpus/qa/CIPS-SOGOU/train_data_list_format.json'):
        return
    # Annotated corpora; `with` blocks fix the file-handle leaks caused by
    # the original inline open() calls.
    with open('E:/data/corpus/qa/WebQA.json', encoding='utf-8') as f:
        webqa_data = json.load(f)
    with open('E:/data/corpus/qa/SogouQA.json', encoding='utf-8') as f:
        sogou_data = json.load(f)
    # Keep only passage fragments that actually contain the labelled answer.
    seps, strips = u'\n。!?!?;;,, ', u';;,, '
    data = []
    for d in webqa_data + sogou_data:
        for p in d['passages']:
            if p['answer']:
                for t in text_segmentate(p['passage'], max_p_len - 2, seps, strips):
                    if p['answer'] in t:
                        data.append((t, d['question'], p['answer']))
    del webqa_data
    del sogou_data
    # Fixed seed so the valid split is reproducible across runs.
    random_order = list(range(len(data)))
    np.random.seed(2022)
    np.random.shuffle(random_order)
    # Every tenth shuffled sample goes to valid.
    train_data = [data[j] for i, j in enumerate(random_order) if i % 10 != 0]
    valid_data = [data[j] for i, j in enumerate(random_order) if i % 10 == 0]
    # encoding='utf-8' matches the sibling process_data and avoids relying on
    # the platform default codec.
    with open('E:/data/corpus/qa/CIPS-SOGOU/train_data_list_format.json', 'w', encoding='utf-8') as f:
        json.dump(train_data, f, indent=4)
    with open('E:/data/corpus/qa/CIPS-SOGOU/valid_data_list_format.json', 'w', encoding='utf-8') as f:
        json.dump(valid_data, f, indent=4)
20,736 | from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
max_q_len = 64
max_a_len = 16
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(token_dict, do_lower_case=True)
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:[CLS]篇章[SEP]答案[SEP]问题[SEP]
Here is the function:
def collate_fn(batch):
    """Assemble a batch laid out as [CLS]passage[SEP]answer[SEP]question[SEP],
    with the passage in segment 0 and answer+question in segment 1.
    """
    all_token_ids, all_segment_ids = [], []
    for (passage, question, answer) in batch:
        passage_ids = tokenizer.encode(passage, maxlen=max_p_len + 1)[0]
        answer_ids = tokenizer.encode(answer, maxlen=max_a_len)[0]
        question_ids = tokenizer.encode(question, maxlen=max_q_len)[0]
        # Drop the [CLS] of answer and question before concatenating.
        token_ids = passage_ids + answer_ids[1:] + question_ids[1:]
        segment_ids = [0] * len(passage_ids) + [1] * (len(token_ids) - len(passage_ids))
        all_token_ids.append(token_ids)
        all_segment_ids.append(segment_ids)
    token_tensor = torch.tensor(sequence_padding(all_token_ids), dtype=torch.long, device=device)
    segment_tensor = torch.tensor(sequence_padding(all_segment_ids), dtype=torch.long, device=device)
    # UniLM-style training: the model input doubles as the target.
    return [token_tensor, segment_tensor], [token_tensor, segment_tensor]
20,737 | from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
qag = QuestionAnswerGeneration(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=max_q_len, device=device)
The provided code snippet includes necessary dependencies for implementing the `predict_to_file` function. Write a Python function `def predict_to_file(data, filename, topk=1)` to solve the following problem:
将预测结果输出到文件,方便评估
Here is the function:
def predict_to_file(data, filename, topk=1):
    """Write "question<TAB>answer<TAB>passage" lines for every sample, for
    offline evaluation.

    NOTE(review): `topk` is accepted for interface compatibility but is not
    forwarded to the generator, mirroring the original behaviour.
    """
    with open(filename, 'w', encoding='utf-8') as out:
        for sample in tqdm(iter(data), desc=u'正在预测(共%s条样本)' % len(data)):
            question, answer = qag.generate(sample[0])
            out.write('%s\t%s\t%s\n' % (question, answer, sample[0]))
            out.flush()
20,738 |
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:content:[CLS]文章[SEP] tgt: [CLS]标题[SEP]
Here is the function:
def collate_fn(batch):
    """Encoder-decoder batch: source is [CLS]article[SEP], target is
    [CLS]title[SEP] (original: content:[CLS]文章[SEP] tgt: [CLS]标题[SEP]).
    """
    batch_content_ids, batch_titile_ids = [], []
    for title, content in batch:
        token_ids, _ = tokenizer.encode(content, maxlen=max_c_len)
        batch_content_ids.append(token_ids)
        token_ids, _ = tokenizer.encode(title, maxlen=max_t_len)
        # Prepend a 0 so the input/label shift below stays aligned.
        batch_titile_ids.append([0] + token_ids)
    batch_content_ids = torch.tensor(sequence_padding(batch_content_ids, value=pad_token_id), dtype=torch.long, device=device)
    batch_titile_ids = torch.tensor(sequence_padding(batch_titile_ids, value=pad_token_id), dtype=torch.long, device=device)
    # Decoder input = title shifted right; labels = title shifted left.
    return [[batch_content_ids], [batch_titile_ids[:, :-1]]], batch_titile_ids[:, 1:].flatten()
20,740 | import torch
from torch.utils.data import DataLoader
from model import uie_model, tokenizer, custom_model
from bert4torch.snippets import seed_everything, sequence_padding
from bert4torch.callbacks import Callback
from torch import nn
from torch.utils.data import Dataset
import json
from utils import get_bool_ids_greater_than, get_span
from random import sample
max_seq_len = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def map_offset(ori_offset, offset_mapping):
    """Map a character offset to the index of the token span containing it.

    Spans are half-open intervals [start, end); returns -1 when no span
    covers *ori_offset*.
    """
    return next(
        (idx for idx, span in enumerate(offset_mapping) if span[0] <= ori_offset < span[1]),
        -1,
    )
tokenizer = Tokenizer(dict_path, do_lower_case=True)
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
example: {title, prompt, content, result_list}
Here is the function:
def collate_fn(batch):
    """UIE batch builder; each example is {title, prompt, content, result_list}.

    Encodes (prompt, content) pairs and converts the character-level
    result_list spans into token-level start/end pointer labels.
    """
    batch_token_ids, batch_token_type_ids, batch_start_ids, batch_end_ids = [], [], [], []
    for example in batch:
        token_ids, token_type_ids, offset_mapping = tokenizer.encode(example["prompt"], example["content"],
                                                                     maxlen=max_seq_len, return_offsets='transformers')
        bias = 0
        for index in range(len(offset_mapping)):
            if index == 0:
                # index 0 is [CLS]; never part of the bias computation.
                continue
            mapping = offset_mapping[index]
            # NOTE(review): the first (0, 0) entry after the prompt appears to
            # be the [SEP] separator; its index becomes the bias that shifts
            # content offsets past the prompt — confirm against the tokenizer.
            if mapping[0] == 0 and mapping[1] == 0 and bias == 0:
                bias = index
            if mapping[0] == 0 and mapping[1] == 0:
                continue
            # Shift content offsets in place so they line up with
            # prompt-relative character positions from result_list.
            offset_mapping[index][0] += bias
            offset_mapping[index][1] += bias
        start_ids = [0 for _ in range(len(token_ids))]
        end_ids = [0 for _ in range(len(token_ids))]
        for item in example["result_list"]:
            # Map (shifted) char offsets to token indices; `end - 1` because
            # result_list ends are exclusive.
            start = map_offset(item["start"] + bias, offset_mapping)
            end = map_offset(item["end"] - 1 + bias, offset_mapping)
            start_ids[start] = 1.0
            end_ids[end] = 1.0
        batch_token_ids.append(token_ids)
        batch_token_type_ids.append(token_type_ids)
        batch_start_ids.append(start_ids)
        batch_end_ids.append(end_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_token_type_ids = torch.tensor(sequence_padding(batch_token_type_ids), dtype=torch.long, device=device)
    batch_start_ids = torch.tensor(sequence_padding(batch_start_ids), dtype=torch.float, device=device)
    batch_end_ids = torch.tensor(sequence_padding(batch_end_ids), dtype=torch.float, device=device)
    return [batch_token_ids, batch_token_type_ids], [batch_start_ids, batch_end_ids]
20,741 | import contextlib
import functools
import json
import logging
import math
import random
import re
import shutil
import threading
import time
from functools import partial
import colorlog
import numpy as np
import torch
from colorama import Back, Fore
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `get_span` function. Write a Python function `def get_span(start_ids, end_ids, with_prob=False)` to solve the following problem:
Get span set from position start and end list. Args: start_ids (List[int]/List[tuple]): The start index list. end_ids (List[int]/List[tuple]): The end index list. with_prob (bool): If True, each element for start_ids and end_ids is a tuple aslike: (index, probability). Returns: set: The span set without overlapping, every id can only be used once .
Here is the function:
def get_span(start_ids, end_ids, with_prob=False):
    """
    Pair up start and end positions into a set of non-overlapping spans.

    Args:
        start_ids (List[int]/List[tuple]): candidate start indices.
        end_ids (List[int]/List[tuple]): candidate end indices.
        with_prob (bool): when True, each element of start_ids/end_ids is an
            (index, probability) tuple.

    Returns:
        set: span pairs (start, end); every id is used at most once.
    """
    if with_prob:
        starts = sorted(start_ids, key=lambda x: x[0])
        ends = sorted(end_ids, key=lambda x: x[0])
    else:
        starts = sorted(start_ids)
        ends = sorted(end_ids)
    # Two-pointer sweep; later starts overwrite earlier ones for the same end.
    pairs = {}
    si = ei = 0
    while si < len(starts) and ei < len(ends):
        s_pos = starts[si][0] if with_prob else starts[si]
        e_pos = ends[ei][0] if with_prob else ends[ei]
        if s_pos <= e_pos:
            pairs[ends[ei]] = starts[si]
            si += 1
            if s_pos == e_pos:
                ei += 1
        else:
            ei += 1
    return {(start, end) for end, start in pairs.items()}
20,742 | import contextlib
import functools
import json
import logging
import math
import random
import re
import shutil
import threading
import time
from functools import partial
import colorlog
import numpy as np
import torch
from colorama import Back, Fore
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `get_bool_ids_greater_than` function. Write a Python function `def get_bool_ids_greater_than(probs, limit=0.5, return_prob=False)` to solve the following problem:
Get idx of the last dimension in probability arrays, which is greater than a limitation. Args: probs (List[List[float]]): The input probability arrays. limit (float): The limitation for probability. return_prob (bool): Whether to return the probability Returns: List[List[int]]: The index of the last dimension meet the conditions.
Here is the function:
def get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):
    """
    Collect indices along the last dimension whose probability exceeds *limit*.

    Args:
        probs (List[List[float]]): input probability arrays.
        limit (float): probability threshold (strict `>`).
        return_prob (bool): when True, return (index, probability) tuples.

    Returns:
        List[List[int]]: indices (or tuples) meeting the condition, one list
        per row for multi-dimensional input.
    """
    probs = np.array(probs)
    if len(probs.shape) > 1:
        # Recurse over the leading dimension, one row at a time.
        return [get_bool_ids_greater_than(row, limit, return_prob) for row in probs]
    if return_prob:
        return [(i, p) for i, p in enumerate(probs) if p > limit]
    return [i for i, p in enumerate(probs) if p > limit]
20,743 | import contextlib
import functools
import json
import logging
import math
import random
import re
import shutil
import threading
import time
from functools import partial
import colorlog
import numpy as np
import torch
from colorama import Back, Fore
from tqdm import tqdm
def get_id_and_prob(spans, offset_map):
    """Convert token-level spans into character-level (start, end) pairs plus
    the joint probability of each span.

    Note: the prompt entries of *offset_map* are shifted in place, exactly as
    in the original implementation.
    """
    # The prompt occupies offset_map[1:] up to the first [0, 0] entry.
    prompt_length = 0
    for entry in offset_map[1:]:
        if entry == [0, 0]:
            break
        prompt_length += 1
    # Re-base the prompt offsets so content offsets start at zero.
    shift = prompt_length + 1
    for i in range(1, prompt_length + 1):
        offset_map[i][0] -= shift
        offset_map[i][1] -= shift
    sentence_id, prob = [], []
    for start, end in spans:
        prob.append(start[1] * end[1])
        sentence_id.append((offset_map[start[0]][0], offset_map[end[0]][1]))
    return sentence_id, prob
20,744 | import contextlib
import functools
import json
import logging
import math
import random
import re
import shutil
import threading
import time
from functools import partial
import colorlog
import numpy as np
import torch
from colorama import Back, Fore
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `cut_chinese_sent` function. Write a Python function `def cut_chinese_sent(para)` to solve the following problem:
Cut the Chinese sentences more precisely, reference to "https://blog.csdn.net/blmoistawinde/article/details/82379256".
Here is the function:
def cut_chinese_sent(para):
    """
    Split Chinese text into sentences, keeping the terminating punctuation
    with each sentence. Reference:
    "https://blog.csdn.net/blmoistawinde/article/details/82379256".
    """
    # Insert a newline after every sentence terminator (plain terminators,
    # ASCII ellipses, unicode ellipses, and terminator+closing-quote), then
    # split on those newlines.
    rules = (
        (r'([。!?\?])([^”’])', r'\1\n\2'),
        (r'(\.{6})([^”’])', r'\1\n\2'),
        (r'(\…{2})([^”’])', r'\1\n\2'),
        (r'([。!?\?][”’])([^,。!?\?])', r'\1\n\2'),
    )
    for pattern, repl in rules:
        para = re.sub(pattern, repl, para)
    return para.rstrip().split("\n")
20,745 | import contextlib
import functools
import json
import logging
import math
import random
import re
import shutil
import threading
import time
from functools import partial
import colorlog
import numpy as np
import torch
from colorama import Back, Fore
from tqdm import tqdm
def dbc2sbc(s):
    """Convert full-width (DBC) characters in *s* to half-width ASCII.

    The ideographic space U+3000 maps to a normal space, full-width forms
    U+FF01..U+FF5E map down by 0xFEE0 into ASCII 0x21..0x7E, and every other
    character is kept unchanged.

    Bug fix: the original mapped U+3000 to code 0x20 and then rejected it
    with the `0x21 <= code <= 0x7e` range check, so the ideographic space was
    never actually converted.
    """
    out = []
    for char in s:
        code = ord(char)
        if code == 0x3000:
            out.append(' ')  # ideographic space -> ASCII space
        elif 0x0021 <= code - 0xfee0 <= 0x7e:
            out.append(chr(code - 0xfee0))
        else:
            out.append(char)
    # ''.join avoids the quadratic `rs += char` concatenation of the original.
    return "".join(out)
20,746 | import contextlib
import functools
import json
import logging
import math
import random
import re
import shutil
import threading
import time
from functools import partial
import colorlog
import numpy as np
import torch
from colorama import Back, Fore
from tqdm import tqdm
logger = Logger()
tqdm = partial(tqdm, bar_format=BAR_FORMAT, ascii=BAR_TYPE[0], leave=False)
The provided code snippet includes necessary dependencies for implementing the `get_path_from_url` function. Write a Python function `def get_path_from_url(url, root_dir, check_exist=True, decompress=True)` to solve the following problem:
Download from given url to root_dir. if file or directory specified by url is exists under root_dir, return the path directly, otherwise download from url and decompress it, return the path. Args: url (str): download url root_dir (str): root dir for downloading, it should be WEIGHTS_HOME or DATASET_HOME decompress (bool): decompress zip or tar file. Default is `True` Returns: str: a local path to save downloaded models & weights & datasets.
Here is the function:
def get_path_from_url(url,
                      root_dir,
                      check_exist=True,
                      decompress=True):
    """ Download from given url to root_dir.
    if file or directory specified by url is exists under
    root_dir, return the path directly, otherwise download
    from url and decompress it, return the path.
    Args:
        url (str): download url
        root_dir (str): root dir for downloading, it should be
            WEIGHTS_HOME or DATASET_HOME
        check_exist (bool): when True, reuse an already-downloaded file at
            the target path instead of re-downloading it.
        decompress (bool): decompress zip or tar file. Default is `True`
    Returns:
        str: a local path to save downloaded models & weights & datasets.
    """
    # Local imports keep these dependencies off the module import path.
    import os.path
    import os
    import tarfile
    import zipfile

    def is_url(path):
        """
        Whether path is URL.
        Args:
            path (string): URL string or not.
        """
        return path.startswith('http://') or path.startswith('https://')

    def _map_path(url, root_dir):
        # parse path after download under root_dir
        fname = os.path.split(url)[-1]
        fpath = fname
        return os.path.join(root_dir, fpath)

    def _get_download(url, fullname):
        # Stream `url` into `fullname`; returns the path on success, False on
        # a connection error, and raises RuntimeError on a non-200 status.
        import requests
        # using requests.get method
        fname = os.path.basename(fullname)
        try:
            req = requests.get(url, stream=True)
        except Exception as e:  # requests.exceptions.ConnectionError
            logger.info("Downloading {} from {} failed with exception {}".format(
                fname, url, str(e)))
            return False
        if req.status_code != 200:
            raise RuntimeError("Downloading from {} failed with code "
                               "{}!".format(url, req.status_code))
        # For protecting download interupted, download to
        # tmp_fullname firstly, move tmp_fullname to fullname
        # after download finished
        tmp_fullname = fullname + "_tmp"
        total_size = req.headers.get('content-length')
        with open(tmp_fullname, 'wb') as f:
            if total_size:
                # Known size: show a KB-granularity progress bar.
                with tqdm(total=(int(total_size) + 1023) // 1024, unit='KB') as pbar:
                    for chunk in req.iter_content(chunk_size=1024):
                        f.write(chunk)
                        pbar.update(1)
            else:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
        shutil.move(tmp_fullname, fullname)
        return fullname

    def _download(url, path):
        """
        Download from url, save to path.
        url (str): download url
        path (str): download to given path
        """
        if not os.path.exists(path):
            os.makedirs(path)
        fname = os.path.split(url)[-1]
        fullname = os.path.join(path, fname)
        retry_cnt = 0
        logger.info("Downloading {} from {}".format(fname, url))
        DOWNLOAD_RETRY_LIMIT = 3
        while not os.path.exists(fullname):
            if retry_cnt < DOWNLOAD_RETRY_LIMIT:
                retry_cnt += 1
            else:
                raise RuntimeError("Download from {} failed. "
                                   "Retry limit reached".format(url))
            # Connection failures back off one second before retrying.
            if not _get_download(url, fullname):
                time.sleep(1)
                continue
        return fullname

    def _uncompress_file_zip(filepath):
        # Extract a zip next to `filepath` and return the extraction root.
        with zipfile.ZipFile(filepath, 'r') as files:
            file_list = files.namelist()
            file_dir = os.path.dirname(filepath)
            if _is_a_single_file(file_list):
                rootpath = file_list[0]
                uncompressed_path = os.path.join(file_dir, rootpath)
                files.extractall(file_dir)
            elif _is_a_single_dir(file_list):
                # `strip(os.sep)` to remove `os.sep` in the tail of path
                rootpath = os.path.splitext(file_list[0].strip(os.sep))[0].split(
                    os.sep)[-1]
                uncompressed_path = os.path.join(file_dir, rootpath)
                files.extractall(file_dir)
            else:
                # Multiple top-level entries: extract into a directory named
                # after the archive itself.
                rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
                uncompressed_path = os.path.join(file_dir, rootpath)
                if not os.path.exists(uncompressed_path):
                    os.makedirs(uncompressed_path)
                files.extractall(os.path.join(file_dir, rootpath))
        return uncompressed_path

    def _is_a_single_file(file_list):
        # True when the archive holds exactly one top-level file.
        if len(file_list) == 1 and file_list[0].find(os.sep) < 0:
            return True
        return False

    def _is_a_single_dir(file_list):
        # True when every archive member lives under one top-level directory.
        new_file_list = []
        for file_path in file_list:
            # Normalize both separator styles to the platform separator.
            if '/' in file_path:
                file_path = file_path.replace('/', os.sep)
            elif '\\' in file_path:
                file_path = file_path.replace('\\', os.sep)
            new_file_list.append(file_path)
        file_name = new_file_list[0].split(os.sep)[0]
        for i in range(1, len(new_file_list)):
            if file_name != new_file_list[i].split(os.sep)[0]:
                return False
        return True

    def _uncompress_file_tar(filepath, mode="r:*"):
        # Extract a tar archive next to `filepath`; same layout rules as
        # _uncompress_file_zip.
        with tarfile.open(filepath, mode) as files:
            file_list = files.getnames()
            file_dir = os.path.dirname(filepath)
            if _is_a_single_file(file_list):
                rootpath = file_list[0]
                uncompressed_path = os.path.join(file_dir, rootpath)
                files.extractall(file_dir)
            elif _is_a_single_dir(file_list):
                rootpath = os.path.splitext(file_list[0].strip(os.sep))[0].split(
                    os.sep)[-1]
                uncompressed_path = os.path.join(file_dir, rootpath)
                files.extractall(file_dir)
            else:
                rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
                uncompressed_path = os.path.join(file_dir, rootpath)
                if not os.path.exists(uncompressed_path):
                    os.makedirs(uncompressed_path)
                files.extractall(os.path.join(file_dir, rootpath))
        return uncompressed_path

    def _decompress(fname):
        """
        Decompress for zip and tar file
        """
        logger.info("Decompressing {}...".format(fname))
        # For protecting decompressing interupted,
        # decompress to fpath_tmp directory firstly, if decompress
        # successed, move decompress files to fpath and delete
        # fpath_tmp and remove download compress file.
        if tarfile.is_tarfile(fname):
            uncompressed_path = _uncompress_file_tar(fname)
        elif zipfile.is_zipfile(fname):
            uncompressed_path = _uncompress_file_zip(fname)
        else:
            raise TypeError("Unsupport compress file type {}".format(fname))
        return uncompressed_path

    assert is_url(url), "downloading from {} not a url".format(url)
    fullpath = _map_path(url, root_dir)
    if os.path.exists(fullpath) and check_exist:
        logger.info("Found {}".format(fullpath))
    else:
        # _download writes into root_dir under the url basename, which is the
        # same path _map_path computed above.
        fullpath = _download(url, root_dir)
    if decompress and (tarfile.is_tarfile(fullpath) or
                       zipfile.is_zipfile(fullpath)):
        fullpath = _decompress(fullpath)
    return fullpath
20,747 | import re
import json
en2ch = {
'ORG':'机构',
'PER':'人名',
'LOC':'籍贯'
}
def preprocess(input_path, save_path, mode):
if not os.path.exists(save_path):
os.makedirs(save_path)
data_path = os.path.join(save_path, mode + ".json")
result = []
tmp = {}
tmp['id'] = 0
tmp['text'] = ''
tmp['relations'] = []
tmp['entities'] = []
# =======先找出句子和句子中的所有实体和类型=======
with open(input_path,'r',encoding='utf-8') as fp:
lines = fp.readlines()
texts = []
entities = []
words = []
entity_tmp = []
entities_tmp = []
entity_label = ''
for line in lines:
line = line.strip().split(" ")
if len(line) == 2:
word = line[0]
label = line[1]
words.append(word)
if "B-" in label:
entity_tmp.append(word)
entity_label = en2ch[label.split("-")[-1]]
elif "I-" in label:
entity_tmp.append(word)
if (label == 'O') and entity_tmp:
if ("".join(entity_tmp), entity_label) not in entities_tmp:
entities_tmp.append(("".join(entity_tmp), entity_label))
entity_tmp, entity_label = [], ''
else:
if entity_tmp and (("".join(entity_tmp), entity_label) not in entities_tmp):
entities_tmp.append(("".join(entity_tmp), entity_label))
entity_tmp, entity_label = [], ''
texts.append("".join(words))
entities.append(entities_tmp)
words = []
entities_tmp = []
# ==========================================
# =======找出句子中实体的位置=======
i = 0
for text,entity in zip(texts, entities):
if entity:
ltmp = []
for ent,type in entity:
for span in re.finditer(ent, text):
start = span.start()
end = span.end()
ltmp.append((type, start, end, ent))
# print(ltmp)
ltmp = sorted(ltmp, key=lambda x:(x[1],x[2]))
for j in range(len(ltmp)):
# tmp['entities'].append(["".format(str(j)), ltmp[j][0], ltmp[j][1], ltmp[j][2], ltmp[j][3]])
tmp['entities'].append({"id":j, "start_offset":ltmp[j][1], "end_offset":ltmp[j][2], "label":ltmp[j][0]})
else:
tmp['entities'] = []
tmp['id'] = i
tmp['text'] = text
result.append(tmp)
tmp = {}
tmp['id'] = 0
tmp['text'] = ''
tmp['relations'] = []
tmp['entities'] = []
i += 1
with open(data_path, 'w', encoding='utf-8') as fp:
fp.write("\n".join([json.dumps(i, ensure_ascii=False) for i in result])) | null |
20,748 | import time
import argparse
import json
from decimal import Decimal
import numpy as np
from bert4torch.snippets import seed_everything
from utils import convert_ext_examples, convert_cls_examples, logger
logger = Logger()
def convert_cls_examples(raw_examples, prompt_prefix, options):
examples = []
logger.info(f"Converting doccano data...")
with tqdm(total=len(raw_examples)) as pbar:
for line in raw_examples:
items = json.loads(line)
# Compatible with doccano >= 1.6.2
if "data" in items.keys():
text, labels = items["data"], items["label"]
else:
text, labels = items["text"], items["label"]
random.shuffle(options)
prompt = ""
sep = ","
for option in options:
prompt += option
prompt += sep
prompt = prompt_prefix + "[" + prompt.rstrip(sep) + "]"
result_list = []
example = {
"content": text,
"result_list": result_list,
"prompt": prompt
}
for label in labels:
start = prompt.rfind(label[0]) - len(prompt) - 1
end = start + len(label)
result = {"text": label, "start": start, "end": end}
example["result_list"].append(result)
examples.append(example)
return examples
def convert_ext_examples(raw_examples, negative_ratio, is_train=True):
texts = []
entity_examples = []
relation_examples = []
entity_prompts = []
relation_prompts = []
entity_label_set = []
entity_name_set = []
predicate_set = []
subject_goldens = []
logger.info(f"Converting doccano data...")
with tqdm(total=len(raw_examples)) as pbar:
for line in raw_examples:
items = json.loads(line)
entity_id = 0
if "data" in items.keys():
relation_mode = False
if isinstance(items["label"],
dict) and "entities" in items["label"].keys():
relation_mode = True
text = items["data"]
entities = []
if not relation_mode:
# Export file in JSONL format which doccano < 1.7.0
for item in items["label"]:
entity = {
"id": entity_id,
"start_offset": item[0],
"end_offset": item[1],
"label": item[2]
}
entities.append(entity)
entity_id += 1
else:
# Export file in JSONL format for relation labeling task which doccano < 1.7.0
for item in items["label"]["entities"]:
entity = {
"id": entity_id,
"start_offset": item["start_offset"],
"end_offset": item["end_offset"],
"label": item["label"]
}
entities.append(entity)
entity_id += 1
relations = []
else:
# Export file in JSONL format which doccano >= 1.7.0
if "label" in items.keys():
text = items["text"]
entities = []
for item in items["label"]:
entity = {
"id": entity_id,
"start_offset": item[0],
"end_offset": item[1],
"label": item[2]
}
entities.append(entity)
entity_id += 1
relations = []
else:
# Export file in JSONL (relation) format
text, relations, entities = items["text"], items[
"relations"], items["entities"]
texts.append(text)
entity_example = []
entity_prompt = []
entity_example_map = {}
entity_map = {} # id to entity name
for entity in entities:
entity_name = text[entity["start_offset"]:entity["end_offset"]]
entity_map[entity["id"]] = {
"name": entity_name,
"start": entity["start_offset"],
"end": entity["end_offset"]
}
entity_label = entity["label"]
result = {
"text": entity_name,
"start": entity["start_offset"],
"end": entity["end_offset"]
}
if entity_label not in entity_example_map.keys():
entity_example_map[entity_label] = {
"content": text,
"result_list": [result],
"prompt": entity_label
}
else:
entity_example_map[entity_label]["result_list"].append(
result)
if entity_label not in entity_label_set:
entity_label_set.append(entity_label)
if entity_name not in entity_name_set:
entity_name_set.append(entity_name)
entity_prompt.append(entity_label)
for v in entity_example_map.values():
entity_example.append(v)
entity_examples.append(entity_example)
entity_prompts.append(entity_prompt)
subject_golden = []
relation_example = []
relation_prompt = []
relation_example_map = {}
for relation in relations:
predicate = relation["type"]
subject_id = relation["from_id"]
object_id = relation["to_id"]
# The relation prompt is constructed as follows:
# subject + "的" + predicate
prompt = entity_map[subject_id]["name"] + "的" + predicate
if entity_map[subject_id]["name"] not in subject_golden:
subject_golden.append(entity_map[subject_id]["name"])
result = {
"text": entity_map[object_id]["name"],
"start": entity_map[object_id]["start"],
"end": entity_map[object_id]["end"]
}
if prompt not in relation_example_map.keys():
relation_example_map[prompt] = {
"content": text,
"result_list": [result],
"prompt": prompt
}
else:
relation_example_map[prompt]["result_list"].append(result)
if predicate not in predicate_set:
predicate_set.append(predicate)
relation_prompt.append(prompt)
for v in relation_example_map.values():
relation_example.append(v)
relation_examples.append(relation_example)
relation_prompts.append(relation_prompt)
subject_goldens.append(subject_golden)
pbar.update(1)
def concat_examples(positive_examples, negative_examples, negative_ratio):
examples = []
if math.ceil(len(negative_examples) /
len(positive_examples)) <= negative_ratio:
examples = positive_examples + negative_examples
else:
# Random sampling the negative examples to ensure overall negative ratio unchanged.
idxs = random.sample(
range(0, len(negative_examples)),
negative_ratio * len(positive_examples))
negative_examples_sampled = []
for idx in idxs:
negative_examples_sampled.append(negative_examples[idx])
examples = positive_examples + negative_examples_sampled
return examples
logger.info(f"Adding negative samples for first stage prompt...")
positive_examples, negative_examples = add_negative_example(
entity_examples, texts, entity_prompts, entity_label_set,
negative_ratio)
if len(positive_examples) == 0:
all_entity_examples = []
elif is_train:
all_entity_examples = concat_examples(positive_examples,
negative_examples, negative_ratio)
else:
all_entity_examples = positive_examples + negative_examples
all_relation_examples = []
if len(predicate_set) != 0:
if is_train:
logger.info(f"Adding negative samples for second stage prompt...")
relation_prompt_set = construct_relation_prompt_set(entity_name_set,
predicate_set)
positive_examples, negative_examples = add_negative_example(
relation_examples, texts, relation_prompts, relation_prompt_set,
negative_ratio)
all_relation_examples = concat_examples(
positive_examples, negative_examples, negative_ratio)
else:
logger.info(f"Adding negative samples for second stage prompt...")
relation_examples = add_full_negative_example(
relation_examples, texts, relation_prompts, predicate_set,
subject_goldens)
all_relation_examples = [
r
for r in relation_example
for relation_example in relation_examples
]
return all_entity_examples, all_relation_examples
def do_convert():
seed_everything(args.seed)
tic_time = time.time()
if not os.path.exists(args.doccano_file):
raise ValueError("Please input the correct path of doccano file.")
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if len(args.splits) != 0 and len(args.splits) != 3:
raise ValueError("Only []/ len(splits)==3 accepted for splits.")
def _check_sum(splits):
return Decimal(str(splits[0])) + Decimal(str(splits[1])) + Decimal(
str(splits[2])) == Decimal("1")
if len(args.splits) == 3 and not _check_sum(args.splits):
raise ValueError(
"Please set correct splits, sum of elements in splits should be equal to 1."
)
with open(args.doccano_file, "r", encoding="utf-8") as f:
raw_examples = f.readlines()
def _create_ext_examples(examples,
negative_ratio=0,
shuffle=False,
is_train=True):
entities, relations = convert_ext_examples(
examples, negative_ratio, is_train=is_train)
examples = entities + relations
if shuffle:
indexes = np.random.permutation(len(examples))
examples = [examples[i] for i in indexes]
return examples
def _create_cls_examples(examples, prompt_prefix, options, shuffle=False):
examples = convert_cls_examples(examples, prompt_prefix, options)
if shuffle:
indexes = np.random.permutation(len(examples))
examples = [examples[i] for i in indexes]
return examples
def _save_examples(save_dir, file_name, examples):
count = 0
save_path = os.path.join(save_dir, file_name)
if not examples:
logger.info("Skip saving %d examples to %s." % (0, save_path))
return
with open(save_path, "w", encoding="utf-8") as f:
for example in examples:
f.write(json.dumps(example, ensure_ascii=False) + "\n")
count += 1
logger.info("Save %d examples to %s." % (count, save_path))
if len(args.splits) == 0:
if args.task_type == "ext":
examples = _create_ext_examples(raw_examples, args.negative_ratio,
args.is_shuffle)
else:
examples = _create_cls_examples(raw_examples, args.prompt_prefix,
args.options, args.is_shuffle)
_save_examples(args.save_dir, "train.txt", examples)
else:
if args.is_shuffle:
indexes = np.random.permutation(len(raw_examples))
raw_examples = [raw_examples[i] for i in indexes]
i1, i2, _ = args.splits
p1 = int(len(raw_examples) * i1)
p2 = int(len(raw_examples) * (i1 + i2))
if args.task_type == "ext":
train_examples = _create_ext_examples(
raw_examples[:p1], args.negative_ratio, args.is_shuffle)
dev_examples = _create_ext_examples(
raw_examples[p1:p2], -1, is_train=False)
test_examples = _create_ext_examples(
raw_examples[p2:], -1, is_train=False)
else:
train_examples = _create_cls_examples(
raw_examples[:p1], args.prompt_prefix, args.options)
dev_examples = _create_cls_examples(
raw_examples[p1:p2], args.prompt_prefix, args.options)
test_examples = _create_cls_examples(
raw_examples[p2:], args.prompt_prefix, args.options)
_save_examples(args.save_dir, "train.txt", train_examples)
_save_examples(args.save_dir, "dev.txt", dev_examples)
_save_examples(args.save_dir, "test.txt", test_examples)
logger.info('Finished! It takes %.2f seconds' % (time.time() - tic_time)) | null |
20,749 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import EfficientGlobalPointer
maxlen = 256
categories_label2id = {"LOC": 0, "ORG": 1, "PER": 2}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
batch_token_ids, batch_labels = [], []
for i, (text, text_labels) in enumerate(batch):
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros((len(categories_label2id), maxlen, maxlen))
for start, end, label in text_labels:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
label = categories_label2id[label]
labels[label, start, end] = 1
batch_token_ids.append(token_ids) # 前面已经限制了长度
batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, seq_dims=3), dtype=torch.long, device=device)
return batch_token_ids, batch_label
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for i, (text, text_labels) in enumerate(batch):
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros((len(categories_label2id), maxlen, maxlen))
for start, end, label in text_labels:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
label = categories_label2id[label]
labels[label, start, end] = 1
batch_token_ids.append(token_ids) # 前面已经限制了长度
batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, seq_dims=3), dtype=torch.long, device=device)
return batch_token_ids, batch_labels | null |
20,750 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import EfficientGlobalPointer
categories_id2label = dict((value, key) for key,value in categories_label2id.items())
for i, (text, text_labels) in enumerate(batch):
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros((len(categories_label2id), maxlen, maxlen))
for start, end, label in text_labels:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
label = categories_label2id[label]
labels[label, start, end] = 1
batch_token_ids.append(token_ids) # 前面已经限制了长度
batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
model = Model().to(device)
model.compile(loss=MyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5))
def evaluate(data, threshold=0.5):
X, Y, Z, threshold = 1e-10, 1e-10, 1e-10, 0
for x_true, label in data:
scores = model.predict(x_true)
for i, score in enumerate(scores):
R = set()
for l, start, end in zip(*np.where(score.cpu() > threshold)):
R.add((start, end, categories_id2label[l]))
T = set()
for l, start, end in zip(*np.where(label[i].cpu() > threshold)):
T.add((start, end, categories_id2label[l]))
X += len(R & T)
Y += len(R)
Z += len(T)
f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
return f1, precision, recall | null |
20,751 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import GlobalPointer
maxlen = 256
categories_label2id = {"LOC": 0, "ORG": 1, "PER": 2}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
batch_token_ids, batch_labels = [], []
for i, (text, text_labels) in enumerate(batch):
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros((len(categories_label2id), maxlen, maxlen))
for start, end, label in text_labels:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
label = categories_label2id[label]
labels[label, start, end] = 1
batch_token_ids.append(token_ids) # 前面已经限制了长度
batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, seq_dims=3), dtype=torch.long, device=device)
return batch_token_ids, batch_label
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for i, (text, text_labels) in enumerate(batch):
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros((len(categories_label2id), maxlen, maxlen))
for start, end, label in text_labels:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
label = categories_label2id[label]
labels[label, start, end] = 1
batch_token_ids.append(token_ids) # 前面已经限制了长度
batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, seq_dims=3), dtype=torch.long, device=device)
return batch_token_ids, batch_labels | null |
20,752 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import GlobalPointer
categories_id2label = dict((value, key) for key,value in categories_label2id.items())
for i, (text, text_labels) in enumerate(batch):
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros((len(categories_label2id), maxlen, maxlen))
for start, end, label in text_labels:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
label = categories_label2id[label]
labels[label, start, end] = 1
batch_token_ids.append(token_ids) # 前面已经限制了长度
batch_labels.append(labels[:, :len(token_ids), :len(token_ids)])
model = Model().to(device)
model.compile(loss=MyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5))
def evaluate(data, threshold=0):
X, Y, Z = 0, 1e-10, 1e-10
for x_true, label in data:
scores = model.predict(x_true)
for i, score in enumerate(scores):
R = set()
for l, start, end in zip(*np.where(score.cpu() > threshold)):
R.add((start, end, categories_id2label[l]))
T = set()
for l, start, end in zip(*np.where(label[i].cpu() > 0)):
T.add((start, end, categories_id2label[l]))
X += len(R & T)
Y += len(R)
Z += len(T)
f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
return f1, precision, recall | null |
20,753 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
D = []
with open(filename, encoding='utf-8') as f:
f = f.read()
for l in f.split('\n\n'):
if not l:
continue
d = ['']
for i, c in enumerate(l.split('\n')):
char, flag = c.split(' ')
d[0] += char
if flag[0] == 'B':
d.append([i, i, flag[2:]])
elif flag[0] == 'I':
d[-1][1] = i
D.append(d)
return
for d in batch:
tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
mapping = tokenizer.rematch(d[0], tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros(len(token_ids))
for start, end, label in d[1:]:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
labels[start] = categories_label2id['B-'+label]
labels[start + 1:end + 1] = categories_label2id['I-'+label]
batch_token_ids.append(token_ids)
batch_labels.append(labels)
for d in tqdm(train_data, desc='Generate init_trasitions'):
tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
mapping = tokenizer.rematch(d[0], tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros(len(token_ids))
for start, end, label in d[1:]:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
labels[start] = categories_label2id['B-'+label]
labels[start + 1:end + 1] = categories_label2id['I-'+label]
for i in range(len(labels)-1):
transition[int(labels[i]), int(labels[i+1])] += 1
start_transition[int(labels[0])] += 1 # start转移到标签
end_transition[int(labels[-1])] += 1 # 标签转移到end
def load_data(filename):
D = []
with open(filename, encoding='utf-8') as f:
f = f.read()
for l in f.split('\n\n'):
if not l:
continue
d = ['']
for i, c in enumerate(l.split('\n')):
char, flag = c.split(' ')
d[0] += char
if flag[0] == 'B':
d.append([i, i, flag[2:]])
elif flag[0] == 'I':
d[-1][1] = i
D.append(d)
return D | null |
20,754 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
maxlen = 256
categories_label2id = {k: i for i, k in enumerate(categories)}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
batch_token_ids, batch_labels = [], []
for d in batch:
tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
mapping = tokenizer.rematch(d[0], tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros(len(token_ids))
for start, end, label in d[1:]:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
labels[start] = categories_label2id['B-'+label]
labels[start + 1:end + 1] = categories_label2id['I-'+label]
batch_token_ids.append(token_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
return batch_token_ids, batch_label
for d in tqdm(train_data, desc='Generate init_trasitions'):
tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
mapping = tokenizer.rematch(d[0], tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros(len(token_ids))
for start, end, label in d[1:]:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
labels[start] = categories_label2id['B-'+label]
labels[start + 1:end + 1] = categories_label2id['I-'+label]
for i in range(len(labels)-1):
transition[int(labels[i]), int(labels[i+1])] += 1
start_transition[int(labels[0])] += 1 # start转移到标签
end_transition[int(labels[-1])] += 1 # 标签转移到end
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for d in batch:
tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
mapping = tokenizer.rematch(d[0], tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros(len(token_ids))
for start, end, label in d[1:]:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
labels[start] = categories_label2id['B-'+label]
labels[start + 1:end + 1] = categories_label2id['I-'+label]
batch_token_ids.append(token_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
return batch_token_ids, batch_labels | null |
20,755 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
def predict(self, token_ids):
self.eval()
with torch.no_grad():
emission_score, attention_mask = self.forward(token_ids)
best_path = self.crf.decode(emission_score, attention_mask) # [btz, seq_len]
return best_path
model =
model.compile(loss=Loss(), optimizer=optim.Adam(model.parameters(), lr=2e-5))
def trans_entity2tuple(scores):
'''把tensor转为(样本id, start, end, 实体类型)的tuple用于计算指标
'''
batch_entity_ids = set()
for i, one_samp in enumerate(scores):
entity_ids = []
for j, item in enumerate(one_samp):
flag_tag = categories_id2label[item.item()]
if flag_tag.startswith('B-'): # B
entity_ids.append([i, j, j, flag_tag[2:]])
elif len(entity_ids) == 0:
continue
elif (len(entity_ids[-1]) > 0) and flag_tag.startswith('I-') and (flag_tag[2:]==entity_ids[-1][-1]): # I
entity_ids[-1][-2] = j
elif len(entity_ids[-1]) > 0:
entity_ids.append([])
for i in entity_ids:
if i:
batch_entity_ids.add(tuple(i))
return batch_entity_ids
def evaluate(data):
X, Y, Z = 1e-10, 1e-10, 1e-10
X2, Y2, Z2 = 1e-10, 1e-10, 1e-10
for token_ids, label in tqdm(data):
scores = model.predict(token_ids) # [btz, seq_len]
attention_mask = label.gt(0)
# token粒度
X += (scores.eq(label) * attention_mask).sum().item()
Y += scores.gt(0).sum().item()
Z += label.gt(0).sum().item()
# entity粒度
entity_pred = trans_entity2tuple(scores)
entity_true = trans_entity2tuple(label)
X2 += len(entity_pred.intersection(entity_true))
Y2 += len(entity_pred)
Z2 += len(entity_true)
f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
f2, precision2, recall2 = 2 * X2 / (Y2 + Z2), X2/ Y2, X2 / Z2
return f1, precision, recall, f2, precision2, recall2 | null |
20,756 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import TplinkerHandshakingKernel
def trans_ij2k(seq_len, i, j):
'''把第i行,第j列转化成上三角flat后的序号
'''
if (i > seq_len - 1) or (j > seq_len - 1) or (i > j):
return 0
return int(0.5*(2*seq_len-i+1)*i+(j-i))
The provided code snippet includes necessary dependencies for implementing the `trans_ij2k` function. Write a Python function `def trans_ij2k(seq_len, i, j)` to solve the following problem:
把第i行,第j列转化成上三角flat后的序号
Here is the function:
def trans_ij2k(seq_len, i, j):
'''把第i行,第j列转化成上三角flat后的序号
'''
if (i > seq_len - 1) or (j > seq_len - 1) or (i > j):
return 0
return int(0.5*(2*seq_len-i+1)*i+(j-i)) | 把第i行,第j列转化成上三角flat后的序号 |
20,757 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import TplinkerHandshakingKernel
categories_label2id = {"LOC": 0, "ORG": 1, "PER": 2}
def trans_ij2k(seq_len, i, j):
'''把第i行,第j列转化成上三角flat后的序号
'''
if (i > seq_len - 1) or (j > seq_len - 1) or (i > j):
return 0
return int(0.5*(2*seq_len-i+1)*i+(j-i))
tag2id = tran_ent_rel2id()
The provided code snippet includes necessary dependencies for implementing the `tran_ent_rel2id` function. Write a Python function `def tran_ent_rel2id()` to solve the following problem:
获取最后一个分类层的的映射关系
Here is the function:
def tran_ent_rel2id():
'''获取最后一个分类层的的映射关系
'''
tag2id = {}
for p in categories_label2id.keys():
tag2id[p] = len(tag2id)
return tag2id | 获取最后一个分类层的的映射关系 |
20,758 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import TplinkerHandshakingKernel
maxlen = 64
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def trans_ij2k(seq_len, i, j):
map_ij2k = {(i, j): trans_ij2k(maxlen, i, j) for i in range(maxlen) for j in range(maxlen) if j >= i}
tag2id = tran_ent_rel2id()
def collate_fn(batch):
pair_len = maxlen * (maxlen+1)//2
# batch_head_labels: [btz, pair_len, tag2id_len]
batch_labels = torch.zeros((len(batch), pair_len, len(tag2id)), dtype=torch.long, device=device)
batch_token_ids = []
for i, (tokens, labels) in enumerate(batch):
batch_token_ids.append(tokenizer.tokens_to_ids(tokens)) # 前面已经限制了长度
for s_i in labels:
if s_i[1] >= len(tokens): # 实体的结尾超过文本长度,则不标记
continue
batch_labels[i, map_ij2k[s_i[0], s_i[1]], tag2id[s_i[2]]] = 1
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, length=maxlen), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels | null |
20,759 | import numpy as np
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.tokenizers import Tokenizer
from bert4torch.losses import MultilabelCategoricalCrossentropy
from bert4torch.layers import TplinkerHandshakingKernel
def trans_ij2k(seq_len, i, j):
    """Map cell (i, j) of an upper-triangular seq_len x seq_len grid to its
    flattened row-major index; cells outside the grid or below the diagonal
    map to 0.
    """
    if i > seq_len - 1 or j > seq_len - 1 or i > j:
        return 0
    # Rows above row i contribute seq_len, seq_len-1, ..., seq_len-i+1 cells;
    # their sum is i*seq_len - i*(i-1)/2. Add the offset within row i.
    rows_above = i * seq_len - (i * (i - 1)) // 2
    return rows_above + (j - i)
map_k2ij = {v: k for k, v in map_ij2k.items()}
model = Model().to(device)
model.compile(loss=MultilabelCategoricalCrossentropy(), optimizer=optim.Adam(model.parameters(), lr=2e-5))
def evaluate(data, threshold=0):
    """Span-level precision/recall/F1 over flattened upper-triangular scores.

    Scores above ``threshold`` are decoded back to (start, end, tag) triples
    via ``map_k2ij`` and compared against the gold label matrix.
    """
    # FIX: the original re-assigned `threshold = 0` in the tuple unpack below,
    # silently discarding the caller-supplied value; keep the parameter.
    X, Y, Z = 0, 1e-10, 1e-10
    for x_true, label in data:
        scores = model.predict(x_true)  # [btz, pair_len, tag_size]
        for i, score in enumerate(scores):
            # predicted spans for sample i
            R = set()
            for pair_id, tag_id in zip(*np.where(score.cpu().numpy() > threshold)):
                start, end = map_k2ij[pair_id][0], map_k2ij[pair_id][1]
                R.add((start, end, tag_id))
            # gold spans for sample i
            T = set()
            for pair_id, tag_id in zip(*np.where(label[i].cpu().numpy() > threshold)):
                start, end = map_k2ij[pair_id][0], map_k2ij[pair_id][1]
                T.add((start, end, tag_id))
            X += len(R & T)
            Y += len(R)
            Z += len(T)
    f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
    return f1, precision, recall
20,760 |
def collate_fn(batch):
    """Build inputs for the two-stage NER model.

    Stage 1 targets: per-token labels (0 = O, 1 = B, 2 = I).
    Stage 2 targets: per-entity (start, end) spans and their class ids,
    where class 0 is reserved for the padding entity.
    """
    batch_token_ids, batch_labels, batch_entity_ids, batch_entity_labels = [], [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
        mapping = tokenizer.rematch(d[0], tokens)
        # char offset -> token index, used to align entity boundaries
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        labels = np.zeros(len(token_ids))
        entity_ids, entity_labels = [], []
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                labels[start] = 1 # mark B
                labels[start + 1:end + 1] = 2 # mark I
                entity_ids.append([start, end])
                entity_labels.append(categories.index(label)+1)
        if not entity_ids: # each sample needs at least one entity entry
            entity_ids.append([0, 0]) # pad with a dummy span
            entity_labels.append(0)
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
        batch_entity_ids.append(entity_ids)
        batch_entity_labels.append(entity_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device) # [btz, num_entities, start/end]
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device) # [btz, num_entities]
    return [batch_token_ids, batch_entity_ids], [batch_labels, batch_entity_labels]
20,761 | import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
maxlen = 256
batch_size = 16
bio_tags = 3 ories = ['LOC', 'PER', 'ORG']
config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
ataset(ListDataset):
= Tokenizer(dict_path, do_lower_case=True)
def collate_fn(batch):
    """Collate samples for the two-stage NER model: token-level B/I labels
    plus per-entity (start, end) spans with class ids (0 = padding entity).
    """
    batch_token_ids, batch_labels, batch_entity_ids, batch_entity_labels = [], [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
        mapping = tokenizer.rematch(d[0], tokens)
        # char offset -> token index for boundary alignment
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        labels = np.zeros(len(token_ids))
        entity_ids, entity_labels = [], []
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                labels[start] = 1 # mark B
                labels[start + 1:end + 1] = 2 # mark I
                entity_ids.append([start, end])
                entity_labels.append(categories.index(label)+1)
        if not entity_ids: # each sample needs at least one entity entry
            entity_ids.append([0, 0]) # pad with a dummy span
            entity_labels.append(0)
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
        batch_entity_ids.append(entity_ids)
        batch_entity_labels.append(entity_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device) # [btz, num_entities, start/end]
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device) # [btz, num_entities]
    return [batch_token_ids, batch_entity_ids], [batch_labels, batch_entity_labels]
aloader = DataLoader(MyDataset('E:/data/corpus/ner/china-people-daily-ner-corpus/example.train'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset('E:/data/corpus/ner/china-people-daily-ner-corpus/example.dev'), batch_size=batch_size, collate_fn=collate_fn)
eModel):
    def predict(self, token_ids):
        """Two-stage inference: CRF-decode B/I spans, then classify each span.

        Returns (best_path, entity_tuples) where entity_tuples is a set of
        (sample_id, start, end, predicted_type) via trans_entity2tuple.
        """
        self.eval()
        with torch.no_grad():
            # stage 1: token-level decoding
            last_hidden_state = self.bert([token_ids]) # [btz, seq_len, hdsz]
            emission_score = self.dense1(last_hidden_state) # [bts, seq_len, tag_size]
            attention_mask = token_ids.gt(0)
            best_path = self.crf.decode(emission_score, attention_mask) # [bts, seq_len]
            # stage 2: collect decoded spans per sample
            batch_entity_ids = []
            for one_samp in best_path:
                entity_ids = []
                for j, item in enumerate(one_samp):
                    if item.item() == 1: # B: open a new span
                        entity_ids.append([j, j])
                    elif len(entity_ids) == 0:
                        continue
                    elif (len(entity_ids[-1]) > 0) and (item.item() == 2): # I: extend current span
                        entity_ids[-1][-1] = j
                    elif len(entity_ids[-1]) > 0:
                        entity_ids.append([])
                if not entity_ids: # each sample needs at least one span
                    entity_ids.append([0, 0]) # pad with a dummy span
                batch_entity_ids.append([i for i in entity_ids if i])
            batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device) # [btz, num_entities, start/end]
            btz, entity_count, _ = batch_entity_ids.shape
            hidden_size = last_hidden_state.shape[-1]
            gather_index = batch_entity_ids.reshape(btz, -1, 1).repeat(1, 1, hidden_size)
            entity_states = torch.gather(last_hidden_state, dim=1, index=gather_index).reshape(btz, entity_count, -1, hidden_size)
            entity_states = torch.mean(entity_states, dim=2) # mean of the span's start/end hidden states
            entity_logit = self.dense2(entity_states) # [btz, num_entities, num_entity_types]
            entity_pred = torch.argmax(entity_logit, dim=-1) # [btz, num_entities]
            # each element is a (sample, start, end, type) tuple
            entity_tulpe = trans_entity2tuple(batch_entity_ids, entity_pred)
            return best_path, entity_tulpe
model = Model().to(device)
optim.Adam(model.parameters(), lr=2e-5))
def trans_entity2tuple(entity_ids, entity_labels):
    """Convert span tensors into a set of (sample_id, start, end, label) tuples.

    Spans whose start or end is 0 are treated as padding and skipped.
    """
    entity_true = set()
    for sample_id, spans in enumerate(entity_ids):
        for span_id, span in enumerate(spans):
            start, end = span[0].item(), span[1].item()
            if start != 0 and end != 0:
                label = entity_labels[sample_id, span_id].item()
                entity_true.add((sample_id, start, end, label))
    return entity_true
f __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
    """Evaluate the two-stage model: token-level F1 (stage 1) and
    entity-level F1 (stage 2). Returns (f1, p, r, f2, p2, r2).
    """
    X1, Y1, Z1 = 1e-10, 1e-10, 1e-10
    X2, Y2, Z2 = 1e-10, 1e-10, 1e-10
    for (token_ids, entity_ids), (label, entity_labels) in tqdm(data):
        scores, entity_pred = model.predict(token_ids)  # [btz, seq_len]
        # stage-1 metrics: token granularity
        attention_mask = label.gt(0)
        X1 += (scores.eq(label) * attention_mask).sum().item()
        Y1 += scores.gt(0).sum().item()
        Z1 += label.gt(0).sum().item()
        # stage-2 metrics: entity granularity
        entity_true = trans_entity2tuple(entity_ids, entity_labels)
        X2 += len(entity_pred.intersection(entity_true))
        Y2 += len(entity_pred)
        Z2 += len(entity_true)
    f1, precision, recall = 2 * X1 / (Y1 + Z1), X1 / Y1, X1 / Z1
    f2, precision2, recall2 = 2 * X2 / (Y2 + Z2), X2/ Y2, X2 / Z2
    return f1, precision, recall, f2, precision2, recall2
20,762 | import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
maxlen = 256
categories_label2id = {k: i for i, k in enumerate(categories)}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
batch_token_ids, batch_labels = [], []
for d in batch:
tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
mapping = tokenizer.rematch(d[0], tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
labels = np.zeros(len(token_ids))
for start, end, label in d[1:]:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
labels[start] = categories_label2id['B-'+label]
labels[start + 1:end + 1] = categories_label2id['I-'+label]
batch_token_ids.append(token_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
return batch_token_ids, batch_label
def collate_fn(batch):
    """Collate samples into (token_ids, BIO label ids) tensors.

    Entity char offsets are mapped to token indices via tokenizer.rematch;
    label ids come from categories_label2id ('B-xxx' / 'I-xxx').
    """
    batch_token_ids, batch_labels = [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
        mapping = tokenizer.rematch(d[0], tokens)
        # char offset -> token index for boundary alignment
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        labels = np.zeros(len(token_ids))
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                labels[start] = categories_label2id['B-'+label]
                labels[start + 1:end + 1] = categories_label2id['I-'+label]
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    return batch_token_ids, batch_labels
20,763 | import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
def acc(y_pred, y_true):
    """Token-level accuracy.

    y_pred: sequence whose first element holds the logits [..., num_tags];
    y_true: integer label tensor with the matching leading shape.
    """
    logits = y_pred[0]
    predictions = torch.argmax(logits, dim=-1)
    n_correct = predictions.eq(y_true).sum().item()
    return {'acc': n_correct / y_true.numel()}
20,764 | import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
    def predict(self, token_ids):
        """CRF inference: run the network and Viterbi-decode the best path.

        Returns the decoded tag-id sequence, shape [btz, seq_len].
        """
        self.eval()
        with torch.no_grad():
            emission_score, attention_mask = self.forward(token_ids)
            best_path = self.crf.decode(emission_score, attention_mask) # [btz, seq_len]
            return best_path
model =
def trans_entity2tuple(scores):
    '''Convert a [btz, seq_len] tag-id tensor into a set of
    (sample_id, start, end, entity_type) tuples for metric computation.
    '''
    batch_entity_ids = set()
    for i, one_samp in enumerate(scores):
        entity_ids = []
        for j, item in enumerate(one_samp):
            flag_tag = categories_id2label[item.item()]
            if flag_tag.startswith('B-'): # B: open a new entity [sample, start, end, type]
                entity_ids.append([i, j, j, flag_tag[2:]])
            elif len(entity_ids) == 0:
                continue
            elif (len(entity_ids[-1]) > 0) and flag_tag.startswith('I-') and (flag_tag[2:]==entity_ids[-1][-1]): # I: extend the open entity
                entity_ids[-1][-2] = j
            elif len(entity_ids[-1]) > 0:
                entity_ids.append([])  # O (or mismatched I) closes the open entity
        for i in entity_ids:
            if i:
                batch_entity_ids.add(tuple(i))
    return batch_entity_ids
def evaluate(data):
    """Evaluate at token granularity (f1, p, r) and entity granularity
    (f2, p2, r2) using trans_entity2tuple for span extraction.
    """
    X, Y, Z = 1e-10, 1e-10, 1e-10
    X2, Y2, Z2 = 1e-10, 1e-10, 1e-10
    for token_ids, label in tqdm(data):
        scores = model.predict(token_ids)  # [btz, seq_len]
        attention_mask = label.gt(0)
        # token granularity
        X += (scores.eq(label) * attention_mask).sum().item()
        Y += scores.gt(0).sum().item()
        Z += label.gt(0).sum().item()
        # entity granularity
        entity_pred = trans_entity2tuple(scores)
        entity_true = trans_entity2tuple(label)
        X2 += len(entity_pred.intersection(entity_true))
        Y2 += len(entity_pred)
        Z2 += len(entity_true)
    f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
    f2, precision2, recall2 = 2 * X2 / (Y2 + Z2), X2/ Y2, X2 / Z2
    return f1, precision, recall, f2, precision2, recall2
20,765 | import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import jieba.posseg as psg
from collections import Counter
import re
maxlen = 256
categories_label2id = {k: i for i, k in enumerate(categories)}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
psg_map =
def collate_fn(batch):
    """Collate (token_ids, POS-tag ids) inputs and BIO label targets.

    POS tags come from jieba.posseg; multi-char tokens take the majority
    POS tag of their characters. Unknown tags map to id 0.
    """
    batch_token_ids, batch_psg_ids, batch_labels = [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
        mapping = tokenizer.rematch(d[0], tokens)  # char span of each token in the original text
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        labels = np.zeros(len(token_ids))
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                labels[start] = categories_label2id['B-'+label]
                labels[start + 1:end + 1] = categories_label2id['I-'+label]
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
        # build the part-of-speech input: one POS tag per character
        seg = [(i, p) for word, p in psg.cut(d[0]) for i in word]
        seg_word, seg_p = zip(*seg)
        psg_ids = np.zeros(len(token_ids))
        for i, j in enumerate(mapping):
            if j:
                start, end = j[0], j[-1]  # first/last char position of the token
                # sanity check (disabled):
                # token_new = (''.join(seg_word[start:end+1])).lower()
                # assert re.sub('^##', '', tokens[i]) == token_new, f"{tokens[i]} -> {token_new}"
                if start == end:
                    psg_ids[i] = psg_map.get(seg_p[start], 0)  # 0 when the tag is unknown
                else:
                    psg_ids[i] = psg_map.get(Counter(seg_p[start:end+1]).most_common(1)[0][0], 0)  # majority vote
        batch_psg_ids.append(psg_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_psg_ids = torch.tensor(sequence_padding(batch_psg_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    return [batch_token_ids, batch_psg_ids], batch_labels
20,766 | import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import jieba.posseg as psg
from collections import Counter
import re
    def predict(self, token_ids, psg_ids):
        """CRF inference with POS-tag features; returns the decoded
        tag-id sequence, shape [btz, seq_len].
        """
        self.eval()
        with torch.no_grad():
            emission_score, attention_mask = self.forward(token_ids, psg_ids)
            best_path = self.crf.decode(emission_score, attention_mask) # [bts, seq_len]
            return best_path
model =
model.compile(loss=Loss(), optimizer=optim.Adam(model.parameters(), lr=2e-5))
def trans_entity2tuple(scores):
    '''Convert a [btz, seq_len] tag-id tensor into a set of
    (sample_id, start, end, entity_type) tuples for metric computation.
    '''
    batch_entity_ids = set()
    for i, one_samp in enumerate(scores):
        entity_ids = []
        for j, item in enumerate(one_samp):
            flag_tag = categories_id2label[item.item()]
            if flag_tag.startswith('B-'): # B: open a new entity
                entity_ids.append([i, j, j, flag_tag[2:]])
            elif len(entity_ids) == 0:
                continue
            elif (len(entity_ids[-1]) > 0) and flag_tag.startswith('I-') and (flag_tag[2:]==entity_ids[-1][-1]): # I: extend the open entity
                entity_ids[-1][-2] = j
            elif len(entity_ids[-1]) > 0:
                entity_ids.append([])  # anything else closes the open entity
        for i in entity_ids:
            if i:
                batch_entity_ids.add(tuple(i))
    return batch_entity_ids
def evaluate(data):
    """Evaluate the POS-augmented tagger at token granularity (f1, p, r)
    and entity granularity (f2, p2, r2).
    """
    X, Y, Z = 1e-10, 1e-10, 1e-10
    X2, Y2, Z2 = 1e-10, 1e-10, 1e-10
    for (token_ids, psg_ids), label in tqdm(data):
        scores = model.predict(token_ids, psg_ids)  # [btz, seq_len]
        attention_mask = label.gt(0)
        # token granularity
        X += (scores.eq(label) * attention_mask).sum().item()
        Y += scores.gt(0).sum().item()
        Z += label.gt(0).sum().item()
        # entity granularity
        entity_pred = trans_entity2tuple(scores)
        entity_true = trans_entity2tuple(label)
        X2 += len(entity_pred.intersection(entity_true))
        Y2 += len(entity_pred)
        Z2 += len(entity_true)
    f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
    f2, precision2, recall2 = 2 * X2 / (Y2 + Z2), X2/ Y2, X2 / Z2
    return f1, precision, recall, f2, precision2, recall2
20,767 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import re
import json
import os
D = []
with open(filename, encoding='utf-8') as f:
for l in f:
D.append(re.split(' +', l.strip()))
return
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(filename)` to solve the following problem:
加载数据 单条格式:[词1, 词2, 词3, ...]
Here is the function:
def load_data(filename):
    """Load data; one sample per line, format: [word1, word2, word3, ...].

    Words on a line are separated by one or more spaces.
    """
    with open(filename, encoding='utf-8') as fh:
        return [re.split(' +', line.strip()) for line in fh]
20,768 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import re
import json
import os
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
标签含义 0: 单字词; 1: 多字词首字; 2: 多字词中间; 3: 多字词末字
Here is the function:
def collate_fn(batch):
    """Build word-segmentation training tensors.

    Label meaning:
    0: single-char word; 1: first char of a multi-char word;
    2: middle char; 3: last char. [CLS]/[SEP] are labelled 0.
    """
    batch_token_ids, batch_labels = [], []
    for item in batch:
        token_ids, labels = [tokenizer._token_start_id], [0]
        for w in item:
            # encode the word alone and strip its [CLS]/[SEP]
            w_token_ids = tokenizer.encode(w)[0][1:-1]
            if len(token_ids) + len(w_token_ids) < maxlen:
                token_ids += w_token_ids
                if len(w_token_ids) == 1:
                    labels += [0]
                else:
                    labels += [1] + [2] * (len(w_token_ids) - 2) + [3]
            else:
                break  # sample is full; drop the remaining words
        token_ids += [tokenizer._token_end_id]
        labels += [0]
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    return batch_token_ids, batch_labels
20,769 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import re
import json
import os
def acc(y_pred, y_true):
    """Fraction of positions where the argmax of the first output matches y_true."""
    hard_pred = y_pred[0].argmax(dim=-1)
    hit_rate = (hard_pred == y_true).sum().item() / y_true.numel()
    return {'acc': hit_rate}
20,770 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import re
import json
import os
def segmenter(text):
    """Segment `text` into words using the trained model.

    Tokens beyond 512 are dropped from just before [SEP]; labels < 2
    (single-char word or word-start) open a new word, others extend it.
    """
    tokens = tokenizer.tokenize(text)
    while len(tokens) > 512:
        tokens.pop(-2)  # keep [CLS]/[SEP]; drop the token before [SEP]
    mapping = tokenizer.rematch(text, tokens)
    token_ids = tokenizer.tokens_to_ids(tokens)
    token_ids = torch.tensor([token_ids], dtype=torch.long, device=device)
    labels = model.predict(token_ids)[0].cpu().numpy()
    words = []
    for i, label in enumerate(labels[1:-1]):  # skip [CLS]/[SEP]
        if label < 2 or len(words) == 0:
            words.append([i + 1])
        else:
            words[-1].append(i + 1)
    # map token groups back to character spans of the original text
    return [text[mapping[w[0]][0]:mapping[w[-1]][-1] + 1] for w in words]
The provided code snippet includes necessary dependencies for implementing the `simple_evaluate` function. Write a Python function `def simple_evaluate(data)` to solve the following problem:
简单的评测 该评测指标不等价于官方的评测指标,但基本呈正相关关系, 可以用来快速筛选模型。
Here is the function:
def simple_evaluate(data):
    """Quick-and-dirty evaluation.

    Not equivalent to the official metric, but roughly positively
    correlated with it — useful for fast model screening.
    """
    total, right = 0., 0.
    for w_true in tqdm(data):
        w_pred = segmenter(''.join(w_true))
        w_pred = set(w_pred)
        w_true = set(w_true)
        total += len(w_true)
        right += len(w_true & w_pred)
    return right / total
20,771 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import re
import json
import os
with open(filename, encoding='utf-8') as f:
for l in f:
D.append(re.split(' +', l.strip()))
def segmenter(text):
    """Segment `text` into words with the trained model; see the word-start
    (label < 2) / word-continuation decoding below.
    """
    tokens = tokenizer.tokenize(text)
    while len(tokens) > 512:
        tokens.pop(-2)  # drop the token before [SEP] until it fits
    mapping = tokenizer.rematch(text, tokens)
    token_ids = tokenizer.tokens_to_ids(tokens)
    token_ids = torch.tensor([token_ids], dtype=torch.long, device=device)
    labels = model.predict(token_ids)[0].cpu().numpy()
    words = []
    for i, label in enumerate(labels[1:-1]):  # skip [CLS]/[SEP]
        if label < 2 or len(words) == 0:
            words.append([i + 1])
        else:
            words[-1].append(i + 1)
    return [text[mapping[w[0]][0]:mapping[w[-1]][-1] + 1] for w in words]
The provided code snippet includes necessary dependencies for implementing the `predict_to_file` function. Write a Python function `def predict_to_file(in_file, out_file)` to solve the following problem:
预测结果到文件,便于用官方脚本评测 使用示例: predict_to_file('/root/icwb2-data/testing/pku_test.utf8', 'myresult.txt') 官方评测代码示例: data_dir="/root/icwb2-data" $data_dir/scripts/score $data_dir/gold/pku_training_words.utf8 $data_dir/gold/pku_test_gold.utf8 myresult.txt > myscore.txt (执行完毕后查看myscore.txt的内容末尾)
Here is the function:
def predict_to_file(in_file, out_file):
    """Write segmentation predictions to a file for the official scorer.

    Example:
        predict_to_file('/root/icwb2-data/testing/pku_test.utf8', 'myresult.txt')
    Official scoring:
        data_dir="/root/icwb2-data"
        $data_dir/scripts/score $data_dir/gold/pku_training_words.utf8 $data_dir/gold/pku_test_gold.utf8 myresult.txt > myscore.txt
        (then inspect the tail of myscore.txt)
    """
    fw = open(out_file, 'w', encoding='utf-8')
    with open(in_file, encoding='utf-8') as fr:
        for l in tqdm(fr):
            l = l.strip()
            if l:
                l = ' '.join(segmenter(l))
            fw.write(l + '\n')
    fw.close()
20,772 | import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.layers import LayerNorm
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import defaultdict, deque
from sklearn.metrics import precision_recall_fscore_support
text = "-".join([str(i) for i in index])
text = text + "-#-{}".format(type)
return tex
def convert_index_to_text(index, type):
    """Serialize token indices and an entity type as 'i1-i2-...-#-type'."""
    joined = "-".join(str(i) for i in index)
    return joined + "-#-{}".format(type)
20,773 | import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.layers import LayerNorm
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import defaultdict, deque
from sklearn.metrics import precision_recall_fscore_support
index, type = text.split("-#-")
index = [int(x) for x in index.split("-")]
return index, int(type
def convert_text_to_index(text):
    """Parse 'i1-i2-...-#-type' back into ([i1, i2, ...], type)."""
    index_part, type_part = text.split("-#-")
    indices = [int(tok) for tok in index_part.split("-")]
    return indices, int(type_part)
20,774 | import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.layers import LayerNorm
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import defaultdict, deque
from sklearn.metrics import precision_recall_fscore_support
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(data):
    """Collate W2NER-style samples: pad token ids and fill the 2-D
    distance/label/mask/pieces2word matrices to the batch maxima.
    """
    tokens_ids, pieces2word, dist_inputs, grid_labels, grid_mask2d, _entity_text = map(list, zip(*data))
    sent_length = torch.tensor([i.shape[0] for i in pieces2word], dtype=torch.long, device=device)
    # max_wordlen is in words; max_tokenlen is in sub-word tokens
    max_wordlen = torch.max(sent_length).item()
    max_tokenlen = np.max([len(x) for x in tokens_ids])
    tokens_ids = torch.tensor(sequence_padding(tokens_ids), dtype=torch.long, device=device)
    batch_size = tokens_ids.size(0)
    def fill(data, new_data):
        # copy each sample's matrix into the top-left corner of the padded one
        for j, x in enumerate(data):
            new_data[j, :x.shape[0], :x.shape[1]] = torch.tensor(x, dtype=torch.long, device=device)
        return new_data
    dis_mat = torch.zeros((batch_size, max_wordlen, max_wordlen), dtype=torch.long, device=device)
    dist_inputs = fill(dist_inputs, dis_mat)
    labels_mat = torch.zeros((batch_size, max_wordlen, max_wordlen), dtype=torch.long, device=device)
    grid_labels = fill(grid_labels, labels_mat)
    mask2d_mat = torch.zeros((batch_size, max_wordlen, max_wordlen), dtype=torch.bool, device=device)
    grid_mask2d = fill(grid_mask2d, mask2d_mat)
    sub_mat = torch.zeros((batch_size, max_wordlen, max_tokenlen), dtype=torch.bool, device=device)
    pieces2word = fill(pieces2word, sub_mat)
    return [tokens_ids, pieces2word, dist_inputs, sent_length, grid_mask2d], [grid_labels, grid_mask2d, _entity_text]
20,775 |
def collate_fn(batch):
    """Collate samples for span-pointer NER: per-token start/end label ids
    (categories_label2id) plus a padding mask. [CLS] is dropped.
    """
    batch_token_ids, batch_start_labels, batch_end_labels = [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=max_len)[1:]  # drop [CLS]
        mapping = tokenizer.rematch(d[0], tokens)
        # char offset -> token index for boundary alignment
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        start_ids = [0] * len(tokens)
        end_ids = [0] * len(tokens)
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                start_ids[start] = categories_label2id[label]
                end_ids[end] = categories_label2id[label]
        batch_token_ids.append(token_ids)
        batch_start_labels.append(start_ids)
        batch_end_labels.append(end_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_start_labels = torch.tensor(sequence_padding(batch_start_labels), dtype=torch.long, device=device)
    batch_end_labels = torch.tensor(sequence_padding(batch_end_labels), dtype=torch.long, device=device)
    batch_mask = batch_token_ids.gt(0).long()
    return [batch_token_ids], [batch_mask, batch_start_labels, batch_end_labels]
20,776 |
def evaluate(data):
    """Entity-level precision/recall/F1 for the span-pointer model,
    decoding both predictions and gold labels with span_decode.
    """
    X, Y, Z = 0, 1e-10, 1e-10
    for token_ids, labels in tqdm(data, desc='Evaluation'):
        start_logit, end_logit = model.predict(token_ids)  # [btz, seq_len, 2]
        mask, start_ids, end_ids = labels
        # entity granularity
        entity_pred = span_decode(start_logit, end_logit, mask)
        entity_true = span_decode(start_ids, end_ids)
        X += len(entity_pred.intersection(entity_true))
        Y += len(entity_pred)
        Z += len(entity_true)
    f1, precision, recall = 2 * X / (Y + Z), X/ Y, X / Z
    return f1, precision, recall
20,777 | import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.nn as nn
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.layers import LayerNorm
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
from torch_scatter import scatter_max
label2idx = {'LOC':0, 'PER':1, 'ORG':2}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(data):
def collate_fn(data):
    """Pad token ids / word indexes and assemble the per-pair label matrix.

    Positions outside a sample's own seq_len are filled with -100
    (the ignore value for the loss).
    """
    tokens_ids, indexes, matrix, ent_target = map(list, zip(*data))
    tokens_ids = torch.tensor(sequence_padding(tokens_ids), dtype=torch.long, device=device)
    indexes = torch.tensor(sequence_padding(indexes), dtype=torch.long, device=device)
    seq_len = max([i.shape[0] for i in matrix])
    matrix_new = np.ones((len(tokens_ids), seq_len, seq_len, len(label2idx)), dtype=np.int8) * -100
    for i in range(len(tokens_ids)):
        matrix_new[i, :len(matrix[i][0]), :len(matrix[i][0]), :] = matrix[i]
    matrix = torch.tensor(matrix_new, dtype=torch.long, device=device)
    return [tokens_ids, indexes], [matrix, ent_target]
20,778 |
def collate_fn(batch):
    """Collate samples for label-aware span NER: per-token multi-hot
    start/end matrices over num_labels, plus encoded label descriptions.
    """
    batch_token_ids, batch_start_labels, batch_end_labels = [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=max_c_len)
        mapping = tokenizer.rematch(d[0], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        # group entities by type into the start/end matrices
        start_labels = np.zeros((len(tokens), num_labels))
        end_labels = np.zeros((len(tokens), num_labels))
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                start_labels[start, categories[label]] = 1
                end_labels[end, categories[label]] = 1
        batch_token_ids.append(tokenizer.tokens_to_ids(tokens))
        batch_start_labels.append(start_labels)
        batch_end_labels.append(end_labels)
    # one encoded natural-language description per label class
    batch_label_token_ids = tokenizer.encode(categories_annotations.values(), maxlen=max_q_len)[0]
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) # [btz, seg_len]
    batch_start_labels = torch.tensor(sequence_padding(batch_start_labels), dtype=torch.long, device=device) # [btz, seg_len]
    batch_end_labels = torch.tensor(sequence_padding(batch_end_labels), dtype=torch.long, device=device) # [btz, seg_len]
    batch_label_token_ids = torch.tensor(sequence_padding(batch_label_token_ids), dtype=torch.long, device=device) # [c, label_len]
    batch_span_labels = None
    masks = (batch_token_ids != tokenizer._token_pad_id).long()
    return [batch_token_ids, batch_label_token_ids], [masks, batch_start_labels, batch_end_labels, batch_span_labels]
20,779 |
def evaluate(data):
    """Compute entity-level micro F1/precision/recall over an NER dataloader.

    ``data`` yields ``(inputs, (mask, start_labels, end_labels, span_labels))``
    as produced by the matching ``collate_fn``.  ``decode`` (defined elsewhere)
    turns start/end logits into a set of entity tuples.

    Returns ``(f1, precision, recall)``.
    """
    # X = true positives; Y = predicted count; Z = gold count.
    # Y and Z start at 1e-10 to avoid division by zero on empty data.
    X, Y, Z = 0, 1e-10, 1e-10
    for inputs, labels in tqdm(data, desc='Evaluation'):
        start_logit, end_logit, span_logits = model.predict(inputs)
        mask, start_labels, end_labels, span_labels = labels
        # entity-level comparison (not token-level)
        entity_pred = decode(start_logit, end_logit, mask)
        entity_true = decode(start_labels, end_labels)
        X += len(entity_pred.intersection(entity_true))
        Y += len(entity_pred)
        Z += len(entity_true)
    f1, precision, recall = 2 * X / (Y + Z), X/ Y, X / Z
    return f1, precision, recall
20,780 |
def collate_fn(batch):
    """Build one batch for MRC-style NER: one (query, context) pair per entity type.

    Each element of ``batch`` is ``(text, (start, end, label), ...)``.  For
    every entity type in ``categories`` a sample is emitted whose query text
    comes from ``ent2query``, so the batch size grows to
    ``len(batch) * len(categories)``.

    Returns ``[token_ids, segment_ids], [segment_ids, start_labels, end_labels, ent_types]``
    — segment_ids doubles as the mask selecting the context half downstream.
    """
    batch_token_ids, batch_segment_ids, batch_start_labels, batch_end_labels = [], [], [], []
    batch_ent_type = []
    for d in batch:
        tokens_b = tokenizer.tokenize(d[0], maxlen=max_c_len)[1:] # drop [CLS]; it is re-added via tokens_a
        mapping = tokenizer.rematch(d[0], tokens_b)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        # organize gold entities by entity type
        label_dict = defaultdict(list)
        for start, end, label in d[1:]:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                label_dict[label].append((start, end))
        # iterate over entity types; query = tokens_a, context = tokens_b
        # sample layout: [CLS] + tokens_a + [SEP] + tokens_b + [SEP]
        for _type in categories:
            start_ids = [0] * len(tokens_b)
            end_ids = [0] * len(tokens_b)
            text_a = ent2query[_type]
            tokens_a = tokenizer.tokenize(text_a, maxlen=max_q_len)
            for _label in label_dict[_type]:
                start_ids[_label[0]] = 1
                end_ids[_label[1]] = 1
            # prepend zeros so labels align with the concatenated sequence
            start_ids = [0] * len(tokens_a) + start_ids
            end_ids = [0] * len(tokens_a) + end_ids
            token_ids = tokenizer.tokens_to_ids(tokens_a) + tokenizer.tokens_to_ids(tokens_b)
            segment_ids = [0] * len(tokens_a) + [1] * len(tokens_b)
            assert len(start_ids) == len(end_ids) == len(token_ids) == len(segment_ids)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            batch_start_labels.append(start_ids)
            batch_end_labels.append(end_ids)
            batch_ent_type.append(_type)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_start_labels = torch.tensor(sequence_padding(batch_start_labels), dtype=torch.long, device=device)
    batch_end_labels = torch.tensor(sequence_padding(batch_end_labels), dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], [batch_segment_ids, batch_start_labels, batch_end_labels, batch_ent_type]
20,781 |
def evaluate(data):
    """Compute entity-level micro F1/precision/recall for the MRC-NER model.

    ``data`` yields batches from the MRC ``collate_fn``; ``mrc_decode``
    (defined elsewhere) converts start/end logits plus the entity-type list
    into a set of entity tuples.

    Returns ``(f1, precision, recall)``.
    """
    # X = true positives; Y/Z start at 1e-10 to avoid division by zero.
    X, Y, Z = 0, 1e-10, 1e-10
    for (token_ids, segment_ids), labels in tqdm(data, desc='Evaluation'):
        start_logit, end_logit = model.predict([token_ids, segment_ids]) # [btz, seq_len, 2]
        mask, start_ids, end_ids, ent_type = labels
        # entity-level comparison (not token-level)
        entity_pred = mrc_decode(start_logit, end_logit, ent_type, mask)
        entity_true = mrc_decode(start_ids, end_ids, ent_type)
        X += len(entity_pred.intersection(entity_true))
        Y += len(entity_pred)
        Z += len(entity_true)
    f1, precision, recall = 2 * X / (Y + Z), X/ Y, X / Z
    return f1, precision, recall
20,782 | import argparse
import json
import pandas as pd
from tqdm import tqdm
from model import BertClient
def create_document(doc, emb, index_name):
    """Build an Elasticsearch bulk-index action for one document.

    Args:
        doc: mapping with 'text' and 'title' keys.
        emb: embedding vector stored under 'text_vector'.
        index_name: target Elasticsearch index.

    Returns:
        A dict in the shape expected by ``elasticsearch.helpers.bulk``.
    """
    return dict(
        _op_type='index',
        _index=index_name,
        text=doc['text'],
        title=doc['title'],
        text_vector=emb,
    )
20,783 | import argparse
import json
import pandas as pd
from tqdm import tqdm
from model import BertClient
def load_dataset(path):
    """Load documents from a CSV file with 'Title' and 'Description' columns.

    Rows whose Description is shorter than 50 characters are skipped
    (too short to embed usefully).

    Args:
        path: path to a UTF-8 encoded CSV file.

    Returns:
        List of ``{'title': ..., 'text': ...}`` dicts, in file order.
    """
    docs = []
    df = pd.read_csv(path, encoding='utf-8')
    # Unpack (index, Series) directly instead of indexing row[1].
    for _, series in df.iterrows():
        if len(series.Description) < 50:
            continue
        docs.append({
            'title': series.Title,
            'text': series.Description,
        })
    return docs
20,784 | import argparse
import json
import pandas as pd
from tqdm import tqdm
from model import BertClient
bc = BertClient(batch_size=128, use_tqdm=False)
The provided code snippet includes necessary dependencies for implementing the `bulk_predict` function. Write a Python function `def bulk_predict(docs, batch_size=256)` to solve the following problem:
Predict bert embeddings.
Here is the function:
def bulk_predict(docs, batch_size=256):
    """Yield a BERT embedding for each doc's 'text', encoding in batches.

    Uses the module-level ``bc`` BertClient; yields one embedding per input
    document, in order, so it can be zipped back with ``docs``.
    """
    for i in tqdm(range(0, len(docs), batch_size)):
        batch_docs = docs[i: i+batch_size]
        embeddings = bc.encode([doc['text'] for doc in batch_docs])
        for emb in embeddings:
            yield emb
20,785 | import argparse
import json
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
def load_dataset(path):
    """Load a JSON-Lines file into a list of parsed objects.

    Blank lines (e.g. a trailing newline at end of file) are skipped
    instead of raising ``json.JSONDecodeError`` on the empty string.

    Args:
        path: path to a UTF-8 encoded .jsonl file.

    Returns:
        List with one decoded object per non-blank line, in file order.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return [json.loads(line) for line in f if line.strip()]
20,786 | import os
from pprint import pprint
from flask import Flask, render_template, jsonify, request
from elasticsearch import Elasticsearch
from src.model import BertClient
SEARCH_SIZE = 10
INDEX_NAME = 'jobsearch'
def index():
    """Serve the search page (Flask view for the site root)."""
    return render_template('index.html')
"
=
class BertClient(object):
    """Thin inference wrapper that turns sentences into BERT embeddings."""

    def __init__(self, batch_size=10, maxlen=128, use_tqdm=False):
        # build the model and load pretrained weights
        self.model = build_transformer_model(config_path, checkpoint_path, segment_vocab_size=0, with_pool=True, output_all_encoded_layers=True)
        self.model.to(device)
        self.model.eval()
        self.batch_size=batch_size
        self.maxlen = maxlen
        self.use_tqdm = use_tqdm

    def encode(self, queries):
        """Encode a list of strings into sentence embeddings.

        Returns a list of plain-Python float lists (one per query), pooled
        with the 'first-last-avg' strategy.
        """
        token_ids = tokenizer.encode(queries, maxlen=self.maxlen)[0]
        token_ids = torch.tensor(sequence_padding(token_ids), device=device)
        dataloader = DataLoader(TensorDataset(token_ids), batch_size=self.batch_size)
        reps = []
        dataloader = tqdm(dataloader) if self.use_tqdm else dataloader
        for batch in dataloader:
            hidden_state1, pooler = self.model(batch)
            # batch[0].gt(0) treats token id 0 as padding -> attention mask
            rep = get_pool_emb(hidden_state1, pooler, batch[0].gt(0).long(), 'first-last-avg')
            reps.extend(rep.cpu().numpy().tolist())
        return reps
def analyzer():
    """Flask view: embed the ?q= query and run a cosine-similarity ES search.

    NOTE(review): a BertClient (model load) and an Elasticsearch client are
    constructed on every request — expensive; presumably fine for a demo,
    but consider hoisting to module level for production.
    """
    bc = BertClient()
    client = Elasticsearch()
    query = request.args.get('q')
    query_vector = bc.encode([query])[0]
    # script_score: cosine similarity against the stored dense vector;
    # +1.0 keeps scores non-negative as Elasticsearch requires.
    script_query = {
        "script_score": {
            "query": {"match_all": {}},
            "script": {
                "source": "cosineSimilarity(params.query_vector, doc['text_vector']) + 1.0",
                "params": {"query_vector": query_vector}
            }
        }
    }
    response = client.search(
        index=INDEX_NAME,
        body={
            "size": SEARCH_SIZE,
            "query": script_query,
            "_source": {"includes": ["title", "text"]}
        }
    )
    print(query)
    pprint(response)
    return jsonify(response)
20,787 | from bert4torch.tokenizers import Tokenizer
from bert4torch.snippets import sequence_padding
import numpy as np
def preprocess(text_list):
    """Tokenize a list of texts and pad both id sequences to length 512.

    Returns ``(token_ids, segment_ids)`` as padded 2-D arrays suitable for
    feeding straight into the model.
    """
    encoded = [tokenizer.encode(text, maxlen=512) for text in text_list]
    batch_token_ids = sequence_padding([tok for tok, _ in encoded], length=512)
    batch_segment_ids = sequence_padding([seg for _, seg in encoded], length=512)
    return batch_token_ids, batch_segment_ids
20,788 | from bert4torch.tokenizers import Tokenizer
from bert4torch.snippets import sequence_padding
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `postprocess` function. Write a Python function `def postprocess(res)` to solve the following problem:
后处理
Here is the function:
def postprocess(res):
    """Convert raw inference output into sentiment label strings.

    ``res['outputs']`` is a list of items each carrying a flat 'data' list
    and its 'shape'; the last axis holds class scores.  Returns one list of
    label strings per output item.
    """
    # spelling 'negtive' kept verbatim: downstream consumers may match on it
    label_names = {0: 'negtive', 1: 'positive'}
    labelled = []
    for output in res['outputs']:
        scores = np.asarray(output['data']).reshape(output['shape'])
        class_ids = scores.argmax(axis=-1)
        labelled.append([label_names[cid] for cid in class_ids])
    return labelled
20,789 | numpy as np
import torch
import torch.nn as nn
from bert4torch.snippets import get_pool_emb
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
import time
from tqdm import tqdm
def to_numpy(tensor):
    """Convert a torch tensor (on any device) to a numpy array.

    ``detach()`` is a cheap no-op for tensors that do not require grad, so
    it is applied unconditionally — this removes the redundant branch on
    ``tensor.requires_grad`` while preserving behavior for both cases.
    """
    return tensor.detach().cpu().numpy()
20,790 | from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, get_pool_emb
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import WebServing
synonyms_generator = SynonymsGenerator(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=maxlen, device=device)
def cal_sen_emb(text_list):
    '''Compute sentence embeddings for a list of texts.

    Encodes every text, pads token/segment ids to a common length, and
    returns the model's pooled output (one embedding row per input text).
    '''
    X, S = [], []
    for t in text_list:
        x, s = tokenizer.encode(t)
        X.append(x)
        S.append(s)
    X = torch.tensor(sequence_padding(X), dtype=torch.long, device=device)
    S = torch.tensor(sequence_padding(S), dtype=torch.long, device=device)
    # model.predict returns (hidden_states, pooled); only the pooled part is needed
    _, Z = model.predict([X, S])
    return Z
The provided code snippet includes necessary dependencies for implementing the `gen_synonyms` function. Write a Python function `def gen_synonyms(text, n=100, k=20)` to solve the following problem:
含义: 产生sent的n个相似句,然后返回最相似的k个。 做法:用seq2seq生成,并用encoder算相似度并排序。
Here is the function:
def gen_synonyms(text, n=100, k=20):
    """Generate ``n`` paraphrase candidates for ``text`` and return the ``k`` most similar.

    Candidates come from the seq2seq generator; similarity is cosine distance
    between encoder sentence embeddings (rows are L2-normalized first).
    """
    r = synonyms_generator.generate(text, n)
    r = [i for i in set(r) if i != text] # drop candidates identical to the original text
    r = [text] + r
    Z = cal_sen_emb(r)
    # L2-normalize rows so the dot product below is cosine similarity
    Z /= (Z**2).sum(dim=1, keepdims=True)**0.5
    # negate so argsort yields most-similar-first
    argsort = torch.matmul(Z[1:], -Z[0]).argsort()
    return [r[i + 1] for i in argsort[:k]]
20,791 | import logging
import logging.config
from typing import Optional, Text
from src.utils.configs import Configuration
TRACE_LOG = "tracelogger"
def get_trace_log():
    """Return the shared trace logger (named by the TRACE_LOG constant)."""
    return logging.getLogger(TRACE_LOG)
20,792 | import json
def send_msg(requestData):
    """POST ``requestData`` as JSON to the local recommendinfo service.

    Returns the decoded JSON response on HTTP 200; implicitly returns
    ``None`` for any other status code (callers must handle that).

    NOTE(review): ``requests`` is not imported in the visible snippet —
    confirm the import exists at module level.
    """
    url = 'http://localhost:8082/recommendinfo'
    headers = {'content-type': 'application/json'}
    ret = requests.post(url, json=requestData, headers=headers, stream=True)
    if ret.status_code==200:
        text = json.loads(ret.text)
        return text
20,793 | from sanic import Sanic
from typing import Optional, Text
import src.config.constants as constants
import src.utils.loggers as loggers
import json
# NOTE(review): create_app's body appears truncated in this snippet (the def
# line is immediately followed by another def) — restore it from the original
# source before running this module.
def create_app(confs: Optional[Text] = None):
def start_server(confs: Optional[Text] = None, port: int = constants.DEFAULT_SERVER_PORT):
    # Build the Sanic app, log the listen URL, then block serving requests.
    server = create_app(confs)
    protocol = "http"
    loggers.get_out_log().info(
        "Starting server on "
        "{}".format(constants.DEFAULT_SERVER_FORMAT.format(protocol, port))
    )
    server.run(host='0.0.0.0', port=port, debug=False, workers=1)
20,794 | from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.losses import UDALoss
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import random
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Build one batch for UDA semi-supervised training.

    For every supervised ``(text, label)`` pair, one unsupervised text is
    sampled and paired with its noise-augmented copy, so the returned token
    tensor holds three equal-sized parts stacked along the batch axis:
    supervised / unsupervised / augmented-unsupervised.
    """
    def add_noise(token_ids, del_ratio=0.3):
        '''Random deletion as a simple noise example; in practice any mix of
        insert/delete/replace noising schemes could be used instead.
        '''
        n = len(token_ids)
        keep_or_not = np.random.rand(n) > del_ratio
        if sum(keep_or_not) == 0:
            keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
        return list(np.array(token_ids)[keep_or_not])
    # batch_token_ids has three parts: (0) supervised data, (1) in-domain
    # unsupervised data, (2) the unsupervised data after augmentation
    batch_token_ids, batch_labels = [[], [], []], []
    for text, label in batch:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids[0].append(token_ids)
        batch_labels.append([label])
        # unsupervised part
        unsup_text = random.choice(unsup_dataset) # randomly pick one unsupervised sample
        token_ids, _ = tokenizer.encode(unsup_text, maxlen=maxlen)
        batch_token_ids[1].append(token_ids)
        # noise only the interior; keep [CLS]/[SEP] boundary tokens intact
        batch_token_ids[2].append(token_ids[:1] + add_noise(token_ids[1:-1]) + token_ids[-1:]) # augmented unsupervised sample
    # flatten the three parts into one batch dimension
    batch_token_ids = [j for i in batch_token_ids for j in i]
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return batch_token_ids, batch_labels.flatten()
20,795 | import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Tokenize and pad a batch of (text, label) pairs for classification.

    Returns ``[token_ids, segment_ids], labels`` with labels flattened to 1-D.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
# NOTE(review): the definition below is byte-identical to the one above and
# shadows it at import time — likely a copy-paste artifact; one can be removed.
def collate_fn(batch):
    """Tokenize and pad a batch of (text, label) pairs for classification.

    Duplicate of the definition above; kept verbatim pending confirmation.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
20,796 | import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Tokenize and pad a batch of (text, label) pairs for classification.

    Returns ``[token_ids, segment_ids], labels`` with labels flattened to 1-D,
    all tensors placed on the module-level ``device``.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
model = Model().to(device)
The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem:
单条样本推理
Here is the function:
def inference(texts):
    """Run single-sample inference and print the predicted class per text.
    """
    for text in texts:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        # [None, :] adds the batch dimension (batch size 1)
        token_ids = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
        segment_ids = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
        logit = model.predict([token_ids, segment_ids])
        # argmax without dim= flattens first — fine here because batch size is 1
        y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
        print(text, ' ----> ', y_pred)
20,797 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, DeepSpeedTrainer
from bert4torch.callbacks import Callback, Logger
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256
def collate_fn(batch):
    """Tokenize and pad a batch of (text, label) pairs for classification.

    CPU-tensor variant (no ``device=``): DeepSpeed moves tensors to the
    right device itself.  Returns ``[token_ids, segment_ids], labels``.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
# NOTE(review): the definition below is byte-identical to the one above and
# shadows it at import time — likely a copy-paste artifact; one can be removed.
def collate_fn(batch):
    """Tokenize and pad a batch of (text, label) pairs for classification.

    Duplicate of the definition above; kept verbatim pending confirmation.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.