id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
20,900 | from glob import glob
import torch
from torch import nn
from bert4torch.snippets import DottableDict, ListDataset, sequence_padding
from bert4torch.models import BaseModel, build_transformer_model
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.trainer import PPOTrainer
from trl import PPOConfig, set_seed
from utils import get_model_config, get_nbit_lora_model
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import json
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, **tokenizer_kwargs)
PROMPT_TEMPLATE = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request.\n\n"
"### Instruction:\n{instruction}\n\n### Response: "
)
def collate_fn(batch):
batch_token_ids, batch_queries = [], []
for query, token_ids in batch:
batch_token_ids.append(token_ids)
batch_queries.append(query)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=pad_token_id, mode='pre'), dtype=torch.long, device=args.device)
return {'input_ids': batch_token_ids, 'query': batch_queries}, None
def preprocess_function(examples):
new_examples = []
for conversation in examples:
for message in conversation:
instruction = message['value']
input = message['from']
if input:
instruction = instruction + "\n" + input
source = PROMPT_TEMPLATE.format_map({"instruction": instruction})
tokenized_question = tokenizer(source, truncation=True, max_length=max_source_length)["input_ids"]
new_examples.append((source, tokenized_question))
return new_examples | null |
20,901 | from glob import glob
import torch
from torch import nn
from bert4torch.snippets import DottableDict, ListDataset, sequence_padding
from bert4torch.models import BaseModel, build_transformer_model
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.trainer import PPOTrainer
from trl import PPOConfig, set_seed
from utils import get_model_config, get_nbit_lora_model
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import json
args.steps_per_epoch = None
args.epochs = 1
args.data_path = 'E:/Github/MedicalGPT/data/finetune/**/*.jsonl'
args.device = "cuda" if torch.cuda.is_available() else "cpu"
args.use_fast_tokenizer = False
args.seed = 1234
args.max_source_length = 256
args.max_target_length = 256
args.reward_model_name_or_path = "E:/pretrain_ckpt/deberta/OpenAssistant@reward-model-deberta-v3-large-v2"
args.load_in_8bit = False
args.max_steps = 100
args.learning_rate = 1e-5
args.batch_size = 8
args.gradient_accumulation_steps = 1
args.target_kl = 0.1
args.init_kl_coef = 0.2
args.adap_kl_ctrl = True
args.trust_remote_code = True
args.use_lora = False
args.load_in_nbit = None
args.model_type, args.model_name_or_path, args.config_path, args.checkpoint_path = get_model_config('bloom')
args.model_type == 'bloom':
args.use_fast_tokenizer = Tru {
"use_fast": args.use_fast_tokenizer,
"trust_remote_code": args.trust_remote_code,
}
if args.model_type == "llama" and tokenizer.pad_token is None:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
pad_token_id = tokenizer.pad_token_id or -100
def collate_fn(batch):
def collate_fn(batch):
batch_token_ids, batch_queries = [], []
for query, token_ids in batch:
batch_token_ids.append(token_ids)
batch_queries.append(query)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=pad_token_id, mode='pre'), dtype=torch.long, device=args.device)
return {'input_ids': batch_token_ids, 'query': batch_queries}, None | null |
20,902 | from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from transformers import AutoTokenizer
from bert4torch.snippets import ListDataset
from bert4torch.callbacks import Logger
from bert4torch.generation import SeqGeneration
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.trainer import PtuningV2Trainer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
import os
max_source_length = 64
max_target_length = 64
prefix = ''
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained(dir_path.replace('/', '\\'), trust_remote_code=True)
def build_prompt(query, history):
if history_column is None:
prompt = query
else:
prompt = ""
for i, (old_query, answer) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, answer)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(a_ids) > max_source_length - 1:
a_ids = a_ids[:max_source_length - 1]
if len(b_ids) > max_target_length - 2:
b_ids = b_ids[:max_target_length - 2]
input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
context_length = input_ids.index(tokenizer.bos_token_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position+1:]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(a_ids) > max_source_length - 1:
a_ids = a_ids[:max_source_length - 1]
if len(b_ids) > max_target_length - 2:
b_ids = b_ids[:max_target_length - 2]
input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
context_length = input_ids.index(tokenizer.bos_token_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position+1:]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels | null |
20,903 | from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from transformers import AutoTokenizer
from bert4torch.snippets import ListDataset
from bert4torch.callbacks import Logger
from bert4torch.generation import SeqGeneration
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.trainer import PtuningV2Trainer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
import os
max_target_length = 64
prefix = ''
tokenizer = AutoTokenizer.from_pretrained(dir_path.replace('/', '\\'), trust_remote_code=True)
def build_prompt(query, history):
if history_column is None:
prompt = query
else:
prompt = ""
for i, (old_query, answer) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, answer)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(a_ids) > max_source_length - 1:
a_ids = a_ids[:max_source_length - 1]
if len(b_ids) > max_target_length - 2:
b_ids = b_ids[:max_target_length - 2]
input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
context_length = input_ids.index(tokenizer.bos_token_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position+1:]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels | null |
20,904 | torch
from bert4torch.models import build_transformer_model
from transformers import AutoTokenizer
import re
import json
import os
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer.padding_side = 'left'
model = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path).half()
model = model.quantize(quantization_method='cpm_kernels', quantization_bit=8).to(device
er_model(config_path=config_path, checkpoint_path=checkpoint_path).to(device)
model
s = json.load(open('E:/Github/bert4torch/examples/datasets/nbce_contexts.json', encoding='utf-8'))
The provided code snippet includes necessary dependencies for implementing the `generate` function. Write a Python function `def generate(max_tokens)` to solve the following problem:
Naive Bayes-based Context Extension 演示代码
Here is the function:
def generate(max_tokens):
"""Naive Bayes-based Context Extension 演示代码
"""
eop_list= []
inputs = tokenizer(batch, padding='longest', return_tensors='pt', return_attention_mask=True, skip_special_tokens=True).to(device)
input_ids = past_token_ids = inputs.input_ids
res = ''
n = input_ids.shape[0]
past_key_values = None
for i in range(max_tokens):
# 模型输出
#print(f'第{i+1}token开始输出')
logits, model_kwargs = model(input_ids,
past_key_values=past_key_values,
past_token_ids=past_token_ids,
use_states=True
)
past_key_values = model_kwargs['past_key_values']
torch.cuda.empty_cache()
# ===== 核心代码开始 =====
beta = 0.25
logits = logits[:, -1]
logits -= torch.max(logits,dim=1).values.reshape(logits.shape[0],-1)
probas = torch.nn.functional.softmax(logits.float(), dim=-1)
logits = probas.log()
k = (probas * logits).sum(dim=-1)[1:].argmax() + 1
logits_max = logits[k]
logits_uncond = logits[0]
logits = (1 + beta) * logits_max - beta * logits_uncond
# ===== 核心代码结束 =====
# 构建分布,采样
# tau = 0.01 # tau = 1是标准的随机采样,tau->0则是贪心搜索
probas = torch.nn.functional.softmax(logits[None], dim=-1)
next_tokens = torch.topk(probas,1).indices
s = tokenizer.convert_ids_to_tokens(next_tokens)
res += s[0]
if s[0] == '<eop>':
if len(eop_list)==3:
break
else:
eop_list.append('<eop>')
else:
eop_list = []
# prepare for next iteration
input_ids = next_tokens.tile(n, 1)
past_token_ids = torch.cat([past_token_ids, input_ids], dim=1)
print('==================question===================')
print(question)
print('===================answer====================')
print(re.sub('<n>+', '\n', re.sub('▁|<eop>|<sop>','',res)))
#['据公开报道,截至2021年6月,吉利德公司有约16,000名员工。', '领英计划裁员716人。', 'Pharmasset被吉利德以110亿美元收购。'] | Naive Bayes-based Context Extension 演示代码 |
20,905 | import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel, DeepSpeedTrainer
from bert4torch.snippets import ListDataset
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.optimizers import get_linear_schedule_with_warmup
from transformers import AutoTokenizer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
from peft import LoraConfig
import os
max_source_length = 64
max_target_length = 64
prefix = ''
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained(dir_path, trust_remote_code=True)
def build_prompt(query, history):
if history_column is None:
prompt = query
else:
prompt = ""
for i, (old_query, answer) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, answer)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(a_ids) > max_source_length - 1:
a_ids = a_ids[:max_source_length - 1]
if len(b_ids) > max_target_length - 2:
b_ids = b_ids[:max_target_length - 2]
input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
context_length = input_ids.index(tokenizer.bos_token_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position+1:]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(a_ids) > max_source_length - 1:
a_ids = a_ids[:max_source_length - 1]
if len(b_ids) > max_target_length - 2:
b_ids = b_ids[:max_target_length - 2]
input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
context_length = input_ids.index(tokenizer.bos_token_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position+1:]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels | null |
20,906 | import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel, DeepSpeedTrainer
from bert4torch.snippets import ListDataset
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.optimizers import get_linear_schedule_with_warmup
from transformers import AutoTokenizer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
from peft import LoraConfig
import os
max_target_length = 64
prefix = ''
tokenizer = AutoTokenizer.from_pretrained(dir_path, trust_remote_code=True)
def build_prompt(query, history):
if history_column is None:
prompt = query
else:
prompt = ""
for i, (old_query, answer) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, answer)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False)
if len(a_ids) > max_source_length - 1:
a_ids = a_ids[:max_source_length - 1]
if len(b_ids) > max_target_length - 2:
b_ids = b_ids[:max_target_length - 2]
input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)
context_length = input_ids.index(tokenizer.bos_token_id)
mask_position = context_length - 1
labels = [-100] * context_length + input_ids[mask_position+1:]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=-100), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels | null |
20,907 | import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
from bert4torch.callbacks import Callback
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from transformers import AutoTokenizer
from bert4torch.snippets import ListDataset, seed_everything
from bert4torch.callbacks import Logger
from bert4torch.generation import SeqGeneration
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.trainer import PtuningV2Trainer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
import os
max_source_length = 64
max_target_length = 64
prefix = ''
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained(dir_path.replace('/', '\\'), trust_remote_code=True)
def build_prompt(query, history=None):
if history is None:
history = []
prompt = ""
for i, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response)
prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True, max_length=max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True, max_length=max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True, max_length=max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True, max_length=max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels | null |
20,908 | import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
from bert4torch.callbacks import Callback
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from transformers import AutoTokenizer
from bert4torch.snippets import ListDataset, seed_everything
from bert4torch.callbacks import Logger
from bert4torch.generation import SeqGeneration
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.trainer import PtuningV2Trainer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
import os
max_target_length = 64
prefix = ''
tokenizer = AutoTokenizer.from_pretrained(dir_path.replace('/', '\\'), trust_remote_code=True)
def build_prompt(query, history=None):
if history is None:
history = []
prompt = ""
for i, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response)
prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True, max_length=max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True, max_length=max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels | null |
20,909 | from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import ListDataset
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.optimizers import get_linear_schedule_with_warmup
from transformers import AutoTokenizer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
from peft import LoraConfig, prepare_model_for_kbit_training ort os
max_source_length = 64
max_target_length = 64
prefix = ''
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained(dir_path, trust_remote_code=True)
def build_prompt(query, history=None):
def collate_train_fn(batch):
def collate_dev_fn(batch):
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True, max_length=max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True, max_length=max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels | null |
20,910 | from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import ListDataset
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.optimizers import get_linear_schedule_with_warmup
from transformers import AutoTokenizer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
from peft import LoraConfig, prepare_model_for_kbit_training ort os
max_target_length = 64
prefix = ''
tokenizer = AutoTokenizer.from_pretrained(dir_path, trust_remote_code=True)
def build_prompt(query, history=None):
if history is None:
history = []
prompt = ""
for i, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response)
prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
return prompt
def collate_train_fn(batch):
batch_token_ids, batch_labels = [], []
for query, answer, history in batch:
prompt = build_prompt(query, history)
prompt = prefix + prompt
a_ids = tokenizer.encode(text=prompt, add_special_tokens=True, truncation=True, max_length=max_source_length)
b_ids = tokenizer.encode(text=answer, add_special_tokens=False, truncation=True, max_length=max_target_length)
context_length = len(a_ids)
input_ids = a_ids + b_ids + [tokenizer.eos_token_id]
labels = [tokenizer.pad_token_id] * context_length + b_ids + [tokenizer.eos_token_id]
batch_token_ids.append(input_ids)
batch_labels.append(labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
batch_labels = torch.tensor(sequence_padding(batch_labels, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
return [batch_token_ids], batch_labels
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels
from transformers import BitsAndBytesConfig
def collate_dev_fn(batch):
batch_prompt, batch_labels = [], []
for query, labels, history in batch:
batch_prompt.append(prefix + build_prompt(query, history))
label_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
batch_labels.append(tokenizer.decode(label_ids, skip_special_tokens=True))
return batch_prompt, batch_labels | null |
20,911 | import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import ListDataset
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.optimizers import get_linear_schedule_with_warmup
from transformers import AutoTokenizer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
import pandas as pd
from peft import LoraConfig, prepare_model_for_kbit_training ort os
max_source_length = 256
prefix = ''
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained(dir_path, use_fast=False)
tokenizer.pad_token_id = 0
def build_prompt(query, answer=None, history=[]):
prompt = ""
for old_query, old_answer in history:
prompt += "<s>Human: {}\n</s><s>Assistant: {}\n</s>".format(old_query, old_answer)
prompt += "<s>Human: {}\n</s><s>Assistant: ".format(query)
if answer is not None:
prompt += answer + "\n</s>"
return prompt
def collate_train_fn(batch):
    """Tokenize full (prompt + answer) texts and pad them into one batch tensor.

    Returns ([token_ids], token_ids): the padded ids serve as both model
    input and language-modeling target.
    """
    encoded = []
    for query, answer, history in batch:
        full_text = prefix + build_prompt(query, answer, history)
        ids = tokenizer(text_target=full_text, max_length=max_source_length, truncation=True)['input_ids']
        encoded.append(ids)
    padded = torch.tensor(sequence_padding(encoded, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
    return [padded], padded
def collate_dev_fn(batch):
    """Build generation prompts and decoded reference answers for evaluation."""
    prompts, references = [], []
    for query, labels, history in batch:
        prompts.append(prefix + build_prompt(query, None, history))
        ref_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
        references.append(tokenizer.decode(ref_ids, skip_special_tokens=True))
    return prompts, references
def collate_train_fn(batch):
    """Tokenize full (prompt + answer) texts and pad them into one batch tensor.

    Returns ([token_ids], token_ids): the padded ids serve as both model
    input and language-modeling target.
    """
    encoded = []
    for query, answer, history in batch:
        full_text = prefix + build_prompt(query, answer, history)
        ids = tokenizer(text_target=full_text, max_length=max_source_length, truncation=True)['input_ids']
        encoded.append(ids)
    padded = torch.tensor(sequence_padding(encoded, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
    return [padded], padded
20,912 | import build_transformer_model
from bert4torch.snippets import sequence_padding, text_segmentate
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import ListDataset
from bert4torch.generation import SeqGeneration
from bert4torch.callbacks import Callback, Logger
from bert4torch.optimizers import get_linear_schedule_with_warmup
from transformers import AutoTokenizer
import json
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import numpy as np
from tqdm import tqdm
import pandas as pd
from peft import LoraConfig, prepare_model_for_kbit_training ort os
max_target_length = 256
prefix = ''
tokenizer = AutoTokenizer.from_pretrained(dir_path, use_fast=False)
tokenizer.pad_token_id = 0
def build_prompt(query, answer=None, history=None):
    """Assemble a chat-style prompt from dialog history plus the current turn.

    Each turn is wrapped in the ``<s>Human: ...</s><s>Assistant: ...</s>``
    template this model family expects.

    :param query: current user utterance
    :param answer: gold answer; when given it is appended (training mode),
        otherwise the prompt ends after "Assistant: " for generation
    :param history: iterable of (old_query, old_answer) pairs; default none
    :return: the formatted prompt string
    """
    # Fix: avoid the mutable-default-argument pitfall (was history=[]).
    if history is None:
        history = []
    prompt = ""
    for old_query, old_answer in history:
        prompt += "<s>Human: {}\n</s><s>Assistant: {}\n</s>".format(old_query, old_answer)
    prompt += "<s>Human: {}\n</s><s>Assistant: ".format(query)
    if answer is not None:
        prompt += answer + "\n</s>"
    return prompt
def collate_train_fn(batch):
    """Tokenize full (prompt + answer) texts and pad them into one batch tensor.

    Returns ([token_ids], token_ids): the padded ids serve as both model
    input and language-modeling target.
    """
    encoded = []
    for query, answer, history in batch:
        full_text = prefix + build_prompt(query, answer, history)
        ids = tokenizer(text_target=full_text, max_length=max_source_length, truncation=True)['input_ids']
        encoded.append(ids)
    padded = torch.tensor(sequence_padding(encoded, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
    return [padded], padded
def collate_dev_fn(batch):
    """Build generation prompts and decoded reference answers for evaluation."""
    prompts, references = [], []
    for query, labels, history in batch:
        prompts.append(prefix + build_prompt(query, None, history))
        ref_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
        references.append(tokenizer.decode(ref_ids, skip_special_tokens=True))
    return prompts, references
def collate_dev_fn(batch):
    """Build generation prompts and decoded reference answers for evaluation."""
    prompts, references = [], []
    for query, labels, history in batch:
        prompts.append(prefix + build_prompt(query, None, history))
        ref_ids = tokenizer(text_target=labels, max_length=max_target_length, truncation=True)['input_ids']
        references.append(tokenizer.decode(ref_ids, skip_special_tokens=True))
    return prompts, references
20,913 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.optimizers import get_linear_schedule_with_warmup
import json
import codecs
import numpy as np
from tqdm import tqdm
import jieba
import editdistance
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch import nn, optim
import re
def read_data(data_file, table_file):
    """Load NL2SQL samples and their tables from two JSON-lines files.

    :param data_file: path to a file with one sample dict per line
    :param table_file: path to a file with one table dict per line, each
        having 'id', 'header' and 'rows' keys
    :return: (data, tables) where data is the list of sample dicts and
        tables maps table id -> dict with 'headers', 'header2id', per-column
        'content' sets and 'all_values' (only sized values are kept)
    """
    with open(data_file, 'r', encoding='utf-8') as f:
        data = [json.loads(line) for line in f]
    tables = {}
    with open(table_file, 'r', encoding='utf-8') as f:
        for line in f:
            raw = json.loads(line)
            table = {
                'headers': raw['header'],
                'header2id': {name: idx for idx, name in enumerate(raw['header'])},
                'content': {},
                'all_values': set(),
            }
            rows = np.array(raw['rows'])
            for idx, name in enumerate(table['headers']):
                table['content'][name] = set(rows[:, idx])
                table['all_values'].update(table['content'][name])
            # Keep only values with a length (drops raw numbers etc.).
            table['all_values'] = set(v for v in table['all_values'] if hasattr(v, '__len__'))
            tables[raw['id']] = table
    return data, tables
20,914 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.optimizers import get_linear_schedule_with_warmup
import json
import codecs
import numpy as np
from tqdm import tqdm
import jieba
import editdistance
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch import nn, optim
import re
def most_similar(s, slist):
    """Return the candidate in slist with the smallest edit distance to s.

    Falls back to s itself when slist is empty.
    """
    if not slist:
        return s
    distances = [editdistance.eval(s, candidate) for candidate in slist]
    return slist[np.argmin(distances)]
The provided code snippet includes necessary dependencies for implementing the `most_similar_2` function. Write a Python function `def most_similar_2(w, s)` to solve the following problem:
从句子s中找与w最相近的片段, 借助分词工具和ngram的方式尽量精确地确定边界。
Here is the function:
def most_similar_2(w, s):
    """Find the span of sentence s closest to w.

    Candidate spans are jieba tokens plus their 2-gram and 3-gram
    concatenations, so the result tends to align with word boundaries.
    """
    words = jieba.lcut(s)
    candidates = list(words)
    candidates.extend([a + b for a, b in zip(words, words[1:])])
    candidates.extend([a + b + c for a, b, c in zip(words, words[1:], words[2:])])
    return most_similar(w, candidates)
20,915 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.optimizers import get_linear_schedule_with_warmup
import json
import codecs
import numpy as np
from tqdm import tqdm
import jieba
import editdistance
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch import nn, optim
import re
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Pad and tensorize one NL2SQL batch; falsy samples are dropped."""
    samples = [item for item in batch if item]
    x1, xm, h, hm, sel, conn, csel, cop = zip(*samples)

    def _pad(seqs, **kwargs):
        # Shared helper: pad then move to the target device as long tensor.
        return torch.tensor(sequence_padding(seqs, **kwargs), dtype=torch.long, device=device)

    x1 = _pad(x1)
    seq_len = x1.shape[1]
    xm = _pad(xm, length=seq_len)
    h = _pad(h)
    hm = _pad(hm)
    sel = _pad(sel)
    conn = _pad(conn)
    csel = _pad(csel, length=seq_len)
    cop = _pad(cop, length=seq_len)
    return [x1, h, hm], [sel, conn, csel, cop, xm, hm]
20,916 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.optimizers import get_linear_schedule_with_warmup
import json
import codecs
import numpy as np
from tqdm import tqdm
import jieba
import editdistance
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch import nn, optim
import re
num_agg = 7_op = 5_cond_conn_op = 3
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = OurTokenizer(token_dict)
def most_similar(s, slist):
    """Return the candidate in slist with the smallest edit distance to s.

    Falls back to s itself when slist is empty.
    """
    if not slist:
        return s
    distances = [editdistance.eval(s, candidate) for candidate in slist]
    return slist[np.argmin(distances)]
model = Model().to(device)
model.compile(
loss=MyLoss(),
optimizer=optimizer,
scheduler=scheduler
)
The provided code snippet includes necessary dependencies for implementing the `nl2sql` function. Write a Python function `def nl2sql(question, table)` to solve the following problem:
输入question和headers,转SQL
Here is the function:
def nl2sql(question, table):
    """Convert a natural-language question plus table headers into a SQL dict.

    Returns a dict with keys 'agg', 'sel', 'conds' and 'cond_conn_op' in the
    NL2SQL-challenge label format.
    """
    # Concatenate question tokens with each header's tokens; h records the
    # offset of every header's first token inside x1.
    x1 = tokenizer.encode(question)[0]
    h = []
    for i in table['headers']:
        _x1 = tokenizer.encode(i)[0]
        h.append(len(x1))
        x1.extend(_x1)
    hm = [1] * len(h)  # header mask: every header position is real here
    pconn, psel, pcop, pcsel = model.predict([
        torch.tensor([x1], dtype=torch.long, device=device),
        torch.tensor([h], dtype=torch.long, device=device),
        torch.tensor([hm], dtype=torch.long, device=device)
    ])
    pconn, psel, pcop, pcsel = pconn.cpu().numpy(), psel.cpu().numpy(), pcop.cpu().numpy(), pcsel.cpu().numpy()
    R = {'agg': [], 'sel': []}
    for i, j in enumerate(psel[0].argmax(1)):
        if j != num_agg - 1:  # class num_agg-1 means "column not selected"
            R['sel'].append(i)
            R['agg'].append(int(j))
    # Decode conditions by combining per-char operator tagging (pcop) with
    # per-span column classification (pcsel).
    conds = []
    v_op = -1
    for i, j in enumerate(pcop[0, :len(question)+1].argmax(1)):
        if j != num_op - 1:
            if v_op != j:
                if v_op != -1:
                    # Close the previous value span before opening a new one.
                    v_end = v_start + len(v_str)
                    csel = pcsel[0][v_start: v_end].mean(0).argmax()
                    conds.append((csel, v_op, v_str))
                v_start = i
                v_op = j
                v_str = question[i - 1]
            else:
                v_str += question[i - 1]
        elif v_op != -1:
            v_end = v_start + len(v_str)
            csel = pcsel[0][v_start: v_end].mean(0).argmax()
            conds.append((csel, v_op, v_str))
            v_op = -1
    R['conds'] = set()
    for i, j, k in conds:
        if re.findall('[^\d\.]', k):
            j = 2  # non-numeric values can only use the equality operator
        if j == 2:
            if k not in table['all_values']:
                # An equality value must appear in the table; otherwise pick
                # the closest existing one.
                k = most_similar(k, list(table['all_values']))
            h = table['headers'][i]
            # Then verify the predicted column actually contains the value;
            # if not, switch to a column that does.
            if k not in table['content'][h]:
                for r, v in table['content'].items():
                    if k in v:
                        i = table['header2id'][r]
                        break
        R['conds'].add((int(i), int(j), str(k)))
    R['conds'] = list(R['conds'])
    if len(R['conds']) <= 1:  # with at most one condition the connector is 0
        R['cond_conn_op'] = 0
    else:
        R['cond_conn_op'] = 1 + int(pconn[0, 1:].argmax())  # must not be 0
    return R
20,917 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.optimizers import get_linear_schedule_with_warmup
import json
import codecs
import numpy as np
from tqdm import tqdm
import jieba
import editdistance
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch import nn, optim
import re
The provided code snippet includes necessary dependencies for implementing the `is_equal` function. Write a Python function `def is_equal(R1, R2)` to solve the following problem:
判断两个SQL字典是否全匹配
Here is the function:
def is_equal(R1, R2):
    """Return True when two SQL dicts fully match.

    The comparison is order-insensitive over (sel, agg) pairs and over
    condition triples, and exact on the condition connector.
    """
    same_conn = R1['cond_conn_op'] == R2['cond_conn_op']
    same_sel = set(zip(R1['sel'], R1['agg'])) == set(zip(R2['sel'], R2['agg']))
    same_conds = set([tuple(c) for c in R1['conds']]) == set([tuple(c) for c in R2['conds']])
    return same_conn & same_sel & same_conds
20,918 | torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything, get_pool_emb
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import yaml
import re
from sklearn.metrics import accuracy_score,f1_score
import sys
from bert4torch.callbacks import AdversarialTraining
maxlen = 256
batch_size = 16
config_path = './chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = './chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = './chinese_L-12_H-768_A-12/vocab.txt'
dataset= '../datasets/auto_instructions.yml'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
ASADataset(ListDataset):
intents_categories,entity_categories = RASADataset(file_path=dataset).intent_entity_labels
print(intents_categories,entity_categories) = Tokenizer(dict_path, do_lower_case=True)
aloader = DataLoader(RASADataset(dataset), batch_size=batch_size, shuffle=True,collate_fn=collate_fn)
valid_dataloader = DataLoader(RASADataset(dataset), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
optim.Adam(model.parameters(), lr=2e-5))
f __name__ == '__main__' and 'train' in sys.argv:
evaluator = Evaluator()
adversarial_train = AdversarialTraining('fgm')
model.fit(train_dataloader, epochs=20, steps_per_epoch=None, callbacks=[evaluator,adversarial_train])
else:
test = "打开车窗"
model.load_weights('./result/best_model.pt')
tokens = tokenizer.tokenize(test, maxlen=maxlen)
token_ids = tokenizer.tokens_to_ids(tokens)
batch_token_ids = torch.tensor(sequence_padding([token_ids]), dtype=torch.long, device=device)
_, entity_pred,intent_pred = model.predict(batch_token_ids)
intent_pred = intents_categories[intent_pred.tolist()[0]]
entities=[]
for e in entity_pred:
entities.append({"entity":test[e[1]-1:e[2]],"type":entity_categories[e[3]-1]})
print(intent_pred,entities)
def collate_fn(batch):
    """Collate joint intent+NER samples into padded tensors.

    Each sample d is (text, intent_label, *entity_spans) where every entity
    span is (start, end, label) in character offsets of the text.
    Returns ([token_ids, entity_ids], [bio_labels, entity_labels, intent_ids]).
    """
    batch_token_ids, batch_labels, batch_entity_ids, batch_entity_labels = [], [], [], []
    intent_label_ids = []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
        # rematch gives each token's character span; use it to convert
        # char offsets to token offsets.
        mapping = tokenizer.rematch(d[0], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        labels = np.zeros(len(token_ids))
        entity_ids, entity_labels = [], []
        for start, end, label in d[2:]:
            # Keep only entities whose boundaries survive tokenization.
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                labels[start] = 1  # mark B
                labels[start + 1:end + 1] = 2  # mark I
                entity_ids.append([start, end])
                entity_labels.append(entity_categories.index(label)+1)
        if not entity_ids:  # need at least one span per sample
            entity_ids.append([0, 0])  # pad with zeros when none survive
            entity_labels.append(0)
        batch_token_ids.append(token_ids)
        batch_labels.append(labels)
        batch_entity_ids.append(entity_ids)
        batch_entity_labels.append(entity_labels)
        intent_label = d[1]
        intent_label_ids.append([intents_categories.index(intent_label)])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)  # [btz, n_entities, start/end]
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device)  # [btz, n_entities]
    intent_label_ids = torch.tensor(intent_label_ids, dtype=torch.long, device=device)
    return [batch_token_ids, batch_entity_ids], [batch_labels, batch_entity_labels, intent_label_ids]
20,919 | torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything, get_pool_emb
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import yaml
import re
from sklearn.metrics import accuracy_score,f1_score
import sys
from bert4torch.callbacks import AdversarialTraining
maxlen = 256
batch_size = 16
config_path = './chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = './chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = './chinese_L-12_H-768_A-12/vocab.txt'
dataset= '../datasets/auto_instructions.yml'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
ASADataset(ListDataset):
intents_categories,entity_categories = RASADataset(file_path=dataset).intent_entity_labels
print(intents_categories,entity_categories) = Tokenizer(dict_path, do_lower_case=True)
aloader = DataLoader(RASADataset(dataset), batch_size=batch_size, shuffle=True,collate_fn=collate_fn)
valid_dataloader = DataLoader(RASADataset(dataset), batch_size=batch_size, collate_fn=collate_fn)
eModel):
    def predict(self, token_ids):
        """Two-stage inference: intent + CRF entity spans, then span typing.

        :param token_ids: padded token-id tensor, 0 is the pad id
        :return: (best_path, entity_tuples, intent_pred) where entity_tuples
            is a set of (sample_id, start, end, entity_type)
        """
        self.eval()
        with torch.no_grad():
            # Stage 1 inference: intent classification + CRF tagging.
            last_hidden_state, pooled_output = self.bert([token_ids])  # [btz, seq_len, hdsz]
            output = self.dropout(pooled_output)
            output = self.dense3(output)
            intent_pred = torch.argmax(output, dim=-1)
            emission_score = self.dense1(last_hidden_state)  # [btz, seq_len, tag_size]
            attention_mask = token_ids.gt(0)
            best_path = self.crf.decode(emission_score, attention_mask)  # [btz, seq_len]
            # Stage 2 inference: collect BI spans from the decoded paths.
            batch_entity_ids = []
            for one_samp in best_path:
                entity_ids = []
                for j, item in enumerate(one_samp):
                    if item.item() == 1:  # B tag opens a new span
                        entity_ids.append([j, j])
                    elif len(entity_ids) == 0:
                        continue
                    elif (len(entity_ids[-1]) > 0) and (item.item() == 2):  # I tag extends it
                        entity_ids[-1][-1] = j
                    elif len(entity_ids[-1]) > 0:
                        entity_ids.append([])
                if not entity_ids:  # need at least one span
                    entity_ids.append([0, 0])  # pad with zeros when no entity found
                batch_entity_ids.append([i for i in entity_ids if i])
            batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)  # [btz, n_entities, start/end]
            # Gather span-boundary hidden states and average them per span.
            btz, entity_count, _ = batch_entity_ids.shape
            hidden_size = last_hidden_state.shape[-1]
            gather_index = batch_entity_ids.reshape(btz, -1, 1).repeat(1, 1, hidden_size)
            entity_states = torch.gather(last_hidden_state, dim=1, index=gather_index).reshape(btz, entity_count, -1, hidden_size)
            entity_states = torch.mean(entity_states, dim=2)  # mean of span head/tail states
            entity_logit = self.dense2(entity_states)  # [btz, n_entities, n_types]
            entity_pred = torch.argmax(entity_logit, dim=-1)  # [btz, n_entities]
            # Each element is a (sample_id, start, end, type) tuple.
            entity_tulpe = trans_entity2tuple(batch_entity_ids, entity_pred)
        return best_path, entity_tulpe, intent_pred
model = Model().to(device)
optim.Adam(model.parameters(), lr=2e-5))
def trans_entity2tuple(entity_ids, entity_labels):
    '''Convert span/label tensors into a set of (sample_id, start, end, entity_type)
    tuples for metric computation.
    '''
    tuples = set()
    for sample_idx, sample in enumerate(entity_ids):
        for ent_idx, span in enumerate(sample):
            start, end = span[0].item(), span[1].item()
            if start * end != 0:  # spans touching position 0 are padding
                tuples.add((sample_idx, start, end, entity_labels[sample_idx, ent_idx].item()))
    return tuples
f __name__ == '__main__' and 'train' in sys.argv:
evaluator = Evaluator()
adversarial_train = AdversarialTraining('fgm')
model.fit(train_dataloader, epochs=20, steps_per_epoch=None, callbacks=[evaluator,adversarial_train])
else:
test = "打开车窗"
model.load_weights('./result/best_model.pt')
tokens = tokenizer.tokenize(test, maxlen=maxlen)
token_ids = tokenizer.tokens_to_ids(tokens)
batch_token_ids = torch.tensor(sequence_padding([token_ids]), dtype=torch.long, device=device)
_, entity_pred,intent_pred = model.predict(batch_token_ids)
intent_pred = intents_categories[intent_pred.tolist()[0]]
entities=[]
for e in entity_pred:
entities.append({"entity":test[e[1]-1:e[2]],"type":entity_categories[e[3]-1]})
print(intent_pred,entities)
def evaluate(data):
    """Compute entity-level P/R/F1 plus intent accuracy and macro-F1.

    :param data: iterable of ((token_ids, entity_ids), (label, entity_labels,
        intent_labels)) batches as produced by the collate function
    :return: (f2, precision2, recall2, intent_accuracy, intent_f1) where the
        first three are entity-granularity metrics
    """
    X1, Y1, Z1 = 1e-10, 1e-10, 1e-10  # stage-1 (token-level) counters
    X2, Y2, Z2 = 1e-10, 1e-10, 1e-10  # stage-2 (entity-level) counters
    intentLabels = []
    intentPreds = []
    for (token_ids, entity_ids), (label, entity_labels, intent_labels) in tqdm(data):
        scores, entity_pred, intent_pred = model.predict(token_ids)  # [btz, seq_len]
        # Stage-1 metrics: token granularity.
        attention_mask = label.gt(0)
        X1 += (scores.eq(label) * attention_mask).sum().item()
        Y1 += scores.gt(0).sum().item()
        Z1 += label.gt(0).sum().item()
        # Stage-2 metrics: entity granularity.
        entity_true = trans_entity2tuple(entity_ids, entity_labels)
        X2 += len(entity_pred.intersection(entity_true))
        Y2 += len(entity_pred)
        Z2 += len(entity_true)
        intentLabels += intent_labels.flatten().tolist()
        intentPreds += intent_pred.tolist()
    intent_accuracy = accuracy_score(intentLabels, intentPreds)
    # Intent metrics.
    intent_f1 = f1_score(intentLabels, intentPreds, average='macro')
    # f1, precision, recall = 2 * X1 / (Y1 + Z1), X1 / Y1, X1 / Z1
    f2, precision2, recall2 = 2 * X2 / (Y2 + Z2), X2 / Y2, X2 / Z2
    return f2, precision2, recall2, intent_accuracy, intent_f1
20,920 | e
from tqdm import tqdm
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.generation import AutoRegressiveDecoder
import torch
from torch.utils.data import Dataset, DataLoader
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
maxlen = 256
batch_size = 8
epochs = 10000
fig_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
    """Encode raw texts and pad token/segment ids into batch tensors.

    Returns ([token_ids, segment_ids], token_ids) — the ids double as the
    LM target.
    """
    token_seqs, segment_seqs = [], []
    for text in batch:
        ids, seg = tokenizer.encode(text)
        token_seqs.append(ids)
        segment_seqs.append(seg)
    token_tensor = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_tensor = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    return [token_tensor, segment_tensor], token_tensor
aloader = DataLoader(MyDataset('E:/data/corpus/pretrain/金庸小说/*.txt'),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
el = build_transformer_model(
config_path,
checkpoint_path,
with_mlm=True,
application='lm',
keep_tokens=keep_tokens, # 只保留keep_tokens中的字,精简原字表
add_trainer=True
).to(device)
summary(model, input_data=[next(iter(train_dataloader))[0]])
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
toryCompletion(AutoRegressiveDecoder):
"""基于随机采样的故事续写
"""
story_completion = StoryCompletion(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=maxlen, device=device)
lass Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=epochs, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('./best_model.weights')
def collate_fn(batch):
    """Encode raw texts and pad token/segment ids into batch tensors.

    Returns ([token_ids, segment_ids], token_ids) — the ids double as the
    LM target.
    """
    token_seqs, segment_seqs = [], []
    for text in batch:
        ids, seg = tokenizer.encode(text)
        token_seqs.append(ids)
        segment_seqs.append(seg)
    token_tensor = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_tensor = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    return [token_tensor, segment_tensor], token_tensor
20,921 | e
from tqdm import tqdm
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.generation import AutoRegressiveDecoder
import torch
from torch.utils.data import Dataset, DataLoader
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
maxlen = 256
batch_size = 8
epochs = 10000
fig_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids = [], []
for text in batch:
token_ids, segment_ids = tokenizer.encode(text)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_token_ids
aloader = DataLoader(MyDataset('E:/data/corpus/pretrain/金庸小说/*.txt'),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
el = build_transformer_model(
config_path,
checkpoint_path,
with_mlm=True,
application='lm',
keep_tokens=keep_tokens, # 只保留keep_tokens中的字,精简原字表
add_trainer=True
).to(device)
summary(model, input_data=[next(iter(train_dataloader))[0]])
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
toryCompletion(AutoRegressiveDecoder):
"""基于随机采样的故事续写
"""
    def generate(self, text, n=1, topp=0.95):
        """Continue *text* with nucleus (top-p) random sampling.

        :param text: story prefix to extend
        :param n: number of continuations to sample
        :param topp: nucleus-sampling probability-mass threshold
        :return: list of n strings, each the prefix plus one continuation
        """
        token_ids, _ = tokenizer.encode(text)
        # Drop the trailing token so decoding continues from the prefix.
        results = self.random_sample([token_ids[:-1]], n=n, topp=topp)  # random-sampling based decoding
        return [text + tokenizer.decode(ids.cpu().numpy()) for ids in results]
story_completion = StoryCompletion(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=maxlen, device=device)
lass Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=epochs, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('./best_model.weights')
def just_show():
    """Print sampled story continuations for three fixed demo prefixes."""
    s1 = u'当晚两人在一家小客店中宿歇。张无忌躺在炕上,越想越是担心,走到赵敏窗外,但听她呼吸调匀,正自香梦沉酣。'
    s2 = u'虚竹飞身跃上松树的枝干,只见段延庆的钢杖深深嵌在树枝之中,全凭一股内力粘劲,挂住了下面四人,内力之深厚,实是非同小可。虚竹伸左手抓住钢杖,提将上来。'
    s3 = u'杨过居住在侠客岛,是令狐冲的弟子,武器是金蛇剑。'
    for prompt_text in (s1, s2, s3):
        completions = story_completion.generate(prompt_text)
        print(u'输入: %s' % prompt_text)
        print(u'结果: %s\n' % ('\n'.join(completions)))
20,922 | from itertools import groupby
from tqdm import tqdm
from bert4torch.losses import SparseMultilabelCategoricalCrossentropy
from bert4torch.tokenizers import Tokenizer
from bert4torch.layers import EfficientGlobalPointer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
import torch
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
maxlen = 128
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Collate raw event samples into padded tensors for GlobalPointer training.

    Fix: the original had a duplicated nested ``def collate_fn(batch):`` header,
    making the outer function merely define an inner one and return None; the
    duplicate header is removed so the function actually collates.

    For each sample: tokenize the text, map argument character offsets to token
    offsets, then build three sparse label structures:
      * argu_labels: for each (event_type, role) label, the argument (head, tail) spans
      * head_labels: pairs of argument head positions co-occurring in one event
      * tail_labels: pairs of argument tail positions co-occurring in one event

    :return: ([token_ids, segment_ids], [argu_labels, head_labels, tail_labels])
    """
    batch_token_ids, batch_segment_ids = [], []
    batch_argu_labels, batch_head_labels, batch_tail_labels = [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d['text'], maxlen=maxlen)
        # rematch computes each token's character span in the original text.
        mapping = tokenizer.rematch(d['text'], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        segment_ids = [0] * len(token_ids)
        # Keep only event arguments whose boundaries survive truncation.
        events = []
        for e in d['events']:
            events.append([])
            for t, r, a, i in e:
                label = labels.index((t, r))
                start, end = i, i + len(a) - 1
                if start in start_mapping and end in end_mapping:
                    start, end = start_mapping[start], end_mapping[end]
                    events[-1].append((label, start, end))
        # Build the sparse label sets.
        argu_labels = [set() for _ in range(len(labels))]
        head_labels, tail_labels = set(), set()
        for e in events:
            for l, h, t in e:
                argu_labels[l].add((h, t))
            for i1, (_, h1, t1) in enumerate(e):
                for i2, (_, h2, t2) in enumerate(e):
                    if i2 > i1:
                        head_labels.add((min(h1, h2), max(h1, h2)))
                        tail_labels.add((min(t1, t2), max(t1, t2)))
        for label in argu_labels + [head_labels, tail_labels]:
            if not label:  # every label set needs at least one entry
                label.add((0, 0))  # pad empty sets with (0, 0)
        argu_labels = sequence_padding([list(l) for l in argu_labels])
        head_labels = sequence_padding([list(head_labels)])
        tail_labels = sequence_padding([list(tail_labels)])
        # Accumulate the batch.
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_argu_labels.append(argu_labels)
        batch_head_labels.append(head_labels)
        batch_tail_labels.append(tail_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_argu_labels = torch.tensor(sequence_padding(batch_argu_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], [batch_argu_labels, batch_head_labels, batch_tail_labels]
20,923 | from itertools import groupby
from tqdm import tqdm
from bert4torch.losses import SparseMultilabelCategoricalCrossentropy
from bert4torch.tokenizers import Tokenizer
from bert4torch.layers import EfficientGlobalPointer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
import torch
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
def collate_fn(batch):
    """Collate raw event samples into padded tensors for GlobalPointer training.

    For each sample: tokenize the text, map argument character offsets to token
    offsets, then build three sparse label structures:
      * argu_labels: for each (event_type, role) label, the argument (head, tail) spans
      * head_labels: pairs of argument head positions co-occurring in one event
      * tail_labels: pairs of argument tail positions co-occurring in one event

    :return: ([token_ids, segment_ids], [argu_labels, head_labels, tail_labels])
    """
    batch_token_ids, batch_segment_ids = [], []
    batch_argu_labels, batch_head_labels, batch_tail_labels = [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d['text'], maxlen=maxlen)
        # rematch computes each token's character span in the original text
        # and returns a 2-D array of positions.
        mapping = tokenizer.rematch(d['text'], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        segment_ids = [0] * len(token_ids)
        # Organize events, keeping only arguments that survive truncation.
        events = []
        for e in d['events']:
            events.append([])
            for t, r, a, i in e:
                label = labels.index((t, r))
                start, end = i, i + len(a) - 1
                if start in start_mapping and end in end_mapping:
                    start, end = start_mapping[start], end_mapping[end]
                    events[-1].append((label, start, end))
        # Build the sparse label sets.
        argu_labels = [set() for _ in range(len(labels))]
        head_labels, tail_labels = set(), set()
        for e in events:
            for l, h, t in e:
                argu_labels[l].add((h, t))
            for i1, (_, h1, t1) in enumerate(e):
                for i2, (_, h2, t2) in enumerate(e):
                    if i2 > i1:
                        head_labels.add((min(h1, h2), max(h1, h2)))
                        tail_labels.add((min(t1, t2), max(t1, t2)))
        for label in argu_labels + [head_labels, tail_labels]:
            if not label:  # every label set needs at least one entry
                label.add((0, 0))  # pad empty sets with (0, 0)
        argu_labels = sequence_padding([list(l) for l in argu_labels])
        head_labels = sequence_padding([list(head_labels)])
        tail_labels = sequence_padding([list(tail_labels)])
        # Accumulate the batch.
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_argu_labels.append(argu_labels)
        batch_head_labels.append(head_labels)
        batch_tail_labels.append(tail_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_argu_labels = torch.tensor(sequence_padding(batch_argu_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.long, device=device)
    # return X, Y
    return [batch_token_ids, batch_segment_ids], [batch_argu_labels, batch_head_labels, batch_tail_labels]
class DedupList(list):
    """A list whose append silently ignores elements it already contains."""
    def append(self, x):
        if x in self:
            return
        super(DedupList, self).append(x)
def extract_events(text, threshold=0, trigger=True):
    """Extract all events contained in ``text``.

    Args:
        text: raw input sentence.
        threshold: score above which an argument span / link is kept.
        trigger: if True, drop events that contain no trigger-word argument.

    Returns:
        List of events; each event is a list of
        ``(event_type, role, argument_text, char_start)`` tuples.
    """
    tokens = tokenizer.tokenize(text, maxlen=maxlen)
    # character positions of every token in the original text
    mapping = tokenizer.rematch(text, tokens)
    token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
    token_ids = torch.tensor([token_ids], dtype=torch.long, device=device)
    segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device)
    outputs = model.predict([token_ids, segment_ids])
    # for item in outputs:
    #     print(item.shape)
    # keep only the single sample of the batch
    outputs = [o[0].cpu().numpy() for o in outputs]
    # 1) extract argument spans; outputs[0] is indexed (label, head, tail)
    argus = set()
    # mask out the leading [CLS] and trailing [SEP] positions on both axes
    outputs[0][:, [0, -1]] -= np.inf
    outputs[0][:, :, [0, -1]] -= np.inf
    for l, h, t in zip(*np.where(outputs[0] > threshold)):
        argus.add(labels[l] + (h, t))
    # 2) link argument pairs whose head-pair and tail-pair scores both pass
    links = set()
    for i1, (_, _, h1, t1) in enumerate(argus):
        for i2, (_, _, h2, t2) in enumerate(argus):
            if i2 > i1:
                if outputs[1][0, min(h1, h2), max(h1, h2)] > threshold:
                    if outputs[2][0, min(t1, t2), max(t1, t2)] > threshold:
                        links.add((h1, t1, h2, t2))
                        links.add((h2, t2, h1, t1))
    # 3) group arguments by event type and search cliques as candidate events
    events = []
    for _, sub_argus in groupby(sorted(argus), key=lambda s: s[0]):
        for event in clique_search(list(sub_argus), links):
            events.append([])
            for argu in event:
                # map token span back to a character span of the raw text
                start, end = mapping[argu[2]][0], mapping[argu[3]][-1] + 1
                events[-1].append(argu[:2] + (text[start:end], start))
            # optionally require a trigger-word argument in each event
            if trigger and all([argu[1] != u'触发词' for argu in event]):
                events.pop()
    return events
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(data, threshold=0)` to solve the following problem:
评估函数,计算f1、precision、recall
Here is the function:
def evaluate(data, threshold=0):
    """Compute f1 / precision / recall at event level and argument level.

    Args:
        data: iterable of samples, each a dict with 'text' and gold 'events'.
        threshold: decoding threshold forwarded to ``extract_events``.

    Returns:
        ``(e_f1, e_pr, e_rc, a_f1, a_pr, a_rc)`` — event-level metrics
        followed by argument-level metrics.
    """
    ex, ey, ez = 1e-10, 1e-10, 1e-10  # event level: hits / predicted / gold
    ax, ay, az = 1e-10, 1e-10, 1e-10  # argument level: hits / predicted / gold
    for d in tqdm(data, ncols=0):
        pred_events = extract_events(d['text'], threshold, False)
        # event level: only predictions containing a trigger word are counted
        R, T = DedupList(), DedupList()
        for event in pred_events:
            if any([argu[1] == u'触发词' for argu in event]):
                R.append(list(sorted(event)))
        for event in d['events']:
            T.append(list(sorted(event)))
        for event in R:
            if event in T:
                ex += 1
        ey += len(R)
        ez += len(T)
        # argument level: trigger words themselves are excluded
        R, T = DedupList(), DedupList()
        for event in pred_events:
            for argu in event:
                if argu[1] != u'触发词':
                    R.append(argu)
        for event in d['events']:
            for argu in event:
                if argu[1] != u'触发词':
                    T.append(argu)
        for argu in R:
            if argu in T:
                ax += 1
        ay += len(R)
        az += len(T)
    e_f1, e_pr, e_rc = 2 * ex / (ey + ez), ex / ey, ex / ez
    a_f1, a_pr, a_rc = 2 * ax / (ay + az), ax / ay, ax / az
    return e_f1, e_pr, e_rc, a_f1, a_pr, a_rc
20,924 | from itertools import groupby
from tqdm import tqdm
from bert4torch.losses import SparseMultilabelCategoricalCrossentropy
from bert4torch.tokenizers import Tokenizer
from bert4torch.layers import EfficientGlobalPointer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
import torch
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
with open(schema_path, 'r', encoding='utf-8') as f:
for l in f:
l = json.loads(l)
t = l['event_type']
for r in [u'触发词'] + [s['role'] for s in l['role_list']]:
labels.append((t, r))
def collate_fn(batch):
    """Build a training batch for the GlobalPointer event-extraction model.

    Returns ``([token_ids, segment_ids], [argu_labels, head_labels, tail_labels])``
    where the label tensors hold sparse (head, tail) index pairs.
    """
    batch_token_ids, batch_segment_ids = [], []
    batch_argu_labels, batch_head_labels, batch_tail_labels = [], [], []
    for d in batch:
        tokens = tokenizer.tokenize(d['text'], maxlen=maxlen)
        # rematch returns, per token, its character positions in the raw text
        mapping = tokenizer.rematch(d['text'], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        segment_ids = [0] * len(token_ids)
        # collect events, converting character spans to token spans
        events = []
        for e in d['events']:
            events.append([])
            for t, r, a, i in e:
                label = labels.index((t, r))
                start, end = i, i + len(a) - 1
                # spans that were truncated away by maxlen are dropped
                if start in start_mapping and end in end_mapping:
                    start, end = start_mapping[start], end_mapping[end]
                    events[-1].append((label, start, end))
        # build label sets
        argu_labels = [set() for _ in range(len(labels))]
        head_labels, tail_labels = set(), set()
        for e in events:
            for l, h, t in e:
                argu_labels[l].add((h, t))
            # link every pair of arguments of the same event by head and tail
            for i1, (_, h1, t1) in enumerate(e):
                for i2, (_, h2, t2) in enumerate(e):
                    if i2 > i1:
                        head_labels.add((min(h1, h2), max(h1, h2)))
                        tail_labels.add((min(t1, t2), max(t1, t2)))
        for label in argu_labels + [head_labels, tail_labels]:
            if not label:  # every label set needs at least one entry
                label.add((0, 0))  # pad empty sets with (0, 0)
        argu_labels = sequence_padding([list(l) for l in argu_labels])
        head_labels = sequence_padding([list(head_labels)])
        tail_labels = sequence_padding([list(tail_labels)])
        # accumulate the batch
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_argu_labels.append(argu_labels)
        batch_head_labels.append(head_labels)
        batch_tail_labels.append(tail_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_argu_labels = torch.tensor(sequence_padding(batch_argu_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.long, device=device)
    # return X, Y
    return [batch_token_ids, batch_segment_ids], [batch_argu_labels, batch_head_labels, batch_tail_labels]
class DedupList(list):
    """List variant that keeps only the first occurrence of each item."""

    def append(self, x):
        if x in self:
            return  # duplicate: nothing to do
        list.append(self, x)
def extract_events(text, threshold=0, trigger=True):
    """Extract all events contained in ``text``.

    Args:
        text: raw input sentence.
        threshold: score above which an argument span / link is kept.
        trigger: if True, drop events that contain no trigger-word argument.

    Returns:
        List of events; each event is a list of
        ``(event_type, role, argument_text, char_start)`` tuples.
    """
    tokens = tokenizer.tokenize(text, maxlen=maxlen)
    # character positions of every token in the original text
    mapping = tokenizer.rematch(text, tokens)
    token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
    token_ids = torch.tensor([token_ids], dtype=torch.long, device=device)
    segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device)
    outputs = model.predict([token_ids, segment_ids])
    # for item in outputs:
    #     print(item.shape)
    # keep only the single sample of the batch
    outputs = [o[0].cpu().numpy() for o in outputs]
    # 1) extract argument spans; outputs[0] is indexed (label, head, tail)
    argus = set()
    # mask out the leading [CLS] and trailing [SEP] positions on both axes
    outputs[0][:, [0, -1]] -= np.inf
    outputs[0][:, :, [0, -1]] -= np.inf
    for l, h, t in zip(*np.where(outputs[0] > threshold)):
        argus.add(labels[l] + (h, t))
    # 2) link argument pairs whose head-pair and tail-pair scores both pass
    links = set()
    for i1, (_, _, h1, t1) in enumerate(argus):
        for i2, (_, _, h2, t2) in enumerate(argus):
            if i2 > i1:
                if outputs[1][0, min(h1, h2), max(h1, h2)] > threshold:
                    if outputs[2][0, min(t1, t2), max(t1, t2)] > threshold:
                        links.add((h1, t1, h2, t2))
                        links.add((h2, t2, h1, t1))
    # 3) group arguments by event type and search cliques as candidate events
    events = []
    for _, sub_argus in groupby(sorted(argus), key=lambda s: s[0]):
        for event in clique_search(list(sub_argus), links):
            events.append([])
            for argu in event:
                # map token span back to a character span of the raw text
                start, end = mapping[argu[2]][0], mapping[argu[3]][-1] + 1
                events[-1].append(argu[:2] + (text[start:end], start))
            # optionally require a trigger-word argument in each event
            if trigger and all([argu[1] != u'触发词' for argu in event]):
                events.pop()
    return events
def isin(event_a, event_b):
    """Return True if ``event_a`` is a sub-event of ``event_b``.

    They match when the event types are equal and every argument of
    ``event_a`` also appears among ``event_b``'s arguments.
    """
    same_type = event_a['event_type'] == event_b['event_type']
    return same_type and all(
        argu in event_b['arguments'] for argu in event_a['arguments']
    )
The provided code snippet includes necessary dependencies for implementing the `predict_to_file` function. Write a Python function `def predict_to_file(in_file, out_file)` to solve the following problem:
预测结果到文件,方便提交
Here is the function:
def predict_to_file(in_file, out_file):
    """Predict events for every line of ``in_file`` and write a submission file.

    Each input line is a JSON object with at least a ``text`` field; the
    corresponding output line is that object augmented with ``event_list``.

    Args:
        in_file: path of the JSONL file to predict on.
        out_file: path of the JSONL result file to write.
    """
    # explicit utf-8 on both handles: the data contains Chinese text and must
    # not depend on the platform default encoding; the with-block also
    # guarantees the files are closed even if prediction raises
    with open(out_file, 'w', encoding='utf-8') as fw, \
            open(in_file, encoding='utf-8') as fr:
        for l in tqdm(fr):
            l = json.loads(l)
            event_list = DedupList()
            for event in extract_events(l['text']):
                final_event = {
                    'event_type': event[0][0],
                    'arguments': DedupList()
                }
                for argu in event:
                    if argu[1] != u'触发词':
                        final_event['arguments'].append({
                            'role': argu[1],
                            'argument': argu[2]
                        })
                # drop previously kept events subsumed by the new one
                event_list = [
                    event for event in event_list
                    if not isin(event, final_event)
                ]
                # keep the new event only if nothing kept already subsumes it
                if not any([isin(final_event, event) for event in event_list]):
                    event_list.append(final_event)
            l['event_list'] = event_list
            l = json.dumps(l, ensure_ascii=False, indent=4)
            fw.write(l + '\n')
20,925 | from bert4torch.models import build_transformer_model, BaseModel, BERT
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
from bert4torch.layers import BertLayer
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchinfo import summary
import copy
from torch.distributions.bernoulli import Bernoulli
maxlen = 128
device = 'cuda' if torch.cuda.is_available() else 'cpu'
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten(
def collate_fn(batch):
    """Tokenize a batch of (text, label) pairs and pad them into tensors."""
    token_id_seqs, segment_id_seqs, label_rows = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        token_id_seqs.append(token_ids)
        segment_id_seqs.append(segment_ids)
        label_rows.append([label])
    model_inputs = [
        torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device),
        torch.tensor(sequence_padding(segment_id_seqs), dtype=torch.long, device=device),
    ]
    targets = torch.tensor(label_rows, dtype=torch.long, device=device)
    return model_inputs, targets.flatten()
20,926 | from bert4torch.models import build_transformer_model, BaseModel, BERT
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
from bert4torch.layers import BertLayer
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchinfo import summary
import copy
from torch.distributions.bernoulli import Bernoulli
model = Model().to(device)
model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), scheduler=replacing_rate_scheduler,
metrics=['accuracy'])
def evaluate(data):
    """Return classification accuracy of ``model`` over an iterable of batches."""
    seen, correct = 0., 0.
    for inputs, y_true in data:
        # class with the highest logit per sample
        y_pred = model.predict(inputs).argmax(axis=1)
        seen += len(y_true)
        correct += (y_true == y_pred).sum()
    return correct / seen
20,927 | json
from bert4torch.models import build_transformer_model, BaseModel, BERT
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
from bert4torch.layers import BertLayer
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchinfo import summary
import copy
from torch.distributions.bernoulli import Bernoulli
maxlen = 128
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
model = Model().to(device)
model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), scheduler=replacing_rate_scheduler,
metrics=['accuracy'])
The provided code snippet includes necessary dependencies for implementing the `predict_to_file` function. Write a Python function `def predict_to_file(in_file, out_file)` to solve the following problem:
输出预测结果到文件 结果文件可以提交到 https://www.cluebenchmarks.com 评测。
Here is the function:
def predict_to_file(in_file, out_file):
    """Write model predictions for ``in_file`` to ``out_file``.

    The result file follows the CLUE submission format and can be uploaded
    to https://www.cluebenchmarks.com for evaluation.

    Args:
        in_file: path of the JSONL file with 'sentence' and 'id' fields.
        out_file: path of the JSONL result file to write.
    """
    # explicit utf-8 and context managers: the input holds Chinese text and
    # the handles should be closed even if prediction raises
    with open(out_file, 'w', encoding='utf-8') as fw, \
            open(in_file, encoding='utf-8') as fr:
        for l in tqdm(fr):
            l = json.loads(l)
            text = l['sentence']
            token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
            label = model.predict([[token_ids], [segment_ids]])[0].argmax()
            l = json.dumps({'id': str(l['id']), 'label': str(label)})
            fw.write(l + '\n')
20,928 | import os
import json
import torch
import sqlite3
import torch.optim as optim
import transformers
from tqdm import tqdm
from tokenizers import AddedToken
from func_timeout import func_set_timeout, FunctionTimedOut
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from transformers import T5TokenizerFast, T5ForConditionalGeneration, MT5ForConditionalGeneration
from transformers.optimization import Adafactor
from bert4torch.snippets import ListDataset, seed_everything
from bert4torch.callbacks import Callback
from bert4torch.models import BaseModel
device = 'cuda' if torch.cuda.is_available() else 'cpu'
text2sql_tokenizer = T5TokenizerFast.from_pretrained(model_name_or_path, add_prefix_space=True)
def collate_fn(batch):
    """Tokenize (input, sql) pairs into padded encoder/decoder training tensors."""
    enc_ids, enc_masks, dec_masks, dec_labels = [], [], [], []
    for inputs, sqls, _, _ in batch:
        tokenized_inputs = text2sql_tokenizer(inputs, padding="max_length", return_tensors="pt", max_length=512, truncation=True)
        with text2sql_tokenizer.as_target_tokenizer():
            tokenized_outputs = text2sql_tokenizer(sqls, padding="max_length", return_tensors='pt', max_length=256, truncation=True)
        labels = tokenized_outputs["input_ids"]
        # mask pad positions out of the loss
        labels[labels == text2sql_tokenizer.pad_token_id] = -100
        enc_ids.append(tokenized_inputs["input_ids"])
        enc_masks.append(tokenized_inputs["attention_mask"])
        dec_masks.append(tokenized_outputs["attention_mask"])
        dec_labels.append(labels)
    batch_tensors = tuple(
        torch.cat(part, dim=0).to(device=device)
        for part in (enc_ids, enc_masks, dec_masks, dec_labels)
    )
    return batch_tensors, None
20,929 | import os
import json
import torch
import sqlite3
import torch.optim as optim
import transformers
from tqdm import tqdm
from tokenizers import AddedToken
from func_timeout import func_set_timeout, FunctionTimedOut
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from transformers import T5TokenizerFast, T5ForConditionalGeneration, MT5ForConditionalGeneration
from transformers.optimization import Adafactor
from bert4torch.snippets import ListDataset, seed_everything
from bert4torch.callbacks import Callback
from bert4torch.models import BaseModel
device = 'cuda' if torch.cuda.is_available() else 'cpu'
text2sql_tokenizer = T5TokenizerFast.from_pretrained(model_name_or_path, add_prefix_space=True)
def collate_fn_eval(batch):
    """Collate evaluation samples, keeping raw inputs and per-sample schema info."""
    raw_inputs, enc_ids, enc_masks, db_ids, tc_originals = [], [], [], [], []
    for inputs, _, db_id, all_tc_original in batch:
        tokenized = text2sql_tokenizer(inputs, padding="max_length", return_tensors="pt", max_length=512, truncation=True)
        raw_inputs.append(inputs)
        enc_ids.append(tokenized["input_ids"])
        enc_masks.append(tokenized["attention_mask"])
        db_ids.append(db_id)
        tc_originals.append(all_tc_original)
    enc_ids = torch.cat(enc_ids, dim=0).to(device=device)
    enc_masks = torch.cat(enc_masks, dim=0).to(device=device)
    return raw_inputs, enc_ids, enc_masks, db_ids, tc_originals
20,930 | import os
import json
import torch
import sqlite3
import torch.optim as optim
import transformers
from tqdm import tqdm
from tokenizers import AddedToken
from func_timeout import func_set_timeout, FunctionTimedOut
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from transformers import T5TokenizerFast, T5ForConditionalGeneration, MT5ForConditionalGeneration
from transformers.optimization import Adafactor
from bert4torch.snippets import ListDataset, seed_everything
from bert4torch.callbacks import Callback
from bert4torch.models import BaseModel
batch_size = 4
num_return_sequences = 8
def execute_sql(cursor, sql):
def get_cursor_from_path(sqlite_path):
def decode_sqls(db_path, generator_outputs, batch_db_ids, batch_inputs, tokenizer, batch_tc_original):
    """Pick, per sample, the first generated SQL that executes without error.

    Args:
        db_path: root directory holding one sqlite db folder per ``db_id``.
        generator_outputs: generated token ids, indexed
            ``[batch, return_sequence, position]`` and ordered best-first.
        batch_db_ids: database id for each sample.
        batch_inputs: raw model inputs (not used in this function).
        tokenizer: tokenizer used to decode the generated ids.
        batch_tc_original: original table/column names (not used here).

    Returns:
        One SQL string per sample; the literal ``"sql placeholder"`` when
        no candidate executed successfully.
    """
    # NOTE(review): these shadow the module-level batch_size / num_return_sequences
    batch_size = generator_outputs.shape[0]
    num_return_sequences = generator_outputs.shape[1]
    final_sqls = []
    for batch_id in range(batch_size):
        pred_executable_sql = "sql placeholder"
        db_id = batch_db_ids[batch_id]
        db_file_path = db_path + "/{}/{}.sqlite".format(db_id, db_id)
        for seq_id in range(num_return_sequences):
            cursor = get_cursor_from_path(db_file_path)
            pred_sequence = tokenizer.decode(generator_outputs[batch_id, seq_id, :], skip_special_tokens=True)
            # the generated sequence is "<prefix> | <sql>": keep the SQL part
            pred_sql = pred_sequence.split("|")[-1].strip()
            pred_sql = pred_sql.replace("='", "= '").replace("!=", " !=").replace(",", " ,")
            try:
                # Note: execute_sql will be success for empty string
                assert len(pred_sql) > 0, "pred sql is empty!"
                results = execute_sql(cursor, pred_sql)
                # if the current sql has no execution error, we record and return it
                pred_executable_sql = pred_sql
                cursor.close()
                cursor.connection.close()
                break
            except Exception as e:
                print(pred_sql)
                print(e)
                cursor.close()
                cursor.connection.close()
            except FunctionTimedOut as fto:
                # presumably reachable because func_timeout's FunctionTimedOut
                # derives from BaseException, not Exception -- TODO confirm
                print(pred_sql)
                print(fto)
                del cursor
        final_sqls.append(pred_executable_sql)
    return final_sqls
20,931 | import build_transformer_model, BaseModel
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.generation import AutoRegressiveDecoder
import torch
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torch.nn as nn
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(token_dict, do_lower_case=True)
def collate_fn(batch):
    """Encode (text, label) pairs; labels ride along as a third model input.

    The token ids double as the language-model targets (second return value).

    Note: the original extract contained a duplicated ``def collate_fn(batch):``
    header, leaving the first definition with an empty body (a syntax error);
    this keeps the single real implementation.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append(label)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids, batch_labels], batch_token_ids
20,932 | import build_transformer_model, BaseModel
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
from bert4torch.callbacks import Callback
from bert4torch.generation import AutoRegressiveDecoder
import torch
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torch.nn as nn
def collate_fn(batch):
random_sentiment = RandomSentiment(
bos_token_id=tokenizer._token_start_id,
eos_token_id=tokenizer._token_end_id,
max_new_tokens=maxlen,
device=device
)
def just_show():
    """Print one positive-sentiment and one negative-sentiment sample."""
    for sentiment_id, title in ((1, u'正面采样:'), (0, u'负面采样:')):
        print(title)
        print(random_sentiment.generate(sentiment_id, 5, 0.95), '\n')
20,933 | from bert4torch.tokenizers import Tokenizer, load_vocab
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from bert4torch.snippets import sequence_padding, ListDataset, take_along_dim
from bert4torch.callbacks import Callback
from cchess import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(token_dict)
tokenizer._token_unk_id = 0
count = 0
def get_count():
    """Return the length-division factor based on the global batch counter.

    The factor shrinks as training progresses, so effective sequence
    length grows over time.
    """
    for limit, factor in ((20000, 8), (40000, 4), (80000, 2)):
        if count < limit:
            return factor
    return 1
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
数据生成器
Here is the function:
def collate_fn(batch):
    """Data generator: encode chess-move sequences for LM training.

    The first token id is replaced with 0 and the trailing token dropped;
    a module-level counter drives a curriculum on effective maxlen.
    """
    batch_token_ids, batch_segment_ids = [], []
    for text, _ in batch:
        # effective maxlen grows over time as get_count() shrinks
        token_ids, segment_ids = tokenizer.encode(' '.join(text), maxlen=maxlen // get_count() + 1)
        # replace the leading token with id 0 and drop the final one
        batch_token_ids.append([0] + token_ids[1:-1])
        batch_segment_ids.append([0] + segment_ids[1:-1])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    # advance the global curriculum counter once per batch
    global count
    count += 1
    # token ids serve as both input and LM target
    return [batch_token_ids, batch_segment_ids], batch_token_ids
20,934 | from bert4torch.tokenizers import Tokenizer, load_vocab
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from bert4torch.snippets import sequence_padding, ListDataset, take_along_dim
from bert4torch.callbacks import Callback
from cchess import *
def convert(tf_path, torch_path):
    # Supervised training of a Chinese-chess model via LM + game records.
    # Write-up: https://kexue.fm/archives/7877
    # This only converts the already-trained model weights (not a pretrained base).
    import numpy as np
    import h5py
    import torch
    # requires keras==2.3.1 for this private saving API
    from keras.engine import saving

    torch_state_dict = {}
    # value format: [torch key, transpose flag]; 1 = transpose, 0 = keep as-is
    key_map = {
        'Embedding-Token/embeddings:0': ['embeddings.word_embeddings.weight', 0],
        'Embedding-Segment/embeddings:0': ['embeddings.segment_embeddings.weight', 0],
        'Embedding-Position/embeddings:0': ['embeddings.position_embeddings.weight', 0],
        'Embedding-Norm/gamma:0': ['embeddings.layerNorm.weight', 0],
        'Embedding-Norm/beta:0': ['embeddings.layerNorm.bias', 0],
        'MLM-Dense/kernel:0': ['mlmDense.weight', 1],
        'MLM-Dense/bias:0': ['mlmDense.bias', 0],
        'MLM-Norm/gamma:0': ['mlmLayerNorm.weight', 0],
        'MLM-Norm/beta:0': ['mlmLayerNorm.bias', 0],
        'MLM-Bias/bias:0': ['mlmBias', 0],
    }
    # per-layer mapping; the keras dense layers are numbered sequentially,
    # six per transformer block (q, k, v, o, ffn-in, ffn-out)
    for i in range(12):
        key_map.update({
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+1}/kernel:0': [f'encoderLayer.{i}.multiHeadAttention.q.weight', 1],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+1}/bias:0': [f'encoderLayer.{i}.multiHeadAttention.q.bias', 0],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+2}/kernel:0': [f'encoderLayer.{i}.multiHeadAttention.k.weight', 1],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+2}/bias:0': [f'encoderLayer.{i}.multiHeadAttention.k.bias', 0],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+3}/kernel:0': [f'encoderLayer.{i}.multiHeadAttention.v.weight', 1],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+3}/bias:0': [f'encoderLayer.{i}.multiHeadAttention.v.bias', 0],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+4}/kernel:0': [f'encoderLayer.{i}.multiHeadAttention.o.weight', 1],
            f'Transformer-{i}-MultiHeadSelfAttention/dense_{i*6+4}/bias:0': [f'encoderLayer.{i}.multiHeadAttention.o.bias', 0],
            f'Transformer-{i}-MultiHeadSelfAttention-Norm/gamma:0': [f'encoderLayer.{i}.attnLayerNorm.weight', 0],
            f'Transformer-{i}-MultiHeadSelfAttention-Norm/beta:0': [f'encoderLayer.{i}.attnLayerNorm.bias', 0],
            f'Transformer-{i}-FeedForward/dense_{i*6+5}/kernel:0': [f'encoderLayer.{i}.feedForward.intermediateDense.weight', 1],
            f'Transformer-{i}-FeedForward/dense_{i*6+5}/bias:0': [f'encoderLayer.{i}.feedForward.intermediateDense.bias', 0],
            f'Transformer-{i}-FeedForward/dense_{i*6+6}/kernel:0': [f'encoderLayer.{i}.feedForward.outputDense.weight', 1],
            f'Transformer-{i}-FeedForward/dense_{i*6+6}/bias:0': [f'encoderLayer.{i}.feedForward.outputDense.bias', 0],
            f'Transformer-{i}-FeedForward-Norm/gamma:0': [f'encoderLayer.{i}.ffnLayerNorm.weight', 0],
            f'Transformer-{i}-FeedForward-Norm/beta:0': [f'encoderLayer.{i}.ffnLayerNorm.bias', 0],
        })
    consume_keys = set()
    with h5py.File(tf_path, mode='r') as f:
        # keras sometimes nests the weights under a 'model_weights' group
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']
        layer_names = saving.load_attributes_from_hdf5_group(f, 'layer_names')
        weight_value_tuples = []
        for k, name in enumerate(layer_names):
            g = f[name]
            weight_names = saving.load_attributes_from_hdf5_group(g, 'weight_names')
            weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
            for i, weight_name in enumerate(weight_names):
                new_key = key_map[weight_name][0]
                if key_map[weight_name][1] == 1:  # transpose
                    torch_state_dict[new_key] = torch.from_numpy(weight_values[i]).T
                else:
                    torch_state_dict[new_key] = torch.from_numpy(weight_values[i])
                # every torch key must be written exactly once
                assert new_key not in consume_keys, 'duplicate keys'
                consume_keys.add(new_key)
    # f may have been rebound to an inner group; close whichever handle remains
    if hasattr(f, 'close'):
        f.close()
    elif hasattr(f.file, 'close'):
        f.file.close()
    # the MLM decoder is weight-tied with the token embeddings
    torch_state_dict['mlmDecoder.weight'] = torch_state_dict['embeddings.word_embeddings.weight']
    torch_state_dict['mlmDecoder.bias'] = torch_state_dict['mlmBias']
    # for k, v in torch_state_dict.items():
    #     print(k, v.shape)
    torch.save(torch_state_dict, torch_path)
20,935 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
The provided code snippet includes necessary dependencies for implementing the `is_accelerate_available` function. Write a Python function `def is_accelerate_available(check_partial_state=False)` to solve the following problem:
是否可以使用accelerate
Here is the function:
def is_accelerate_available(check_partial_state=False):
'''是否可以使用accelerate'''
accelerate_available = importlib.util.find_spec("accelerate") is not None
if accelerate_available:
if check_partial_state:
return version.parse(importlib_metadata.version("accelerate")) >= version.parse("0.17.0")
else:
return True
else:
return False | 是否可以使用accelerate |
20,936 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
The provided code snippet includes necessary dependencies for implementing the `is_flash_attn_available` function. Write a Python function `def is_flash_attn_available()` to solve the following problem:
是否可以使用包flash_attn
Here is the function:
def is_flash_attn_available():
    '''Whether the flash_attn package (>= 2.1.0) is usable on this machine.

    Requires both an installed, sufficiently recent flash_attn and an
    available CUDA device.
    '''
    if not is_package_available("flash_attn"):
        return False
    # fix: importlib.metadata must be imported explicitly -- accessing it as a
    # bare attribute of importlib can raise AttributeError when no other
    # module has imported the submodule yet
    import importlib.metadata
    _flash_attn_available = version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0")
    return _flash_attn_available and torch.cuda.is_available()
20,937 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
The provided code snippet includes necessary dependencies for implementing the `is_xformers_available` function. Write a Python function `def is_xformers_available()` to solve the following problem:
是否可以使用xformers加速
Here is the function:
def is_xformers_available():
    '''Whether the xformers acceleration package is importable (returns bool).'''
    return is_package_available("xformers")
20,938 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
The provided code snippet includes necessary dependencies for implementing the `is_fastapi_available` function. Write a Python function `def is_fastapi_available()` to solve the following problem:
是否可以使用包fastapi
Here is the function:
def is_fastapi_available():
    '''Whether the fastapi package is importable (returns bool).'''
    return is_package_available('fastapi')
20,939 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
def is_pydantic_available():
    '''Whether the pydantic package is importable (returns bool).'''
    return is_package_available('pydantic')
20,940 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
def is_trl_available():
    '''Whether the trl package is importable (returns bool).'''
    return is_package_available("trl")
20,941 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
def is_sseclient_available():
    '''Whether the sseclient package is importable.

    Returns a bool for consistency with the other ``is_*_available`` helpers
    (previously this leaked the raw ModuleSpec-or-None from ``find_spec``,
    which is truthy/falsy but not a bool).
    '''
    return importlib.util.find_spec("sseclient") is not None
20,942 | import importlib.util
from typing import Any, Tuple, Union
import torch
from packaging import version
from torch4keras.snippets.import_utils import is_package_available
import sys
def is_streamlit_available():
    '''Whether the streamlit package is importable (returns bool).'''
    return is_package_available('streamlit')
20,943 | from contextlib import contextmanager
import torch
from torch import nn
import os
def parse_flag_from_env(key, default=False):
    """Return a truthy value for env var ``key``, or ``default`` when unset."""
    raw_value = os.environ.get(key, str(default))
    # str_to_bool returns an int; exactly 1 means "true"
    return str_to_bool(raw_value) == 1
def init_on_device(device: torch.device, include_buffers: bool = None):
    """
    A context manager under which models are initialized with all parameters on the specified device.

    NOTE(review): this is a generator intended to be used via ``@contextmanager``;
    the decorator appears to have been lost in this extract -- confirm upstream.

    Args:
        device (`torch.device`):
            Device to initialize all parameters on.
        include_buffers (`bool`, *optional*):
            Whether or not to also put all buffers on the meta device while initializing.

    Example:

    ```python
    import torch.nn as nn
    from accelerate import init_on_device

    with init_on_device(device=torch.device("cuda")):
        tst = nn.Linear(100, 100)  # on `cuda` device
    ```
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)

    # TODO(shingjan): remove the torch version check once older versions are deprecated
    if (int(torch.__version__.split('.')[0]) > 1) and include_buffers:
        # torch >= 2 supports `with device:` natively, covering buffers too
        with device:
            yield
        return

    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        # register normally, then rebuild the parameter on the target device
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)

    def register_empty_buffer(module, name, buffer, persistent=True):
        # register normally, then move the buffer to the target device
        old_register_buffer(module, name, buffer, persistent=persistent)
        if buffer is not None:
            module._buffers[name] = module._buffers[name].to(device)

    # Patch tensor creation so torch.empty/zeros/ones/full also land on `device`
    if include_buffers:
        tensor_constructors_to_patch = {
            torch_function_name: getattr(torch, torch_function_name)
            for torch_function_name in ["empty", "zeros", "ones", "full"]
        }
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):
        def wrapper(*args, **kwargs):
            # force the target device regardless of what the caller passed
            kwargs["device"] = device
            return fn(*args, **kwargs)

        return wrapper

    try:
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
        yield
    finally:
        # always restore the original registration hooks and constructors
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)
The provided code snippet includes necessary dependencies for implementing the `init_empty_weights` function. Write a Python function `def init_empty_weights(include_buffers: bool = None)` to solve the following problem:
A context manager under which models are initialized with all parameters on the meta device, therefore creating an empty model. Useful when just initializing the model would blow the available RAM. Args: include_buffers (`bool`, *optional*): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn from accelerate import init_empty_weights # Initialize a model with 100 billions parameters in no time and without using any RAM. with init_empty_weights(): tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` <Tip warning={true}> Any model created under this context manager has no weights. As such you can't do something like `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. </Tip>
Here is the function:
def init_empty_weights(include_buffers: bool = None):
    """Context manager that creates model parameters on the ``meta`` device.

    Any module instantiated inside the ``with`` block allocates no real storage,
    so huge models can be "built" without RAM. Such a model has no weights: load
    them (e.g. via ``load_checkpoint_and_dispatch``) before moving or using it.

    :param include_buffers: also place buffers on the meta device; when None the
        ``ACCELERATE_INIT_INCLUDE_BUFFERS`` env flag decides.
    """
    buffers_on_meta = include_buffers
    if buffers_on_meta is None:
        buffers_on_meta = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
    with init_on_device(torch.device("meta"), include_buffers=buffers_on_meta) as ctx:
        yield ctx
20,944 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `insert_arguments` function. Write a Python function `def insert_arguments(**arguments)` to solve the following problem:
装饰器,为类方法增加参数(主要用于类的__init__方法)
Here is the function:
def insert_arguments(**arguments):
    """Decorator that injects extra keyword arguments into a method (mainly ``__init__``).

    Each name in ``arguments`` becomes an attribute on ``self`` with the given
    default; a caller may override it by passing the same name as a keyword
    argument, which is consumed (popped) before the wrapped method runs.
    """
    from functools import wraps
    def actual_decorator(func):
        @wraps(func)  # preserve the wrapped method's name/docstring for introspection
        def new_func(self, *args, **kwargs):
            for k, v in arguments.items():
                if k in kwargs:
                    v = kwargs.pop(k)
                setattr(self, k, v)
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
20,945 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `delete_arguments` function. Write a Python function `def delete_arguments(*arguments)` to solve the following problem:
装饰器,为类方法删除参数(主要用于类的__init__方法)
Here is the function:
def delete_arguments(*arguments):
    """Decorator that forbids the listed keyword arguments (mainly for ``__init__``).

    Passing any of the listed names as a keyword raises ``TypeError``, mimicking
    Python's own unexpected-keyword error message.
    """
    from functools import wraps
    def actual_decorator(func):
        @wraps(func)  # preserve the wrapped method's name/docstring for introspection
        def new_func(self, *args, **kwargs):
            for k in arguments:
                if k in kwargs:
                    raise TypeError(
                        '%s got an unexpected keyword argument \'%s\'' %
                        (self.__class__.__name__, k)
                    )
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
20,946 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `cal_ts_num` function. Write a Python function `def cal_ts_num(tensor_shape)` to solve the following problem:
查看某个tensor在gc中的数量
Here is the function:
def cal_ts_num(tensor_shape):
    '''Count (and print) how many CUDA tensors of the given shape are tracked by gc.'''
    matched = 0
    for candidate in gc.get_objects():
        try:
            if not torch.is_tensor(candidate):
                continue
            # only CUDA tensors with exactly the requested shape are counted
            if candidate.is_cuda and candidate.size() == tensor_shape:
                print(candidate.shape)
                matched += 1
        except Exception as e:
            print('A trivial exception occured: {}'.format(e))
    print(matched)
20,947 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `get_state_dict_dtype` function. Write a Python function `def get_state_dict_dtype(state_dict)` to solve the following problem:
Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype.
Here is the function:
def get_state_dict_dtype(state_dict):
    """
    Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype.
    """
    for t in state_dict.values():
        if t.is_floating_point():
            return t.dtype
    # no floating dtype found: fall back to the dtype of the first entry.
    # BUGFIX: the original `next(state_dict.values())` raised TypeError because
    # dict_values is a view, not an iterator; wrap it in iter() first.
    return next(iter(state_dict.values())).dtype
20,948 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `set_default_torch_dtype` function. Write a Python function `def set_default_torch_dtype(dtype: torch.dtype, model_name='model') -> torch.dtype` to solve the following problem:
设置默认权重类型
Here is the function:
def set_default_torch_dtype(dtype: torch.dtype, model_name='model') -> torch.dtype:
    """Set torch's global default floating dtype.

    :param dtype: target dtype, either a ``torch.dtype`` or one of
        ``'float16'/'float32'/'float64'/'bfloat16'``
    :param model_name: name used in log messages (non-str values fall back to 'model')
    :return: tuple ``(new_dtype, previous_default_dtype)``
    :raises ValueError: if ``dtype`` is not a floating point dtype
    """
    if not isinstance(model_name, str):
        model_name = 'model'
    str2dtype = {
        'float16': torch.float16,
        'float32': torch.float32,
        'float64': torch.float64,
        'bfloat16': torch.bfloat16,
    }
    if isinstance(dtype, str):
        dtype = str2dtype[dtype]
    if not dtype.is_floating_point:
        raise ValueError(f"Can't instantiate {model_name} under dtype={dtype} since it is not a floating point dtype")
    previous = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    if previous != dtype:
        log_info(f"Instantiating {model_name} under default dtype {dtype}.")
    return dtype, previous
20,949 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `load_state_dict_into_meta_model` function. Write a Python function `def load_state_dict_into_meta_model(model, state_dict, device_map=None, torch_dtype=None)` to solve the following problem:
把state_dict导入meta_model 为了代码简洁,这里device_map需要外部手动指定, 形式如{'embeddings.word_embeddings': 0, 'LayerNormFinal': 0, 'lm_head': 0}
Here is the function:
def load_state_dict_into_meta_model(model, state_dict, device_map=None, torch_dtype=None):
    """Materialize a meta-initialized model by copying tensors from ``state_dict``.

    For simplicity ``device_map`` must be resolved by the caller; it may be None,
    'cpu', 'auto', 'gpu'/'cuda', a ``torch.device``/int, or a per-parameter dict
    such as ``{'embeddings.word_embeddings': 0, 'LayerNormFinal': 0, 'lm_head': 0}``.
    """
    from accelerate.utils import set_module_tensor_to_device
    for name, tensor in state_dict.items():
        if (device_map is None) or (device_map == 'cpu'):
            target_device = "cpu"
        elif device_map == 'auto':
            target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
        elif device_map in {'gpu', 'cuda'}:
            target_device = 'cuda'
        elif isinstance(device_map, (torch.device, int)):
            target_device = device_map
        elif isinstance(device_map, dict):
            target_device = device_map[name]
        else:
            target_device = 'cpu'
            log_warn(f'Args `device_map`={device_map} has not been pre maintained')
        # keep the checkpoint dtype unless an explicit torch_dtype override is given
        set_module_tensor_to_device(model, name, target_device,
                                    value=tensor, dtype=torch_dtype or tensor.dtype)
20,950 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `old_checkpoint` function. Write a Python function `def old_checkpoint(function, model_kwargs)` to solve the following problem:
兼容torch<1.11.0时仅允许输入输出是位置参数 通过闭包来对返回参数进行控制
Here is the function:
def old_checkpoint(function, model_kwargs):
    ''' Compatibility shim for torch<1.11.0 where gradient checkpointing only
    accepts positional inputs/outputs.

    The closure records dict output keys (if any) so dict outputs can round-trip
    through `CheckpointFunction`, which only supports tuples.

    :param function: an nn.Module whose forward() is checkpointed
    :param model_kwargs: keyword args for forward(); `preserve_rng_state` is popped
    '''
    def create_custom_forward(module):
        def custom_forward(*inputs):
            outputs = module(*inputs)
            if isinstance(outputs, dict):
                # remember the key order so the tuple can be re-assembled into a dict
                setattr(create_custom_forward, 'outputs_keys', list(outputs.keys()))
                return tuple(outputs.values())
            else:
                return outputs
        return custom_forward

    # Build the positional argument list from forward()'s signature.
    # - getfullargspec replaces getargspec (removed in Python 3.11).
    # - BUGFIX: defaults align with the *tail* of the argument list, so pad the
    #   front with None; the original `arg_defaults[i]` mis-indexed by position
    #   and, being evaluated eagerly inside dict.get, crashed even when the
    #   keyword was actually supplied.
    spec = inspect.getfullargspec(type(function).forward)
    arg_names = spec.args[1:]  # drop `self`
    defaults = tuple(spec.defaults or ())
    padded_defaults = (None,) * (len(arg_names) - len(defaults)) + defaults
    args = [model_kwargs.get(name, default) for name, default in zip(arg_names, padded_defaults)]

    preserve = model_kwargs.pop('preserve_rng_state', True)
    outputs = CheckpointFunction.apply(create_custom_forward(function), preserve, *args)
    if hasattr(create_custom_forward, 'outputs_keys'):
        return dict(zip(create_custom_forward.outputs_keys, outputs))
    else:
        return outputs
20,951 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `cuda_empty_cache` function. Write a Python function `def cuda_empty_cache(device=None)` to solve the following problem:
清理gpu显存
Here is the function:
def cuda_empty_cache(device=None):
    '''Free cached GPU memory (optionally scoped to one device); no-op without CUDA.'''
    if not torch.cuda.is_available():
        return
    if device is None:
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    else:
        with torch.cuda.device(device):
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
20,952 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `modify_variable_mapping` function. Write a Python function `def modify_variable_mapping(original_func, **new_dict)` to solve the following problem:
对variable_mapping的返回值(字典)进行修改
Here is the function:
def modify_variable_mapping(original_func, **new_dict):
    '''Wrap ``original_func`` (a variable_mapping-style callable returning a dict)
    so that its returned dict is updated with the ``new_dict`` entries.

    :param original_func: callable returning a dict
    :param new_dict: key/value pairs merged into (and overriding) the result
    '''
    from functools import wraps
    @wraps(original_func)  # keep the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        # call the original function and patch its result in place
        result = original_func(*args, **kwargs)
        result.update(new_dict)
        return result
    return wrapper
20,953 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
if os.environ.get('SAFETENSORS_FIRST', False):
SAFETENSORS_BINS = ['.safetensors', '.bin'] # 优先查找safetensors格式权重
else:
SAFETENSORS_BINS = ['.bin', '.safetensors'] # 优先查找bin格式权重
The provided code snippet includes necessary dependencies for implementing the `copytree` function. Write a Python function `def copytree(src:str, dst:str, ignore_copy_files:str=None, dirs_exist_ok=False)` to solve the following problem:
从一个文件夹copy到另一个文件夹 :param src: str, copy from src :param dst: str, copy to dst
Here is the function:
def copytree(src:str, dst:str, ignore_copy_files:str=None, dirs_exist_ok=False):
    '''Recursively copy one folder into another.

    :param src: str, copy from src
    :param dst: str, copy to dst
    :param ignore_copy_files: iterable of regex patterns; matching names are skipped
    :param dirs_exist_ok: forwarded to shutil.copytree
    '''
    def _skip(path, names):
        if ignore_copy_files is None:
            return []
        return [name for name in names
                if any(re.search(pattern, name) for pattern in ignore_copy_files)]
    if src:
        # NOTE(review): this creates the *source* dir if missing (so copytree
        # never fails on a non-existent src) — looks intentional, confirm.
        os.makedirs(src, exist_ok=True)
        shutil.copytree(src, dst, ignore=_skip, dirs_exist_ok=dirs_exist_ok)
20,954 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
if os.environ.get('SAFETENSORS_FIRST', False):
SAFETENSORS_BINS = ['.safetensors', '.bin'] # 优先查找safetensors格式权重
else:
SAFETENSORS_BINS = ['.bin', '.safetensors'] # 优先查找bin格式权重
def snapshot_download(
    repo_id: str,
    filename: str = None,
    revision: str = None,
    cache_dir: Union[str, Path, None] = None,
    library_name: str = None,
    library_version: str = None,
    user_agent: Union[Dict, str, None] = None,
    **kwargs
) -> str:
    """
    Download pretrained model from https://huggingface.co/

    If ``filename`` is None the whole repo is fetched (one weight format plus
    auxiliary files) and the local storage folder is returned; otherwise only
    that file is fetched and its resolved local path is returned.

    Recognized ``kwargs``: ``_commit_hash`` (resume a cached revision),
    ``force_download``, ``local_files_only``.
    """
    _commit_hash = kwargs.get('_commit_hash', None)
    force_download = kwargs.get('force_download', False)
    local_files_only = kwargs.get('local_files_only', False)
    # imported lazily so huggingface_hub is only required when downloading
    from huggingface_hub import HfApi, hf_hub_download
    from huggingface_hub.utils import EntryNotFoundError
    from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
    if cache_dir is None:
        cache_dir = HUGGINGFACE_HUB_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    repo_cache = os.path.join(cache_dir, f'models--{repo_id.replace("/", "--")}')
    storage_folder = None
    if filename is None:
        # download every file in the repo
        b4t_filenames_path = os.path.join(repo_cache, 'bert4torch_filenames.json')
        if os.path.exists(b4t_filenames_path) and local_files_only:
            file_names = json.load(open(b4t_filenames_path, "r", encoding='utf-8'))
        else:
            model_info = HfApi().model_info(repo_id=repo_id, revision=revision)
            file_names = []
            for model_file in model_info.siblings:
                file_name = model_file.rfilename
                if file_name.endswith(".h5") or file_name.endswith(".ot") or file_name.endswith(".msgpack"):
                    continue
                file_names.append(file_name)
            # prefer safetensors weights when the repo has them and the lib is installed
            if any([i.endswith('.safetensors') for i in file_names]) and is_safetensors_available():
                file_names = [i for i in file_names if not i.endswith('.bin')]
            else: # otherwise keep only the pytorch_model_*.bin weights
                file_names = [i for i in file_names if not i.endswith('.safetensors')]
            os.makedirs(os.path.dirname(b4t_filenames_path), exist_ok=True)
            json.dump(file_names, open(b4t_filenames_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=4)
        for file_name in file_names:
            # try to resume this file from the local cache
            if (_commit_hash is not None and not force_download) or local_files_only:
                resolved_file = try_to_load_from_cache(repo_id, file_name, cache_dir=cache_dir, revision=_commit_hash)
                if resolved_file is not None:
                    if resolved_file is _CACHED_NO_EXIST:
                        # NOTE(review): "(unknown)" looks like a lost format placeholder; the file name is not reported
                        log_error_once(f"Could not locate (unknown) inside https://huggingface.co/{repo_id}/tree/main")
                    elif resolved_file.endswith('config.json'):
                        storage_folder = os.path.dirname(resolved_file)
                        log_info_once(f'Resume {repo_id} from {storage_folder}')
            else:
                # download this file
                resolved_file = hf_hub_download(
                    repo_id = repo_id,
                    filename = file_name,
                    cache_dir = cache_dir,
                    revision = revision,
                    force_download = force_download,
                    # force_filename = filename,
                    library_name = library_name,
                    library_version = library_version,
                    user_agent = user_agent,
                )
                if resolved_file.endswith('config.json'):
                    # the folder holding config.json is treated as the model's storage folder
                    storage_folder = os.path.dirname(resolved_file)
                    log_info(f'Download {repo_id} to {storage_folder}')
                if os.path.exists(resolved_file + ".lock"):
                    os.remove(resolved_file + ".lock")
        return storage_folder
    else:
        # single file requested: try to resume it from the local cache
        if (_commit_hash is not None and not force_download) or local_files_only:
            resolved_file = try_to_load_from_cache(repo_id, filename, cache_dir=cache_dir, revision=_commit_hash)
            if resolved_file is not None:
                if resolved_file is _CACHED_NO_EXIST:
                    # NOTE(review): "(unknown)" looks like a lost format placeholder; the file name is not reported
                    log_error_once(f"Could not locate (unknown) inside https://huggingface.co/{repo_id}/tree/main")
                    resolved_file = None
                else:
                    log_info(f'Resume {repo_id} from {resolved_file}')
        else:
            # download the requested file
            try:
                resolved_file = hf_hub_download(
                    repo_id = repo_id,
                    filename = filename,
                    cache_dir = cache_dir,
                    revision = revision,
                    force_download = force_download,
                    # force_filename = filename,
                    library_name = library_name,
                    library_version = library_version,
                    user_agent = user_agent,
                    endpoint = HF_ENDPOINT
                )
                log_info(f'Download {repo_id} to {resolved_file}')
            except EntryNotFoundError:
                log_error(
                    f"{repo_id} does not appear to have a file named (unknown). Checkout "
                    f"'https://huggingface.co/{repo_id}/tree/main' for available files."
                )
                resolved_file = None
        return resolved_file
The provided code snippet includes necessary dependencies for implementing the `get_config_path` function. Write a Python function `def get_config_path(pretrained_model_name_or_path:str, allow_none=False, **kwargs) -> str` to solve the following problem:
获取local文件夹下的config文件路径 1. model_name: 从hf下载 2. local_file且config_path为None: 重新在local_file所在目录找对应的config_path 3. local_dir且config_path为None: 重新在local_dir找对应的config_path
Here is the function:
def get_config_path(pretrained_model_name_or_path:str, allow_none=False, **kwargs) -> str:
    '''Locate the config file for a local dir/file or a hub model name.

    1. model_name: download bert4torch_config.json from the HF hub
    2. local file with no explicit config: look in the file's directory
    3. local dir with no explicit config: look inside the directory

    :param allow_none: if True, return None instead of raising when no local config exists
    '''
    if pretrained_model_name_or_path is None:
        return pretrained_model_name_or_path
    elif isinstance(pretrained_model_name_or_path, (tuple, list)):
        # a list of checkpoint files: search their common directory
        pretrained_model_name_or_path = os.path.dirname(pretrained_model_name_or_path[0])

    config_path = None
    # single file: either the config itself, or a checkpoint whose dir holds the config
    if os.path.isfile(pretrained_model_name_or_path):
        if pretrained_model_name_or_path.endswith('config.json'):
            return pretrained_model_name_or_path
        else:
            pretrained_model_name_or_path = os.path.dirname(pretrained_model_name_or_path)

    # local directory: prefer bert4torch_config.json over config.json
    if os.path.isdir(pretrained_model_name_or_path):
        for _config in ['bert4torch_config.json', 'config.json']:
            candidate = os.path.join(pretrained_model_name_or_path, _config)
            if os.path.exists(candidate):
                config_path = candidate
                break
        # BUGFIX: previously config_path kept the last candidate path even when
        # that file did not exist, so this error could never fire and a phantom
        # path was returned; config_path now stays None unless a config exists.
        if (not allow_none) and (config_path is None):
            raise FileNotFoundError('bert4torch_config.json or config.json not found')
    # model name: fetch bert4torch_config.json from the hub
    else:
        # repos under Tongjilibo/ host their own config; others use the shared config repo
        if pretrained_model_name_or_path.startswith('Tongjilibo/'):
            config_path = snapshot_download(pretrained_model_name_or_path, filename='bert4torch_config.json', **kwargs)
        else:
            filename = pretrained_model_name_or_path.split('/')[-1] + '/bert4torch_config.json'
            config_path = snapshot_download('Tongjilibo/bert4torch_config', filename=filename, **kwargs)
    return config_path
20,955 | import json
import torch
import gc
import inspect
from torch4keras.snippets import *
from torch.utils.checkpoint import CheckpointFunction
import shutil
import re
from pathlib import Path
if os.environ.get('SAFETENSORS_FIRST', False):
SAFETENSORS_BINS = ['.safetensors', '.bin'] # 优先查找safetensors格式权重
else:
SAFETENSORS_BINS = ['.bin', '.safetensors'] # 优先查找bin格式权重
def snapshot_download(
    repo_id: str,
    filename: str = None,
    revision: str = None,
    cache_dir: Union[str, Path, None] = None,
    library_name: str = None,
    library_version: str = None,
    user_agent: Union[Dict, str, None] = None,
    **kwargs
) -> str:
    """
    Download pretrained model from https://huggingface.co/

    If ``filename`` is None the whole repo is fetched (one weight format plus
    auxiliary files) and the local storage folder is returned; otherwise only
    that file is fetched and its resolved local path is returned.

    Recognized ``kwargs``: ``_commit_hash`` (resume a cached revision),
    ``force_download``, ``local_files_only``.
    """
    _commit_hash = kwargs.get('_commit_hash', None)
    force_download = kwargs.get('force_download', False)
    local_files_only = kwargs.get('local_files_only', False)
    # imported lazily so huggingface_hub is only required when downloading
    from huggingface_hub import HfApi, hf_hub_download
    from huggingface_hub.utils import EntryNotFoundError
    from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
    if cache_dir is None:
        cache_dir = HUGGINGFACE_HUB_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    repo_cache = os.path.join(cache_dir, f'models--{repo_id.replace("/", "--")}')
    storage_folder = None
    if filename is None:
        # download every file in the repo
        b4t_filenames_path = os.path.join(repo_cache, 'bert4torch_filenames.json')
        if os.path.exists(b4t_filenames_path) and local_files_only:
            file_names = json.load(open(b4t_filenames_path, "r", encoding='utf-8'))
        else:
            model_info = HfApi().model_info(repo_id=repo_id, revision=revision)
            file_names = []
            for model_file in model_info.siblings:
                file_name = model_file.rfilename
                if file_name.endswith(".h5") or file_name.endswith(".ot") or file_name.endswith(".msgpack"):
                    continue
                file_names.append(file_name)
            # prefer safetensors weights when the repo has them and the lib is installed
            if any([i.endswith('.safetensors') for i in file_names]) and is_safetensors_available():
                file_names = [i for i in file_names if not i.endswith('.bin')]
            else: # otherwise keep only the pytorch_model_*.bin weights
                file_names = [i for i in file_names if not i.endswith('.safetensors')]
            os.makedirs(os.path.dirname(b4t_filenames_path), exist_ok=True)
            json.dump(file_names, open(b4t_filenames_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=4)
        for file_name in file_names:
            # try to resume this file from the local cache
            if (_commit_hash is not None and not force_download) or local_files_only:
                resolved_file = try_to_load_from_cache(repo_id, file_name, cache_dir=cache_dir, revision=_commit_hash)
                if resolved_file is not None:
                    if resolved_file is _CACHED_NO_EXIST:
                        # NOTE(review): "(unknown)" looks like a lost format placeholder; the file name is not reported
                        log_error_once(f"Could not locate (unknown) inside https://huggingface.co/{repo_id}/tree/main")
                    elif resolved_file.endswith('config.json'):
                        storage_folder = os.path.dirname(resolved_file)
                        log_info_once(f'Resume {repo_id} from {storage_folder}')
            else:
                # download this file
                resolved_file = hf_hub_download(
                    repo_id = repo_id,
                    filename = file_name,
                    cache_dir = cache_dir,
                    revision = revision,
                    force_download = force_download,
                    # force_filename = filename,
                    library_name = library_name,
                    library_version = library_version,
                    user_agent = user_agent,
                )
                if resolved_file.endswith('config.json'):
                    # the folder holding config.json is treated as the model's storage folder
                    storage_folder = os.path.dirname(resolved_file)
                    log_info(f'Download {repo_id} to {storage_folder}')
                if os.path.exists(resolved_file + ".lock"):
                    os.remove(resolved_file + ".lock")
        return storage_folder
    else:
        # single file requested: try to resume it from the local cache
        if (_commit_hash is not None and not force_download) or local_files_only:
            resolved_file = try_to_load_from_cache(repo_id, filename, cache_dir=cache_dir, revision=_commit_hash)
            if resolved_file is not None:
                if resolved_file is _CACHED_NO_EXIST:
                    # NOTE(review): "(unknown)" looks like a lost format placeholder; the file name is not reported
                    log_error_once(f"Could not locate (unknown) inside https://huggingface.co/{repo_id}/tree/main")
                    resolved_file = None
                else:
                    log_info(f'Resume {repo_id} from {resolved_file}')
        else:
            # download the requested file
            try:
                resolved_file = hf_hub_download(
                    repo_id = repo_id,
                    filename = filename,
                    cache_dir = cache_dir,
                    revision = revision,
                    force_download = force_download,
                    # force_filename = filename,
                    library_name = library_name,
                    library_version = library_version,
                    user_agent = user_agent,
                    endpoint = HF_ENDPOINT
                )
                log_info(f'Download {repo_id} to {resolved_file}')
            except EntryNotFoundError:
                log_error(
                    f"{repo_id} does not appear to have a file named (unknown). Checkout "
                    f"'https://huggingface.co/{repo_id}/tree/main' for available files."
                )
                resolved_file = None
        return resolved_file
The provided code snippet includes necessary dependencies for implementing the `get_checkpoint_path` function. Write a Python function `def get_checkpoint_path(pretrained_model_name_or_path:Union[str,list], **kwargs) -> Union[str,list]` to solve the following problem:
获取该local文件夹下的ckpt文件、文件列表 1. model_name: 从hf下载 2. local_file且config_path为None: 重新在local_file所在目录找对应的config_path 3. local_dir且config_path为None: 重新在local_dir找对应的config_path
Here is the function:
def get_checkpoint_path(pretrained_model_name_or_path:Union[str,list], **kwargs) -> Union[str,list]:
    '''Resolve checkpoint file(s) for a local path, a file list, or a hub model name.

    1. model_name: download from the HF hub first
    2. single weight file (.bin/.safetensors): returned as-is
    3. directory (or the directory of a non-weight file): return all weight files
       matching the first suffix in SAFETENSORS_BINS that has any hits
    '''
    ckpt = pretrained_model_name_or_path
    if ckpt is None:
        return ckpt
    # already an explicit list of files
    if isinstance(ckpt, (tuple, list)):
        return ckpt
    if os.path.isfile(ckpt):
        if ckpt.endswith('.bin') or ckpt.endswith('.safetensors'):
            return ckpt
        # non-weight file: search its directory instead
        ckpt = os.path.dirname(ckpt)
    elif not os.path.isdir(ckpt):
        # treat as a hub model name and download it
        ckpt = snapshot_download(ckpt, **kwargs)
    if os.path.isdir(ckpt):
        for suffix in SAFETENSORS_BINS:
            matches = [name for name in os.listdir(ckpt) if name.endswith(suffix)]
            if matches:
                return [os.path.join(ckpt, name) for name in matches]
        raise FileNotFoundError(f'No weights found in {ckpt}')
    return ckpt
20,956 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `is_string` function. Write a Python function `def is_string(s)` to solve the following problem:
判断是否是字符串
Here is the function:
def is_string(s):
    """Return True if *s* is a string (``basestring`` on py2; aliased to ``str`` on py3)."""
    return isinstance(s, basestring)
20,957 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `truncate_sequences` function. Write a Python function `def truncate_sequences(maxlen, indices, *sequences)` to solve the following problem:
截断总长度至不超过maxlen
Here is the function:
def truncate_sequences(maxlen, indices, *sequences):
    """Pop items from the longest sequence (at that sequence's index) until the total length is <= maxlen.

    :param maxlen: int, combined length budget across all sequences
    :param indices: a single pop index, or one index per sequence
    :param sequences: the (mutable) sequences to truncate; empty ones are dropped
    :return: list of the truncated sequences
    """
    sequences = [seq for seq in sequences if seq]
    if not isinstance(indices, (list, tuple)):
        indices = [indices] * len(sequences)
    while sum(len(seq) for seq in sequences) > maxlen:
        # always shrink the currently-longest sequence
        longest = int(np.argmax([len(seq) for seq in sequences]))
        sequences[longest].pop(indices[longest])
    return sequences
20,958 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `text_segmentate` function. Write a Python function `def text_segmentate(text, maxlen, seps='\n', strips=None, truncate=True)` to solve the following problem:
将文本按照标点符号划分为若干个短句 :param text: 待划分的句子 :param maxlen: int, 截断长度 :param seps: 分隔符 :param strips: ''.strip() :param truncate: True表示标点符号切分后仍然超长时, 按照maxlen硬截断分成若干个短句 :return: List[str], 划分后的句子列表
Here is the function:
def text_segmentate(text, maxlen, seps='\n', strips=None, truncate=True):
    """Recursively split *text* into pieces no longer than maxlen, trying separators in priority order.

    :param text: sentence to split
    :param maxlen: int, length limit per piece
    :param seps: separators tried in order (seps[0] first, recursing with seps[1:])
    :param strips: characters stripped from both ends (passed to str.strip)
    :param truncate: if True, hard-split any piece still longer than maxlen once separators are exhausted
    :return: List[str], the resulting pieces
    """
    text = text.strip().strip(strips)
    if seps and len(text) > maxlen:
        pieces = text.split(seps[0])
        text, texts = '', []
        for i, p in enumerate(pieces):
            # flush the accumulated buffer once adding the next piece would overflow
            if text and p and len(text) + len(p) > maxlen - 1:
                texts.extend(text_segmentate(text, maxlen, seps[1:], strips, truncate))
                text = ''
            if i + 1 == len(pieces):
                text = text + p
            else:
                # re-attach the separator so it is preserved in the output
                text = text + p + seps[0]
        if text:
            texts.extend(text_segmentate(text, maxlen, seps[1:], strips, truncate))
        return texts
    elif truncate and (not seps) and (len(text) > maxlen):
        # separators exhausted but still too long, and truncate=True: hard-split into maxlen chunks
        return [text[i*maxlen:(i+1)*maxlen] for i in range(0, int(np.ceil(len(text)/maxlen)))]
    else:
        return [text]
20,959 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `merge_segmentate` function. Write a Python function `def merge_segmentate(sequences, maxlen, sep='')` to solve the following problem:
把m个句子合并成不超过maxlen的n个句子, 主要用途是合并碎句子 :param sequences: List(str), 短句子列表 :param maxlen: int, 最大长度 :param sep: str, 合并使用的分隔符, 可以是,。等标点符号
Here is the function:
def merge_segmentate(sequences, maxlen, sep=''):
    '''Greedily merge m short snippets into n pieces that each stay within maxlen.

    :param sequences: List(str), short snippets to merge
    :param maxlen: int, maximum merged length
    :param sep: str, separator inserted between merged snippets (e.g. punctuation)
    '''
    merged, buffer = [], ''
    for piece in sequences:
        if buffer and len(buffer + sep + piece) <= maxlen:
            # still fits: keep accumulating
            buffer = buffer + sep + piece
        elif buffer:
            # would overflow: flush the buffer and start over with this piece
            merged.append(buffer)
            buffer = piece
        elif len(piece) < maxlen:
            # buffer is empty and the piece fits
            buffer = piece
        else:
            # buffer is empty and the piece is already too long: emit it as-is
            merged.append(piece)
            buffer = ''
    if buffer:
        merged.append(buffer)
    return merged
20,960 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `text_augmentation` function. Write a Python function `def text_augmentation(texts, noise_dict=None, noise_len=0, noise_p=0.0, skip_words=None, strategy='random', allow_dup=True)` to solve the following problem:
简单的EDA策略, 增删改 :param texts: 需要增强的文本/文本list :param noise_dict: 噪音数据, 元素为str的list, tuple, set :param noise_len: 噪音长度, 优先试用 :param noise_p: 噪音比例 :param skip_words: 跳过的短语, string/list :param strategy: 修改的策略, 包含增insert, 删delete, 改replace, 随机random :param allow_dup: 是否允许同一个位置多次EDA
Here is the function:
def text_augmentation(texts, noise_dict=None, noise_len=0, noise_p=0.0, skip_words=None, strategy='random', allow_dup=True):
    '''Simple EDA (easy data augmentation): insert / delete / replace characters.

    :param texts: text or list of texts to augment (lists are modified in place)
    :param noise_dict: noise tokens used for insert/replace, a list/tuple/set of str
    :param noise_len: absolute number of noised positions; takes precedence over noise_p
    :param noise_p: fraction of the text length to noise (used when noise_len == 0)
    :param skip_words: phrase(s) whose positions must not be edited, str or list of str
    :param strategy: edit strategy: insert, delete, replace, or random
    :param allow_dup: whether the same position may be picked more than once
    '''
    def insert(text, insert_idx, noise_dict):
        # append a random noise token after each selected position
        text = list(text)
        for i in insert_idx:
            text[i] = text[i] + random.choice(noise_dict)
        return ''.join(text)
    def delete(text, delete_idx):
        # drop the characters at the selected positions
        text = list(text)
        for i in delete_idx:
            text[i] = ''
        return ''.join(text)
    def replace(text, replace_idx, noise_dict):
        # substitute the selected positions with random noise tokens
        text = list(text)
        for i in replace_idx:
            text[i] = random.choice(noise_dict)
        return ''.join(text)
    def search(pattern, sequence, keep_last=True):
        """Find substring `pattern` inside `sequence`; return the set of covered indices."""
        n = len(pattern)
        pattern_idx_set = set()
        for i in range(len(sequence)):
            if sequence[i:i + n] == pattern:
                pattern_idx_set = pattern_idx_set.union(set(range(i, i+n))) if keep_last else pattern_idx_set.union(set(range(i, i+n-1)))
        return pattern_idx_set
    if (noise_len==0) and (noise_p==0):
        return texts
    assert strategy in {'insert', 'delete', 'replace', 'random'}, 'EDA strategy only support insert, delete, replace, random'
    if isinstance(texts, str):
        texts = [texts]
    if skip_words is None:
        skip_words = []
    elif isinstance(skip_words, str):
        skip_words = [skip_words]
    for id, text in enumerate(texts):
        sel_len = noise_len if noise_len > 0 else int(len(text)*noise_p) # number of positions to noise
        skip_idx = set() # indices that must not be modified
        for item in skip_words:
            # for insert, the last position of a protected span may still take an insertion
            skip_idx = skip_idx.union(search(item, text, strategy!='insert'))
        sel_idxs = [i for i in range(len(text)) if i not in skip_idx] # candidate indices
        sel_len = sel_len if allow_dup else min(sel_len, len(sel_idxs)) # sampling without replacement needs sel_len <= population
        if (sel_len == 0) or (len(sel_idxs) == 0): # nothing can be sampled: leave this text unchanged
            continue
        sel_idx = np.random.choice(sel_idxs, sel_len, replace=allow_dup)
        if strategy == 'insert':
            texts[id] = insert(text, sel_idx, noise_dict)
        elif strategy == 'delete':
            texts[id] = delete(text, sel_idx)
        elif strategy == 'replace':
            texts[id] = replace(text, sel_idx, noise_dict)
        elif strategy == 'random':
            # random strategy: roughly one third each of insert / delete / replace
            if random.random() < 0.333:
                skip_idx = set() # indices that must not be modified
                for item in skip_words:
                    # for insert, the last position of a protected span may still take an insertion
                    skip_idx = skip_idx.union(search(item, text, keep_last=False))
                texts[id] = insert(text, sel_idx, noise_dict)
            elif random.random() < 0.667:
                texts[id] = delete(text, sel_idx)
            else:
                texts[id] = replace(text, sel_idx, noise_dict)
    return texts if len(texts) > 1 else texts[0]
20,961 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
is_py2 = six.PY2
if not is_py2:
basestring = str
The provided code snippet includes necessary dependencies for implementing the `lowercase_and_normalize` function. Write a Python function `def lowercase_and_normalize(text, never_split=())` to solve the following problem:
转小写,并进行简单的标准化
Here is the function:
def lowercase_and_normalize(text, never_split=()):
"""转小写,并进行简单的标准化"""
if is_py2:
text = unicode(text)
# convert non-special tokens to lowercase
escaped_special_toks = [re.escape(s_tok) for s_tok in never_split]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
# text = text.lower()
text = unicodedata.normalize('NFD', text)
text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])
return text | 转小写,并进行简单的标准化 |
20,962 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `sequence_padding` function. Write a Python function `def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post')` to solve the following problem:
将序列padding到同一长度
Here is the function:
def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
"""将序列padding到同一长度"""
if isinstance(inputs[0], (np.ndarray, list)):
if length is None:
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
elif not hasattr(length, '__getitem__'):
length = [length]
slices = [np.s_[:length[i]] for i in range(seq_dims)]
slices = tuple(slices) if len(slices) > 1 else slices[0]
pad_width = [(0, 0) for _ in np.shape(inputs[0])]
outputs = []
for x in inputs:
x = x[slices]
for i in range(seq_dims):
if mode in {'post', 'right'}:
pad_width[i] = (0, length[i] - np.shape(x)[i])
elif mode in {'pre', 'left'}:
pad_width[i] = (length[i] - np.shape(x)[i], 0)
else:
raise ValueError('"mode" argument must be "post/right" or "pre/left".')
x = np.pad(x, pad_width, 'constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
elif isinstance(inputs[0], torch.Tensor):
assert mode in {'post', 'right'}, '"mode" argument must be "post/right" when element is torch.Tensor'
if length is not None:
inputs = [i[:length] for i in inputs]
return pad_sequence(inputs, padding_value=value, batch_first=True)
else:
raise ValueError('"input" argument must be tensor/list/ndarray.') | 将序列padding到同一长度 |
20,963 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
def parallel_apply_generator(func, iterable, workers, max_queue_size, dummy=False, random_seeds=True):
"""多进程或多线程地将func应用到iterable的每个元素中(直接从bert4keras中移植过来)。
注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是输出可能是func(c), func(a), func(b)。结果将作为一个
generator返回,其中每个item是输入的序号以及该输入对应的处理结果。
:param dummy: False是多进程/线性,True则是多线程/线性;
:param random_seeds: 每个进程的随机种子。
"""
if dummy:
from multiprocessing.dummy import Pool, Queue
else:
from multiprocessing import Pool, Queue
in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
if random_seeds is True:
random_seeds = [None] * workers
elif random_seeds is None or random_seeds is False:
random_seeds = []
for seed in random_seeds:
seed_queue.put(seed)
def worker_step(in_queue, out_queue):
"""单步函数包装成循环执行"""
if not seed_queue.empty():
np.random.seed(seed_queue.get())
while True:
i, d = in_queue.get()
r = func(d)
out_queue.put((i, r))
# 启动多进程/线程
pool = Pool(workers, worker_step, (in_queue, out_queue))
# 存入数据,取出结果
in_count, out_count = 0, 0
for i, d in enumerate(iterable):
in_count += 1
while True:
try:
in_queue.put((i, d), block=False)
break
except six.moves.queue.Full:
while out_queue.qsize() > max_queue_size:
yield out_queue.get()
out_count += 1
if out_queue.qsize() > 0:
yield out_queue.get()
out_count += 1
while out_count != in_count:
yield out_queue.get()
out_count += 1
pool.terminate()
The provided code snippet includes necessary dependencies for implementing the `parallel_apply` function. Write a Python function `def parallel_apply(func, iterable, workers, max_queue_size, callback=None, dummy=False, random_seeds=True, unordered=True)` to solve the following problem:
多进程或多线程地将func应用到iterable的每个元素中(直接从bert4keras中移植过来)。 注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是输出可能是func(c), func(a), func(b)。 :param callback: 处理单个输出的回调函数; :param dummy: False是多进程/线性,True则是多线程/线性;windows需设置dummy=True :param random_seeds: 每个进程的随机种子; :param unordered: 若为False,则按照输入顺序返回,仅当callback为None时生效。
Here is the function:
def parallel_apply(func, iterable, workers, max_queue_size, callback=None, dummy=False, random_seeds=True, unordered=True):
"""多进程或多线程地将func应用到iterable的每个元素中(直接从bert4keras中移植过来)。
注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是输出可能是func(c), func(a), func(b)。
:param callback: 处理单个输出的回调函数;
:param dummy: False是多进程/线性,True则是多线程/线性;windows需设置dummy=True
:param random_seeds: 每个进程的随机种子;
:param unordered: 若为False,则按照输入顺序返回,仅当callback为None时生效。
"""
generator = parallel_apply_generator(func, iterable, workers, max_queue_size, dummy, random_seeds)
if callback is None:
if unordered:
return [d for i, d in generator]
else:
results = sorted(generator, key=lambda d: d[0])
return [d for i, d in results]
else:
for i, d in generator:
callback(d) | 多进程或多线程地将func应用到iterable的每个元素中(直接从bert4keras中移植过来)。 注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是输出可能是func(c), func(a), func(b)。 :param callback: 处理单个输出的回调函数; :param dummy: False是多进程/线性,True则是多线程/线性;windows需设置dummy=True :param random_seeds: 每个进程的随机种子; :param unordered: 若为False,则按照输入顺序返回,仅当callback为None时生效。 |
20,964 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `get_pool_emb` function. Write a Python function `def get_pool_emb(hidden_state=None, pooled_output=None, attention_mask=None, pool_strategy='cls', custom_layer=None)` to solve the following problem:
获取句向量 :param hidden_state: torch.Tensor/List(torch.Tensor),last_hidden_state/all_encoded_layers :param pooled_output: torch.Tensor, bert的pool_output输出 :param attention_mask: torch.Tensor :param pool_strategy: str, ('cls', 'last-avg', 'mean', 'last-max', 'max', 'first-last-avg', 'custom') :param custom_layer: int/List[int],指定对某几层做average pooling
Here is the function:
def get_pool_emb(hidden_state=None, pooled_output=None, attention_mask=None, pool_strategy='cls', custom_layer=None):
''' 获取句向量
:param hidden_state: torch.Tensor/List(torch.Tensor),last_hidden_state/all_encoded_layers
:param pooled_output: torch.Tensor, bert的pool_output输出
:param attention_mask: torch.Tensor
:param pool_strategy: str, ('cls', 'last-avg', 'mean', 'last-max', 'max', 'first-last-avg', 'custom')
:param custom_layer: int/List[int],指定对某几层做average pooling
'''
if pool_strategy == 'pooler':
return pooled_output
elif pool_strategy == 'cls':
if isinstance(hidden_state, (list, tuple)):
hidden_state = hidden_state[-1]
assert isinstance(hidden_state, torch.Tensor), f'{pool_strategy} strategy request tensor hidden_state'
return hidden_state[:, 0]
elif pool_strategy in {'last-avg', 'mean'}:
if isinstance(hidden_state, (list, tuple)):
hidden_state = hidden_state[-1]
assert isinstance(hidden_state, torch.Tensor), f'{pool_strategy} pooling strategy request tensor hidden_state'
hid = torch.sum(hidden_state * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hid / attention_mask
elif pool_strategy in {'last-max', 'max'}:
if isinstance(hidden_state, (list, tuple)):
hidden_state = hidden_state[-1]
assert isinstance(hidden_state, torch.Tensor), f'{pool_strategy} pooling strategy request tensor hidden_state'
hid = torch.masked_fill(hidden_state, (1-attention_mask[:, :, None]).bool(), torch.finfo(hidden_state.dtype).min)
return torch.max(hid, dim=1).values
elif pool_strategy == 'first-last-avg':
assert isinstance(hidden_state, list), f'{pool_strategy} pooling strategy request list hidden_state'
hid = torch.sum(hidden_state[1] * attention_mask[:, :, None], dim=1) # 这里不取0
hid += torch.sum(hidden_state[-1] * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hid / (2 * attention_mask)
elif pool_strategy == 'custom':
# 取指定层
assert isinstance(hidden_state, list), f'{pool_strategy} pooling strategy request list hidden_state'
assert isinstance(custom_layer, (int, list, tuple)), f'{pool_strategy} pooling strategy request int/list/tuple custom_layer'
custom_layer = [custom_layer] if isinstance(custom_layer, int) else custom_layer
hid = 0
for i, layer in enumerate(custom_layer, start=1):
hid += torch.sum(hidden_state[layer] * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hid / (i * attention_mask)
else:
raise ValueError('pool_strategy illegal') | 获取句向量 :param hidden_state: torch.Tensor/List(torch.Tensor),last_hidden_state/all_encoded_layers :param pooled_output: torch.Tensor, bert的pool_output输出 :param attention_mask: torch.Tensor :param pool_strategy: str, ('cls', 'last-avg', 'mean', 'last-max', 'max', 'first-last-avg', 'custom') :param custom_layer: int/List[int],指定对某几层做average pooling |
20,965 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
The provided code snippet includes necessary dependencies for implementing the `create_position_ids_start_at_padding` function. Write a Python function `def create_position_ids_start_at_padding(input_ids, padding_idx, past_key_values_length=0, start_padding_idx=True)` to solve the following problem:
生成padding_ids, 从padding_idx+1开始。忽略填充符号
Here is the function:
def create_position_ids_start_at_padding(input_ids, padding_idx, past_key_values_length=0, start_padding_idx=True):
"""生成padding_ids, 从padding_idx+1开始。忽略填充符号"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + (padding_idx if start_padding_idx else 0) | 生成padding_ids, 从padding_idx+1开始。忽略填充符号 |
20,966 | import unicodedata
import six
import numpy as np
import re
import torch
from torch.nn.utils.rnn import pad_sequence
from torch4keras.snippets import *
import random
def entity_extract_rule(text:str, pattern:str=None, label:str=None, start:int=0, end:int=-1, dotall:bool=True,
replace_pattern:Optional[Union[str,list]]=None, extract_pattern:Optional[Union[str,list]]=None, minlen:int=None, maxlen:int=None,
exist_subword:Union[list,str,tuple]=None, noexist_subword:Union[list,str,tuple]=None,
prefix_exist_subword:List[tuple]=None, prefix_noexist_subword:List[tuple]=None,
postfix_exist_subword:List[tuple]=None, postfix_noexist_subword:List[tuple]=None, **kwargs):
''' 按照预设的正则规则来从字符串中提取实体
:param text: 待提取的字符串
:param pattern: 提取的正则
:param label: 对应的标签
:param start/end: 是否对text进行截断
:param dotall: 正则识别时候是否忽略\n
:param replace_pattern: 对正则识别出来的结果二次修正, 比如去除前缀, 去除后缀
:param extract_pattern: 对正则识别出来的结果二次修正, 比如仅保留其中部分要素
:param minlen: 最短长度,低于长度(含)则不认为是有效实体
:param maxlen: 最长长度,超过长度(含)则不认为是有效实体
:param exist_subword: 必须包含的subword
:param noexist_subword: 必须不含的subword
:param prefix_exist_subword: 必须包含的prefix subword, 格式为[('subword', distance)]
:param prefix_noexist_subword: 必须不包含的prefix subword
:param postfix_exist_subword: 必须包含的postfix subword, 格式为[('subword', distance)]
:param postfix_noexist_subword: 必须不包含的postfix subword
Example
------------------------------------
text = '甲方:中国工商银行 乙方:中国农业银行 注册地址:上海市世纪大道1379号'
config = {'pattern': '甲方(:|:)(.*?)乙方',
'label': '甲方',
'replace_pattern': ['^甲方(:|:)', '乙方$']}
res = ner_extract_rule(text, **config)
print(res)
# return: [{'context': '中国工商银行 ', 'raw_context': '甲方:中国工商银行 乙方', 'start': 3, 'end': 10, 'label': '甲方'}]
'''
def adjust_start_end(context, new_context, start):
if new_context in context:
start += context.index(new_context)
end = start + len(new_context)
return new_context, start, end
else:
log_warn(f'{new_context} <------- not in -------> {context}')
return context, start, start+len(context)
# 截取一下
if start != 0:
text = text[start:]
if end != -1:
text = text[:end]
if dotall:
# 中间可以匹配换行符
iters = re.finditer(pattern, text, re.DOTALL)
else:
# 中间不可匹配换行
iters = re.finditer(pattern, text)
result = []
for iter in iters:
context = raw_context = iter.group()
start, end = iter.start(), iter.end()
# 提取的pattern
if extract_pattern is not None:
if isinstance(extract_pattern, str):
extract_pattern = [extract_pattern]
for pat in extract_pattern:
if re.search(pat, context):
new_context = next(re.finditer(pat, context)).group()
context, start, end = adjust_start_end(context, new_context, start)
# 删除的pattern
if replace_pattern is not None:
if isinstance(replace_pattern, str):
replace_pattern = [replace_pattern]
for rep_pat in replace_pattern:
if re.search(rep_pat, context):
new_context = re.sub(rep_pat, '', context)
context, start, end = adjust_start_end(context, new_context, start)
# 太短
if (minlen is not None) and (len(context) <= minlen):
continue
# 超长
if (maxlen is not None) and (len(context) >= maxlen):
continue
# exist_subword: 必须存在的subword
if exist_subword is not None:
if isinstance(exist_subword, str) and (not re.search(exist_subword, context)):
continue
elif isinstance(exist_subword, (tuple, list)):
continue_tag= False
for item in exist_subword:
if not re.search(item, context):
continue_tag = True
break
if continue_tag:
continue
# noexist_subword: 必须不存在的subword
if noexist_subword is not None:
if isinstance(noexist_subword, str) and re.search(noexist_subword, context):
continue
elif isinstance(noexist_subword, (tuple, list)):
continue_tag= False
for item in noexist_subword:
if re.search(item, context):
continue_tag = True
break
if continue_tag:
continue
# prefix_exist_subword: prefix中必须存在的subword
if prefix_exist_subword is not None:
assert isinstance(prefix_exist_subword, (tuple, list)), 'prefix_exist_subword only accept tuple/list format'
prefix_exist_subword = [prefix_exist_subword] if isinstance(prefix_exist_subword[0], str) else prefix_exist_subword
continue_tag= False
for item, offset in prefix_exist_subword:
if not re.search(item, text[start-offset:start]):
continue_tag = True
break
if continue_tag:
continue
# prefix_noexist_subword: prefix中必须不存在的subword
if prefix_noexist_subword is not None:
assert isinstance(prefix_noexist_subword, (tuple, list)), 'prefix_noexist_subword only accept tuple/list format'
prefix_noexist_subword = [prefix_noexist_subword] if isinstance(prefix_noexist_subword[0], str) else prefix_noexist_subword
continue_tag= False
for item, offset in prefix_noexist_subword:
if re.search(item, text[start-offset:start]):
continue_tag = True
break
if continue_tag:
continue
# postfix_exist_subword: postfix中必须存在的subword
if postfix_exist_subword is not None:
assert isinstance(postfix_exist_subword, (tuple, list)), 'postfix_exist_subword only accept tuple/list format'
postfix_exist_subword = [postfix_exist_subword] if isinstance(postfix_exist_subword[0], str) else postfix_exist_subword
continue_tag= False
for item, offset in postfix_exist_subword:
if not re.search(item, text[end:end+offset]):
continue_tag = True
break
if continue_tag:
continue
# postfix_noexist_subword: postfix中必须不存在的subword
if postfix_noexist_subword is not None:
assert isinstance(postfix_noexist_subword, (tuple, list)), 'postfix_noexist_subword only accept tuple/list format'
postfix_noexist_subword = [postfix_noexist_subword] if isinstance(postfix_noexist_subword[0], str) else postfix_noexist_subword
continue_tag= False
for item, offset in postfix_noexist_subword:
if re.search(item, text[end:end+offset]):
continue_tag = True
break
if continue_tag:
continue
assert context == text[start:end]
result.append({'context': context,
'raw_context': raw_context,
'start': start,
'end': end,
'label': label,
**kwargs})
return result
The provided code snippet includes necessary dependencies for implementing the `entity_extract_rule_placeholder` function. Write a Python function `def entity_extract_rule_placeholder(self, text, **pat_config)` to solve the following problem:
按照预设的正则规则来解析实体, 允许占位符
Here is the function:
def entity_extract_rule_placeholder(self, text, **pat_config):
''' 按照预设的正则规则来解析实体, 允许占位符
'''
placeholder = pat_config.get('placeholder')
result = []
if placeholder is not None:
pattern = pat_config.pop('pattern')
for ph, pv in placeholder.items():
if isinstance(pv, str):
result.extend(entity_extract_rule(text, pattern.replace(ph, pv), **pat_config))
elif isinstance(pv, (tuple,list)):
for pv_i in pv:
result.extend(entity_extract_rule(text, pattern.replace(ph, pv_i), **pat_config))
else:
result.extend(entity_extract_rule(text, **pat_config))
return result | 按照预设的正则规则来解析实体, 允许占位符 |
20,967 | import torch
import torch.nn as nn
import numpy as np
from bert4torch.activations import get_activation
from bert4torch.layers.core import LayerNorm
import random
import warnings
import math
class BottleneckAdapterLayer(nn.Module):
'''BottleneckAdapterLayer'''
def __init__(self, adapter_input_size, bottleneck_size, adapter_non_linearity='gelu'):
super().__init__()
self.adapter_input_size = adapter_input_size
self.bottleneck_size = bottleneck_size
self.non_linearity = get_activation(adapter_non_linearity)
# down proj
self.down_proj = nn.Linear(self.adapter_input_size, self.bottleneck_size)
# up proj
self.up_proj = nn.Linear(self.bottleneck_size, self.adapter_input_size)
self.init_weights()
def init_weights(self, init_mean=0.0, init_std=0.01):
self.down_proj.weight.data.normal_(mean=init_mean, std=init_std)
self.down_proj.bias.data.zero_()
self.up_proj.weight.data.normal_(mean=init_mean, std=init_std)
self.up_proj.bias.data.zero_()
def forward(self, x):
output = self.up_proj(self.non_linearity(self.down_proj(x)))
output = x + output
return output
def add_adapter(model, adapter_method='bottleneck', bottlenect_size=64):
# 冻结模型参数
for param in model.parameters():
param.requires_grad = False
if adapter_method == 'bottleneck':
# https://paperswithcode.com/paper/parameter-efficient-transfer-learning-for-nlp
# https://arxiv.org/pdf/1902.00751v2.pdf
# 顺序为: Attention --> Adapter --> Add --> LN --> FeedForward --> Adapter --> Add --> LayerNorm
try:
layers = model.encoderLayer
except:
layers = model.decoderLayer
# TODO: 这里需要测试
for layer_id in range(model.num_hidden_layers):
transformer_layer = layers[layer_id].multiHeadAttention.o
out_featuers = transformer_layer.out_features
adapter1 = BottleneckAdapterLayer(out_featuers, bottleneck_size=bottlenect_size)
layers[layer_id].multiHeadAttention.o = nn.Sequential(transformer_layer, adapter1)
transformer_layer = layers[layer_id].feedForward
out_featuers = transformer_layer.outputDense.out_features
adapter2 = BottleneckAdapterLayer(out_featuers, bottleneck_size=bottlenect_size)
layers[layer_id].feedForward = nn.Sequential(transformer_layer, adapter2)
# 待新增其余类型adapter
else:
pass
return model | null |
20,968 | from torch import nn
import torch
import math
import torch.nn.functional as F
from typing import Union, List
The provided code snippet includes necessary dependencies for implementing the `get_sinusoid_encoding_table` function. Write a Python function `def get_sinusoid_encoding_table(n_position, d_hid, base=10000.0, ntk_alpha=1.0, rope_ratio=1.0, padding_idx=None)` to solve the following problem:
sinusoid编码 :param n_position: int, 位置长度 :param d_hid: int, 位置编码长度 :param padding_idx: padding的token_ids :param ntk_alpha: int, 要扩展的倍数 :param rope_ratio: int, chatglm中32k的插值 :return: [seq_len, d_hid]
Here is the function:
def get_sinusoid_encoding_table(n_position, d_hid, base=10000.0, ntk_alpha=1.0, rope_ratio=1.0, padding_idx=None):
''' sinusoid编码
:param n_position: int, 位置长度
:param d_hid: int, 位置编码长度
:param padding_idx: padding的token_ids
:param ntk_alpha: int, 要扩展的倍数
:param rope_ratio: int, chatglm中32k的插值
:return: [seq_len, d_hid]
'''
if ntk_alpha != 1:
base = base * ntk_alpha ** (d_hid / (d_hid-2))
position = torch.arange(0, n_position, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_hid, 2).float() * (-math.log(base) / d_hid))
embeddings_table = torch.zeros(n_position, d_hid)
if rope_ratio != 0:
position = position / rope_ratio
embeddings_table[:, 0::2] = torch.sin(position * div_term)
embeddings_table[:, 1::2] = torch.cos(position * div_term)
return embeddings_table
# 第二种实现
position_ids = torch.arange(0, n_position).unsqueeze(1)
position_ids = position_ids.expand(-1, d_hid)
indices = torch.arange(0, d_hid)
position_ids = position_ids * torch.pow(10000, -2 * torch.true_divide(torch.floor_divide(indices, 2), d_hid))
position_ids[:, ::2] = torch.sin(position_ids[:, ::2])
position_ids[:, 1::2] = torch.cos(position_ids[:, 1::2])
return position_ids | sinusoid编码 :param n_position: int, 位置长度 :param d_hid: int, 位置编码长度 :param padding_idx: padding的token_ids :param ntk_alpha: int, 要扩展的倍数 :param rope_ratio: int, chatglm中32k的插值 :return: [seq_len, d_hid] |
20,969 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from typing import Any, Literal
import unicodedata
from io import open
from bert4torch.snippets import truncate_sequences, is_string, lowercase_and_normalize, sequence_padding
import re
import six
from collections import OrderedDict
import torch
class Tokenizer(TokenizerBase):
"""Bert原生分词器
"""
def __init__(self, token_dict, do_lower_case=True, do_basic_tokenize=True, do_tokenize_unk=False, **kwargs):
"""
参数:
token_dict:
词典文件
do_lower_case:
是否转换成小写
do_basic_tokenize:
分词前,是否进行基础的分词
do_tokenize_unk:
分词后,是否生成[UNK]标记,还是在encode阶段生成
"""
super(Tokenizer, self).__init__(**kwargs)
if is_string(token_dict):
token_dict = load_vocab(token_dict)
self._do_lower_case = do_lower_case
self._vocab_size = len(token_dict)
self._token_dict = token_dict
self._token_dict_inv = {v: k for k, v in token_dict.items()}
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=self.never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self._token_dict, unk_token=self._token_unk, do_tokenize_unk=do_tokenize_unk)
# 以下写在外面是方便有代码提示
self._token_pad_id = self.pad_token_id = None
self._token_unk_id = self.unk_token_id = None
self._token_mask_id = self.mask_token_id = None
self._token_start_id = self.start_token_id = None
self._token_end_id = self.end_token_id = None
for token in ['pad', 'unk', 'mask', 'start', 'end']:
try:
_token_id = token_dict[getattr(self, '_token_%s' % token)]
setattr(self, '_token_%s_id' % token, _token_id)
setattr(self, '%s_token_id' % token, _token_id)
except:
delattr(self, '_token_%s_id' % token)
delattr(self, '%s_token_id' % token)
def _tokenize(self, text, pre_tokenize=True):
"""基本分词函数
"""
# 以下pre_tokenizer逻辑参考bert4keras
if self._do_lower_case:
text = lowercase_and_normalize(text, never_split=self.never_split)
if pre_tokenize and self._pre_tokenize is not None:
tokens = []
for token in self._pre_tokenize(text):
if token in self._token_dict:
tokens.append(token)
else:
tokens.extend(self._tokenize(token, False))
return tokens
# 以下逻辑参考pytorch版本bert分词器自己的
text_pieces = self.tokens_trie.split(text) # 新增逻辑,主要是special_tokens的分词
split_tokens = []
for text_piece in text_pieces:
if not text_piece:
continue
elif text_piece in self._token_dict:
split_tokens.append(text_piece)
elif self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text_piece):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens.extend(self.wordpiece_tokenizer.tokenize(text_piece))
return split_tokens
def token_to_id(self, token):
"""token转为vocab中的id"""
return self._token_dict.get(token, self._token_unk_id)
def id_to_token(self, id):
"""id转为词表中的token"""
return self._token_dict_inv[id]
def decode(self, ids, tokens=None):
"""转为可读文本
"""
tokens = tokens or self.ids_to_tokens(ids)
tokens = [token for token in tokens if not self._is_special(token)]
text, flag = '', False
for i, token in enumerate(tokens):
if token[:2] == '##':
text += token[2:]
elif len(token) == 1 and self._is_cjk_character(token):
text += token
elif len(token) == 1 and self._is_punctuation(token):
text += token
text += ' '
elif i > 0 and self._is_cjk_character(text[-1]):
text += token
else:
text += ' '
text += token
text = re.sub(' +', ' ', text)
text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text)
punctuation = self._cjk_punctuation() + '+-/={(<['
punctuation_regex = '|'.join([re.escape(p) for p in punctuation])
punctuation_regex = '(%s) ' % punctuation_regex
text = re.sub(punctuation_regex, '\\1', text)
text = re.sub('(\d\.) (\d)', '\\1\\2', text)
return text.strip()
def stem(token):
"""获取token的“词干”(如果是##开头,则自动去掉##)
"""
if token[:2] == '##':
return token[2:]
else:
return token
def _is_space(ch):
"""空格类字符判断
"""
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or \
unicodedata.category(ch) == 'Zs'
def _is_punctuation(ch):
"""标点符号类字符判断(全/半角均在此内)
提醒:unicodedata.category这个函数在py2和py3下的
表现可能不一样,比如u'§'字符,在py2下的结果为'So',
在py3下的结果是'Po'。
"""
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\u00b7\uff01\uff1f\uff61\u3002'
def _is_cjk_character(ch):
"""CJK类字符判断(包括中文字符也在此列)
参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
"""
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
def _is_special(ch):
"""判断是不是有特殊含义的符号
"""
return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
def _is_redundant(token):
"""判断该token是否冗余(默认情况下不可能分出来)
"""
if len(token) > 1:
for ch in Tokenizer.stem(token):
if (
Tokenizer._is_cjk_character(ch) or
Tokenizer._is_punctuation(ch)
):
return True
def rematch(self, text, tokens):
"""给出原始的text和tokenize后的tokens的映射关系
"""
if is_py2:
text = unicode(text)
if self._do_lower_case:
text = text.lower()
normalized_text, char_mapping = '', []
for i, ch in enumerate(text):
if self._do_lower_case:
ch = lowercase_and_normalize(ch, self.never_split)
ch = ''.join([
c for c in ch
if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c))
])
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
for token in tokens:
if self._is_special(token):
token_mapping.append([])
else:
token = self.stem(token)
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append(char_mapping[start:end])
offset = end
return token_mapping
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(dict_path, encoding="utf-8", simplified=False, startswith=None)` to solve the following problem:
加载词典文件到dict
Here is the function:
def load_vocab(dict_path, encoding="utf-8", simplified=False, startswith=None):
"""加载词典文件到dict"""
token_dict = collections.OrderedDict()
index = 0
with open(dict_path, "r", encoding=encoding) as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
token_dict[token] = index
index += 1
if simplified: # 过滤冗余部分token,如[unused1]
new_token_dict, keep_tokens = {}, []
startswith = startswith or []
for t in startswith:
new_token_dict[t] = len(new_token_dict)
keep_tokens.append(token_dict[t])
for t, _ in sorted(token_dict.items(), key=lambda s: s[1]):
if t not in new_token_dict and not Tokenizer._is_redundant(t):
new_token_dict[t] = len(new_token_dict)
keep_tokens.append(token_dict[t])
return new_token_dict, keep_tokens
else:
return token_dict | 加载词典文件到dict |
20,970 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from typing import Any, Literal
import unicodedata
from io import open
from bert4torch.snippets import truncate_sequences, is_string, lowercase_and_normalize, sequence_padding
import re
import six
from collections import OrderedDict
import torch
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
去除文本中的空白符
Here is the function:
def whitespace_tokenize(text):
"""去除文本中的空白符"""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens | 去除文本中的空白符 |
20,971 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from typing import Any, Literal
import unicodedata
from io import open
from bert4torch.snippets import truncate_sequences, is_string, lowercase_and_normalize, sequence_padding
import re
import six
from collections import OrderedDict
import torch
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
20,972 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from typing import Any, Literal
import unicodedata
from io import open
from bert4torch.snippets import truncate_sequences, is_string, lowercase_and_normalize, sequence_padding
import re
import six
from collections import OrderedDict
import torch
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
20,973 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from typing import Any, Literal
import unicodedata
from io import open
from bert4torch.snippets import truncate_sequences, is_string, lowercase_and_normalize, sequence_padding
import re
import six
from collections import OrderedDict
import torch
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
20,974 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from typing import Any, Literal
import unicodedata
from io import open
from bert4torch.snippets import truncate_sequences, is_string, lowercase_and_normalize, sequence_padding
import re
import six
from collections import OrderedDict
import torch
The provided code snippet includes necessary dependencies for implementing the `convert_to_unicode` function. Write a Python function `def convert_to_unicode(text)` to solve the following problem:
Converts `text` to Unicode (if it's not already), assuming utf-8 input.
Here is the function:
def convert_to_unicode(text):
    """Convert `text` to a unicode str, assuming utf-8 for bytes input.

    :param text: str or bytes
    :return: str
    :raises ValueError: if `text` is neither str nor bytes
    """
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    if isinstance(text, str):
        return text
    raise ValueError("Unsupported string type: %s" % (type(text)))
20,975 | import math
import torch
from torch import nn
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `_gelu_python` function. Write a Python function `def _gelu_python(x)` to solve the following problem:
Original Implementation of the GELU activation function in Google BERT repo when initially created. For information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
Here is the function:
def _gelu_python(x):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | Original Implementation of the GELU activation function in Google BERT repo when initially created. For information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 |
20,976 | import math
import torch
from torch import nn
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `_gelu_new` function. Write a Python function `def _gelu_new(x)` to solve the following problem:
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
Here is the function:
def _gelu_new(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) | Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 |
20,977 | import math
import torch
from torch import nn
from packaging import version
def gelu_fast(x):
    """Fast tanh-based GELU approximation with sqrt(2/pi) precomputed
    as 0.7978845608."""
    inner = x * 0.7978845608 * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))
20,978 | import math
import torch
from torch import nn
from packaging import version
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    gate = torch.sigmoid(1.702 * x)
    return x * gate
20,979 | import math
import torch
from torch import nn
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `_silu_python` function. Write a Python function `def _silu_python(x)` to solve the following problem:
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with later.
Here is the function:
def _silu_python(x):
"""
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
later.
"""
return x * torch.sigmoid(x) | See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with later. |
20,980 | import math
import torch
from torch import nn
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `_mish_python` function. Write a Python function `def _mish_python(x)` to solve the following problem:
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also visit the official repository for the paper: https://github.com/digantamisra98/Mish
Here is the function:
def _mish_python(x):
"""
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
visit the official repository for the paper: https://github.com/digantamisra98/Mish
"""
return x * torch.tanh(nn.functional.softplus(x)) | See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also visit the official repository for the paper: https://github.com/digantamisra98/Mish |
20,981 | import math
import torch
from torch import nn
from packaging import version
def linear_act(x):
    """Identity activation: return the input unchanged."""
    return x
20,982 | import math
import torch
from torch import nn
from packaging import version
def swiglu(x, dim=-1):
    """SwiGLU activation: split `x` into two halves along `dim` and return
    silu(first_half) * second_half."""
    gate, value = torch.chunk(x, 2, dim=dim)
    return silu(gate) * value
20,983 | import math
import torch
from torch import nn
from packaging import version
# Registry mapping activation-function names to callables.
# NOTE: `silu`, `gelu` and `mish` are aliases defined elsewhere in this
# module (presumably chosen at import time depending on the torch version
# — confirm against the full file).
ACT2FN = {
    "relu": nn.functional.relu,
    "silu": silu,
    "swish": silu,
    "swiglu": swiglu,
    "gelu": gelu,
    "tanh": torch.tanh,
    "gelu_new": _gelu_new,
    "gelu_fast": gelu_fast,
    "quick_gelu": quick_gelu,
    "mish": mish,
    "linear": linear_act,
    "sigmoid": torch.sigmoid,
    "softmax": nn.Softmax(dim=-1)
}
The provided code snippet includes necessary dependencies for implementing the `get_activation` function. Write a Python function `def get_activation(activation_string)` to solve the following problem:
根据activation_string返回对应的激活函数 :param activation_string: str, 传入的激活函数名 :return: Any
Here is the function:
def get_activation(activation_string):
    """Look up an activation function by name.

    :param activation_string: str, name of the activation function
    :return: Any, the corresponding callable from ACT2FN
    :raises KeyError: if the name is not registered in ACT2FN
    """
    if activation_string not in ACT2FN:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
    return ACT2FN[activation_string]
20,984 | import numpy as np
import math
import os
import torch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding
import re
The provided code snippet includes necessary dependencies for implementing the `get_bool_ids_greater_than` function. Write a Python function `def get_bool_ids_greater_than(probs, limit=0.5, return_prob=False)` to solve the following problem:
Get idx of the last dimension in probability arrays, which is greater than a limitation. Args: probs (List[List[float]]): The input probability arrays. limit (float): The limitation for probability. return_prob (bool): Whether to return the probability Returns: List[List[int]]: The index of the last dimension meet the conditions.
Here is the function:
def get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):
    """Collect indices along the last dimension whose probability exceeds a
    threshold.

    Args:
        probs (List[List[float]]): The input probability arrays.
        limit (float): The probability threshold.
        return_prob (bool): If True, return (index, probability) tuples
            instead of bare indices.

    Returns:
        List[List[int]]: Indices (or tuples) meeting the condition, with the
        same nesting structure as the input.
    """
    arr = np.array(probs)
    # recurse into higher-dimensional inputs row by row
    if len(arr.shape) > 1:
        return [get_bool_ids_greater_than(row, limit, return_prob) for row in arr]
    if return_prob:
        return [(i, p) for i, p in enumerate(arr) if p > limit]
    return [i for i, p in enumerate(arr) if p > limit]
20,985 | import numpy as np
import math
import os
import torch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding
import re
The provided code snippet includes necessary dependencies for implementing the `get_span` function. Write a Python function `def get_span(start_ids, end_ids, with_prob=False)` to solve the following problem:
Get span set from position start and end list. Args: start_ids (List[int]/List[tuple]): The start index list. end_ids (List[int]/List[tuple]): The end index list. with_prob (bool): If True, each element of start_ids and end_ids is a tuple like: (index, probability). Returns: set: The span set without overlapping; every id can only be used once.
Here is the function:
def get_span(start_ids, end_ids, with_prob=False):
    """Pair start and end positions into a set of non-overlapping spans.

    Args:
        start_ids (List[int]/List[tuple]): The start index list.
        end_ids (List[int]/List[tuple]): The end index list.
        with_prob (bool): If True, each element of start_ids and end_ids is
            a tuple like (index, probability).

    Returns:
        set: The span set without overlapping; every id can only be used once.
    """
    sort_key = (lambda item: item[0]) if with_prob else None
    starts = sorted(start_ids, key=sort_key)
    ends = sorted(end_ids, key=sort_key)

    couple_dict = {}
    si = ei = 0
    while si < len(starts) and ei < len(ends):
        s = starts[si][0] if with_prob else starts[si]
        e = ends[ei][0] if with_prob else ends[ei]
        if s == e:
            couple_dict[ends[ei]] = starts[si]
            si += 1
            ei += 1
        elif s < e:
            # tentative pairing; a later (larger) start may overwrite it
            couple_dict[ends[ei]] = starts[si]
            si += 1
        else:
            ei += 1

    return {(start, end) for end, start in couple_dict.items()}
20,986 | import numpy as np
import math
import os
import torch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding
import re
def get_id_and_prob(spans, offset_map):
    """Map token-level spans back to character offsets, removing the prompt
    shift.

    Args:
        spans: iterable of ((start_idx, start_prob), (end_idx, end_prob))
            pairs.
        offset_map: per-token [char_start, char_end] offsets; the prompt part
            is the leading run of non-[0, 0] entries after position 0.
            NOTE: the prompt entries are shifted in place.

    Returns:
        tuple: (list of (char_start, char_end) pairs, list of span
        probabilities)
    """
    prompt_length = 0
    for offset in offset_map[1:]:
        if offset == [0, 0]:
            break
        prompt_length += 1

    # shift the prompt offsets so the real text starts at 0
    shift = prompt_length + 1
    for i in range(1, prompt_length + 1):
        offset_map[i][0] -= shift
        offset_map[i][1] -= shift

    sentence_id, prob = [], []
    for (s_idx, s_prob), (e_idx, e_prob) in spans:
        prob.append(s_prob * e_prob)
        sentence_id.append((offset_map[s_idx][0], offset_map[e_idx][1]))
    return sentence_id, prob
20,987 | import numpy as np
import math
import os
import torch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding
import re
The provided code snippet includes necessary dependencies for implementing the `cut_chinese_sent` function. Write a Python function `def cut_chinese_sent(para)` to solve the following problem:
Cut the Chinese sentences more precisely, reference to "https://blog.csdn.net/blmoistawinde/article/details/82379256".
Here is the function:
def cut_chinese_sent(para):
    """Split a Chinese paragraph into sentences.

    Reference:
    "https://blog.csdn.net/blmoistawinde/article/details/82379256".
    """
    rules = [
        (r'([。!?\?])([^”’])', r'\1\n\2'),            # sentence-ending punctuation
        (r'(\.{6})([^”’])', r'\1\n\2'),                # English ellipsis
        (r'(\…{2})([^”’])', r'\1\n\2'),                # Chinese ellipsis
        (r'([。!?\?][”’])([^,。!?\?])', r'\1\n\2'),  # terminator followed by a closing quote
    ]
    for pattern, repl in rules:
        para = re.sub(pattern, repl, para)
    # the last sentence needs no trailing newline; strip before splitting
    return para.rstrip().split("\n")
20,988 | import numpy as np
import math
import os
import torch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model
from bert4torch.snippets import sequence_padding
import re
def dbc2sbc(s):
    """Convert full-width (DBC) characters in `s` to their half-width (SBC)
    equivalents.

    The ideographic space (U+3000) maps to an ASCII space; the full-width
    forms U+FF01..U+FF5E map to ASCII 0x21..0x7E; every other character is
    kept unchanged.

    Bug fix: the original mapped U+3000 to code 0x20 but then rejected it
    with the range check `0x21 <= code <= 0x7e`, so the ideographic space
    was never actually converted.

    :param s: str, input text
    :return: str, text with full-width characters converted
    """
    # Build via a list + join instead of repeated string concatenation.
    chars = []
    for char in s:
        code = ord(char)
        if code == 0x3000:
            # ideographic space -> ASCII space
            chars.append(chr(0x0020))
            continue
        code -= 0xfee0
        if 0x0021 <= code <= 0x7e:
            chars.append(chr(code))
        else:
            # not a full-width form; keep the original character
            chars.append(char)
    return "".join(chars)
20,989 | import time
import json
import requests
from contextlib import asynccontextmanager
from typing import Any, Dict, List, Literal, Optional, Union
from bert4torch.snippets import log_info, log_warn, cuda_empty_cache, AnyClass
from bert4torch.snippets import is_fastapi_available, is_pydantic_available, is_sseclient_available
from packaging import version
from .base import Chat
import gc
import threading
async def lifespan(app: FastAPI):  # collects GPU memory
    """FastAPI lifespan hook: yields for the app's lifetime, then releases
    cached GPU memory on shutdown."""
    yield
    cuda_empty_cache()
20,990 | import time
import json
import requests
from contextlib import asynccontextmanager
from typing import Any, Dict, List, Literal, Optional, Union
from bert4torch.snippets import log_info, log_warn, cuda_empty_cache, AnyClass
from bert4torch.snippets import is_fastapi_available, is_pydantic_available, is_sseclient_available
from packaging import version
from .base import Chat
import gc
import threading
class ChatOpenaiApi(Chat):
    """OpenAI-compatible chat-completion API server.

    :param model_path: str, directory containing the model checkpoint
    :param name: str, model name reported by the models route
    :param generation_config: dict, settings forwarded to model.generate
    :param route_api: str, route of the chat-completion endpoint
    :param route_models: str, route of the model-list endpoint
    :param offload_when_nocall: str, offload the model to 'cpu' or 'disk'
        after a period with no API calls
    :param max_callapi_interval: int, maximum idle interval (seconds) before
        the model is offloaded
    :param scheduler_interval: int, interval (seconds) of the background
        idle-check job

    Example
    ------------------------
    # Serving the chatglm2 api
    from bert4torch.pipelines import ChatGlm2OpenaiApi

    model_path = "E:/pretrain_ckpt/glm/chatglm2-6B"
    generation_config = {'mode':'random_sample',
                         'max_length':2048,
                         'default_rtype':'logits',
                         'use_states':True
                         }
    chat = ChatGlm2OpenaiApi(model_path, **generation_config)
    chat.run()

    # TODO:
    1. When a later call moves the model from cpu back to cuda, host memory
       does not drop; presumably caused by operating across threads
    2. Occasionally the main thread and the timer thread race during a call,
       causing device-mismatch errors
    3. How to offload to disk without occupying RAM or VRAM
    """
    def __init__(self, model_path, name='default', route_api='/chat/completions', route_models='/models',
                 max_callapi_interval=24*3600, scheduler_interval=10*60, offload_when_nocall:Literal['cpu', 'disk']=None, **kwargs):
        self.offload_when_nocall = offload_when_nocall
        if offload_when_nocall is not None:
            # build the model lazily on first request so it can be offloaded later
            kwargs['create_model_at_startup'] = False
        super().__init__(model_path, **kwargs)
        assert is_fastapi_available(), "No module found, use `pip install fastapi`"
        from sse_starlette.sse import ServerSentEvent, EventSourceResponse
        import sse_starlette
        if version.parse(sse_starlette.__version__) > version.parse('1.8'):
            log_warn('Module `sse_starlette` above 1.8 not support stream output')
        self.max_callapi_interval = max_callapi_interval  # maximum idle interval between api calls
        self.scheduler_interval = scheduler_interval
        self.EventSourceResponse = EventSourceResponse
        self.name = name
        self.role_user = 'user'
        self.role_assistant = 'assistant'
        self.role_system = 'system'
        if offload_when_nocall is None:
            self.app = FastAPI(lifespan=lifespan)
        else:
            # enable a background job that monitors api-call activity
            self.app = FastAPI()
            self.app.add_event_handler("startup", self.startup_event)
            self.app.add_event_handler("shutdown", lambda: self.shutdown_event(self.app.state.scheduler))
            self.lock = threading.Lock()
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )
        # register the routes
        router = APIRouter()
        router.add_api_route(route_models, methods=['GET'], endpoint=self.list_models, response_model=_ModelList)
        router.add_api_route(route_api, methods=['POST'], endpoint=self.create_chat_completion, response_model=_ChatCompletionResponse)
        self.app.include_router(router)
    def startup_event(self):
        # start the periodic idle-check job
        from apscheduler.schedulers.background import BackgroundScheduler
        scheduler = BackgroundScheduler()
        scheduler.add_job(self.check_last_call, 'interval', seconds=self.scheduler_interval)
        scheduler.start()
        self.app.state.scheduler = scheduler  # store the scheduler on app state so it can be used at shutdown
    def shutdown_event(scheduler):
        # NOTE(review): missing `self` parameter — the bound call
        # `self.shutdown_event(self.app.state.scheduler)` would pass the
        # instance as `scheduler` plus an extra argument; confirm and fix.
        if scheduler:
            scheduler.shutdown()
    async def list_models(self):
        """Return the single served model as an OpenAI-style model list."""
        model_card = _ModelCard(id=self.name)
        return _ModelList(data=[model_card])
    async def create_chat_completion(self, request: _ChatCompletionRequest):
        """Handle a chat-completion request (streaming or one-shot)."""
        # per-request generation overrides
        if request.temperature:
            self.generation_config['temperature'] = request.temperature
        if request.top_p:
            self.generation_config['top_p'] = request.top_p
        if request.top_k:
            self.generation_config['top_k'] = request.top_k
        if request.max_length:
            self.generation_config['max_length'] = request.max_length
        if request.repetition_penalty:
            self.generation_config['repetition_penalty'] = request.repetition_penalty
        if request.messages[-1].role != self.role_user:
            raise HTTPException(status_code=400, detail="Invalid request")
        query = request.messages[-1].content
        prev_messages = request.messages[:-1]
        # a leading system message is prepended to the query text
        if len(prev_messages) > 0 and prev_messages[0].role == self.role_system:
            query = prev_messages.pop(0).content + query
        # rebuild (user, assistant) turn pairs as history
        history = []
        if len(prev_messages) % 2 == 0:
            for i in range(0, len(prev_messages), 2):
                if prev_messages[i].role == self.role_user and prev_messages[i+1].role == self.role_assistant:
                    history.append((prev_messages[i].content, prev_messages[i+1].content))
                else:
                    raise HTTPException(status_code=400, detail=f'Arg `messages` do not follow {self.role_user}, \
                        {self.role_assistant} format.')
        else:
            log_warn(f'prev_messages={len(prev_messages)}%2 != 0, use current query without history instead.')
        input_text = self.build_prompt(query, history)
        # build the model (may have been offloaded); guard with a lock when
        # the background offload job can run concurrently
        if self.offload_when_nocall is None:
            self.model = self.build_model()
        else:
            with self.lock:
                self.model = self.build_model()
            self.last_callapi_timestamp = time.time()
        # streaming output
        if request.stream:
            generate = self.predict(input_text, request.model)
            return self.EventSourceResponse(generate, media_type="text/event-stream")
        # non-streaming output
        else:
            response = self.model.generate(input_text, **self.generation_config)
            choice_data = _ChatCompletionResponseChoice(
                index=0,
                message=_ChatMessage(role=self.role_assistant, content=response),
                finish_reason="stop"
            )
            return _ChatCompletionResponse(model=request.model, choices=[choice_data], object="chat.completion")
    async def predict(self, query: str, model_id: str):
        """Yield SSE chunks for streaming generation, ending with '[DONE]'."""
        # first chunk carries only the assistant role
        choice_data = _ChatCompletionResponseStreamChoice(
            index=0,
            delta=_DeltaMessage(role=self.role_assistant),
            finish_reason=None
        )
        chunk = _ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
        yield "{}".format(chunk.model_dump_json(exclude_unset=True))
        current_length = 0
        for new_response in self.model.stream_generate(query, **self.generation_config):
            # stream_generate yields the full text so far; emit only the delta
            if len(new_response) == current_length:
                continue
            new_text = new_response[current_length:]
            current_length = len(new_response)
            choice_data = _ChatCompletionResponseStreamChoice(
                index=0,
                delta=_DeltaMessage(content=new_text),
                finish_reason=None
            )
            chunk = _ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
            yield "{}".format(chunk.model_dump_json(exclude_unset=True))
        # final empty delta marks the stop
        choice_data = _ChatCompletionResponseStreamChoice(
            index=0,
            delta=_DeltaMessage(),
            finish_reason="stop"
        )
        chunk = _ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
        yield "{}".format(chunk.model_dump_json(exclude_unset=True))
        yield '[DONE]'
    def check_last_call(self):
        '''Offload the model if the idle time since the last api call exceeds the limit'''
        now = time.time()
        if not hasattr(self, 'model') or (self.model is None):
            return
        elif not hasattr(self, 'last_callapi_timestamp'):
            self.last_callapi_timestamp = now
        elif now - self.last_callapi_timestamp > self.max_callapi_interval:  # idle interval exceeded
            if (self.offload_when_nocall == 'cpu') and (str(self.model.device) != 'cpu'):
                with self.lock:
                    # no recent calls: move the model to CPU
                    self.model.to('cpu')
                    log_info(f"Model moved to cpu due to no activity for {self.max_callapi_interval} sec.")
                    gc.collect()
                    cuda_empty_cache()
            elif (self.offload_when_nocall == 'disk') and hasattr(self, 'model'):
                with self.lock:
                    # drop the model reference entirely so it is rebuilt on the next call
                    self.model = None
                    del self.model
                    log_info(f"Model moved to disk due to no activity for {self.max_callapi_interval} sec.")
                    gc.collect()
                    cuda_empty_cache()
    def run(self, app:str=None, host:str="0.0.0.0", port:int=8000, **kwargs):
        '''Main entry point: serve the app with uvicorn'''
        import uvicorn
        uvicorn.run(app or self.app, host=host, port=port, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `extend_with_chat_openai_api` function. Write a Python function `def extend_with_chat_openai_api(InputModel)` to solve the following problem:
添加ChatWebDemo
Here is the function:
def extend_with_chat_openai_api(InputModel):
    """Build and return a demo class mixing `InputModel` with ChatOpenaiApi."""
    return type('ChatDemo', (InputModel, ChatOpenaiApi), {})
20,991 | import os
import torch
from typing import Union, Optional
from bert4torch.models import build_transformer_model
from bert4torch.snippets import log_warn_once, cuda_empty_cache, is_streamlit_available, log_info
from packaging import version
import gc
class ChatCli(Chat):
    '''Interactive chat demo on the command line.'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # banner/help line shown to the user (user-facing text, kept verbatim)
        self.init_str = "输入内容进行对话,clear清空对话历史,stop终止程序"
        # number of recent turns fed back to the model as active history
        self.history_maxlen = 3
    def build_cli_text(self, history):
        '''Build the full transcript text displayed in the terminal.'''
        prompt = self.init_str
        for query, response in history:
            prompt += f"\n\nUser:{query}"
            prompt += f"\n\nAssistant:{response}"
        return prompt
    def run(self, stream=True):
        """Main REPL loop.

        :param stream: bool, if True re-renders the screen as tokens arrive;
            otherwise prints the full response once it is complete
        """
        import platform
        os_name = platform.system()
        previous_history, history = [], []
        clear_command = 'cls' if os_name == 'Windows' else 'clear'
        print(self.init_str)
        while True:
            query = input("\nUser: ")
            if query.strip() == "stop":
                break
            if query.strip() == "clear":
                # wipe histories, drop any cached generation states and redraw
                previous_history, history = [], []
                if 'states' in self.generation_config:
                    self.generation_config.pop('states')
                cuda_empty_cache()
                os.system(clear_command)
                print(self.init_str)
                continue
            prompt = self.build_prompt(query, history)
            # model may be created lazily; build_model() is expected to be idempotent
            self.model = self.build_model()
            if stream:
                # redraw the whole transcript on every partial response
                for response in self.model.stream_generate(prompt, **self.generation_config):
                    response = self.process_response(response, history)
                    new_history = history + [(query, response)]
                    os.system(clear_command)
                    print(self.build_cli_text(previous_history + new_history), flush=True)
            else:
                response = self.model.generate(prompt, **self.generation_config)
                response = self.process_response(response, history)
                new_history = history + [(query, response)]
                os.system(clear_command)
                print(self.build_cli_text(previous_history + new_history), flush=True)
            # keep only the last `history_maxlen` turns active; older turns are
            # retained in `previous_history` for display only
            history = new_history[-self.history_maxlen:]
            if len(new_history) > self.history_maxlen:
                previous_history += new_history[:-self.history_maxlen]
            cuda_empty_cache()
The provided code snippet includes necessary dependencies for implementing the `extend_with_cli` function. Write a Python function `def extend_with_cli(InputModel)` to solve the following problem:
添加ChatCliDemo
Here is the function:
def extend_with_cli(InputModel):
    """Build and return a demo class mixing `InputModel` with ChatCli."""
    return type('ChatDemo', (InputModel, ChatCli), {})
20,992 | import os
import torch
from typing import Union, Optional
from bert4torch.models import build_transformer_model
from bert4torch.snippets import log_warn_once, cuda_empty_cache, is_streamlit_available, log_info
from packaging import version
import gc
class ChatWebGradio(Chat):
    '''Web chat demo implemented with gradio.

    Streams output by default; the history is never trimmed automatically
    and must be cleared manually.
    '''
    def __init__(self, *args, max_length=4096, **kwargs):
        super().__init__(*args, **kwargs)
        import gradio as gr
        self.gr = gr
        self.max_length = max_length
        self.max_repetition_penalty = 10
        self.stream = True  # streaming is the usual mode, so it is not exposed as a page option
        if version.parse(gr.__version__) < version.parse("3.44.4"):
            log_warn_once('`gradio` changes frequently, the code is successfully tested under 3.44.4')
    def reset_user_input(self):
        # clear the input textbox
        return self.gr.update(value='')
    def reset_state(self):
        # drop cached generation states and clear chatbot + history
        if 'states' in self.generation_config:
            self.generation_config.pop('states')
        cuda_empty_cache()  # release cached GPU memory
        return [], []
    def set_generation_config(self, max_length, top_p, temperature, repetition_penalty):
        '''Update generation settings from the web-page sliders.'''
        self.generation_config['max_length'] = max_length
        self.generation_config['top_p'] = top_p
        self.generation_config['temperature'] = temperature
        self.generation_config['repetition_penalty'] = repetition_penalty
    def __stream_predict(self, input, chatbot, history, max_length, top_p, temperature, repetition_penalty):
        '''Streaming generation: yields (chatbot, history) on every partial response.'''
        self.set_generation_config(max_length, top_p, temperature, repetition_penalty)
        chatbot.append((input, ""))
        input_text = self.build_prompt(input, history)
        self.model = self.build_model()
        for response in self.model.stream_generate(input_text, **self.generation_config):
            response = self.process_response(response, history)
            chatbot[-1] = (input, response)
            new_history = history + [(input, response)]
            yield chatbot, new_history
        cuda_empty_cache()  # release cached GPU memory
    def __predict(self, input, chatbot, history, max_length, top_p, temperature, repetition_penalty):
        '''One-shot generation: returns the final (chatbot, history).'''
        self.set_generation_config(max_length, top_p, temperature, repetition_penalty)
        chatbot.append((input, ""))
        input_text = self.build_prompt(input, history)
        self.model = self.build_model()
        response = self.model.generate(input_text, **self.generation_config)
        response = self.process_response(response, history)
        chatbot[-1] = (input, response)
        new_history = history + [(input, response)]
        cuda_empty_cache()  # release cached GPU memory
        return chatbot, new_history
    def run(self, **launch_configs):
        """Assemble the gradio Blocks UI and launch the server."""
        with self.gr.Blocks() as demo:
            self.gr.HTML("""<h1 align="center">Chabot Web Demo</h1>""")
            chatbot = self.gr.Chatbot()
            with self.gr.Row():
                with self.gr.Column(scale=4):
                    with self.gr.Column(scale=12):
                        user_input = self.gr.Textbox(show_label=False, placeholder="Input...", lines=10) # .style(container=False)
                    with self.gr.Column(min_width=32, scale=1):
                        submitBtn = self.gr.Button("Submit", variant="primary")
                with self.gr.Column(scale=1):
                    emptyBtn = self.gr.Button("Clear History")
                    max_length = self.gr.Slider(0, self.max_length, value=self.max_length//2, step=1.0, label="Maximum length", interactive=True)
                    top_p = self.gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
                    temperature = self.gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
                    repetition_penalty = self.gr.Slider(0, self.max_repetition_penalty, value=1, step=0.1, label="Repetition penalty", interactive=True)
            history = self.gr.State([])
            # wire the submit button to streaming or one-shot prediction
            if self.stream:
                submitBtn.click(self.__stream_predict, [user_input, chatbot, history, max_length, top_p, temperature, repetition_penalty], [chatbot, history], show_progress=True)
            else:
                submitBtn.click(self.__predict, [user_input, chatbot, history, max_length, top_p, temperature, repetition_penalty], [chatbot, history], show_progress=True)
            submitBtn.click(self.reset_user_input, [], [user_input])
            emptyBtn.click(self.reset_state, outputs=[chatbot, history], show_progress=True)
        demo.queue().launch(**launch_configs)
The provided code snippet includes necessary dependencies for implementing the `extend_with_web_gradio` function. Write a Python function `def extend_with_web_gradio(InputModel)` to solve the following problem:
添加ChatWebDemo
Here is the function:
def extend_with_web_gradio(InputModel):
    """Build and return a demo class mixing `InputModel` with ChatWebGradio."""
    return type('ChatDemo', (InputModel, ChatWebGradio), {})
20,993 | import os
import torch
from typing import Union, Optional
from bert4torch.models import build_transformer_model
from bert4torch.snippets import log_warn_once, cuda_empty_cache, is_streamlit_available, log_info
from packaging import version
import gc
class ChatWebStreamlit(Chat):
    # Web chat demo implemented with streamlit.
    # NOTE(review): `st` is used here but `import streamlit as st` is not
    # visible in this chunk — presumably imported at module level behind
    # `is_streamlit_available()`; confirm against the full file.
    def __init__(self, *args, max_length=4096, **kwargs):
        if not is_streamlit_available():
            raise ModuleNotFoundError('pip install streamlit')
        if version.parse(st.__version__) < version.parse("1.29.0"):
            log_warn_once('`streamlit` is successfully tested under 1.29.0')
        st.set_page_config(
            page_title="Chabot Web Demo",
            page_icon=":robot:",
            layout="wide"
        )
        super().__init__(*args, **kwargs)
        self.max_length = max_length
    # `_self` instead of `self` — presumably so streamlit caching decorators
    # skip hashing the instance; confirm against the subclasses.
    def build_model(_self):
        return super().build_model()
    def build_tokenizer(_self):
        return super().build_tokenizer()
    def run(self):
        """Render the chat page and stream responses into the placeholder."""
        if "history" not in st.session_state:
            st.session_state.history = []
        if "states" not in st.session_state:
            st.session_state.states = None
        # sidebar controls for generation parameters
        max_length = st.sidebar.slider("max_length", 0, self.max_length, self.max_length//2, step=1)
        top_p = st.sidebar.slider("top_p", 0.0, 1.0, 0.8, step=0.01)
        temperature = st.sidebar.slider("temperature", 0.0, 1.0, 0.6, step=0.01)
        buttonClean = st.sidebar.button("清理会话历史", key="clean")
        if buttonClean:
            st.session_state.history = []
            st.session_state.states = None
            cuda_empty_cache()
            st.rerun()
        # replay past turns
        for i, message in enumerate(st.session_state.history):
            with st.chat_message(name="user", avatar="user"):
                st.markdown(message[0])
            with st.chat_message(name="assistant", avatar="assistant"):
                st.markdown(message[1])
        with st.chat_message(name="user", avatar="user"):
            input_placeholder = st.empty()
        with st.chat_message(name="assistant", avatar="assistant"):
            message_placeholder = st.empty()
        prompt_text = st.chat_input("请输入您的问题")
        if prompt_text:
            input_placeholder.markdown(prompt_text)
            history = st.session_state.history
            states = st.session_state.states
            self.generation_config['max_length'] = max_length
            self.generation_config['top_p'] = top_p
            self.generation_config['temperature'] = temperature
            self.generation_config['states'] = states
            input_text = self.build_prompt(prompt_text, history)
            # NOTE(review): `self.model` is used without calling build_model()
            # here — presumably it is created elsewhere (startup/caching);
            # confirm.
            for response in self.model.stream_generate(input_text, **self.generation_config):
                response = self.process_response(response, history)
                message_placeholder.markdown(response)
            st.session_state.history = history + [(prompt_text, response)]
            st.session_state.states = self.generation_config.get('states')
The provided code snippet includes necessary dependencies for implementing the `extend_with_web_streamlit` function. Write a Python function `def extend_with_web_streamlit(InputModel)` to solve the following problem:
添加ChatWebDemo
Here is the function:
def extend_with_web_streamlit(InputModel):
    """Attach the streamlit chat web demo to a model class.

    Returns a new class combining ``InputModel`` with ``ChatWebStreamlit``,
    so the result can both run the model and serve the web UI.
    """
    class ChatDemo(InputModel, ChatWebStreamlit):
        pass

    return ChatDemo
20,994 | from torch.nn import Linear, Embedding
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import bz2
import torch
import base64
import ctypes
from typing import List
import re
from tqdm import tqdm
from functools import partial
import inspect
from bert4torch.snippets import log_error
def compress_int4_weight(weight: torch.Tensor):  # (n, m)
    """Pack an int8 weight matrix into int4 storage (two values per byte).

    Runs the cpm ``int4WeightCompression`` CUDA kernel; the input must have an
    even number of columns and the result has half as many int8 columns.
    """
    with torch.cuda.device(weight.device):
        rows, cols = weight.size(0), weight.size(1)
        assert cols % 2 == 0
        packed_cols = cols // 2
        packed = torch.empty(rows, packed_cols, dtype=torch.int8, device="cuda")

        kernels.int4WeightCompression(
            (rows, 1, 1),                                      # gridDim: one block row per matrix row
            (min(round_up(packed_cols, 32), 1024), 1, 1),      # blockDim: warp-aligned, capped at 1024
            0,
            torch.cuda.current_stream(),
            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(packed.data_ptr()),
             ctypes.c_int32(rows), ctypes.c_int32(packed_cols)],
        )
        return packed
20,995 | from torch.nn import Linear, Embedding
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import bz2
import torch
import base64
import ctypes
from typing import List
import re
from tqdm import tqdm
from functools import partial
import inspect
from bert4torch.snippets import log_error
def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
    """Dequantize an int8- or int4-packed weight matrix to fp16 on the GPU.

    Dispatches to the matching cpm extraction CUDA kernel; each stored int8
    element expands to ``8 // source_bit_width`` half-precision values, scaled
    per row by ``scale_list``.
    """
    if source_bit_width == 8:
        extraction_kernel = kernels.int8WeightExtractionHalf
    elif source_bit_width == 4:
        extraction_kernel = kernels.int4WeightExtractionHalf
    else:
        assert False, "Unsupported bit-width"

    with torch.cuda.device(weight.device):
        rows, packed_cols = weight.size(0), weight.size(1)
        result = torch.empty(rows, packed_cols * (8 // source_bit_width), dtype=torch.half, device="cuda")

        extraction_kernel(
            (rows, 1, 1),                                      # gridDim: one block row per matrix row
            (min(round_up(packed_cols, 32), 1024), 1, 1),      # blockDim: warp-aligned, capped at 1024
            0,
            torch.cuda.current_stream(),
            [
                ctypes.c_void_p(weight.data_ptr()),
                ctypes.c_void_p(scale_list.data_ptr()),
                ctypes.c_void_p(result.data_ptr()),
                ctypes.c_int32(rows),
                ctypes.c_int32(packed_cols),
            ],
        )
        return result
20,996 | from torch.nn import Linear, Embedding
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import bz2
import torch
import base64
import ctypes
from typing import List
import re
from tqdm import tqdm
from functools import partial
import inspect
from bert4torch.snippets import log_error
class CacheTensor():
    """Minimal wrapper around a pre-allocated tensor shared as a scratch buffer.

    Used by ``QuantizedLinear`` as a shared dequantization cache; only exposes
    the operations the quantization code needs (``to`` and ``data_ptr``).
    """
    def __init__(self, *args, **kwargs):
        # All arguments are forwarded verbatim to torch.empty.
        self.tensor = torch.empty(*args, **kwargs)

    def to(self, *args, **kwargs):
        """Move/cast the backing tensor in place.

        Returns ``self`` so calls can be chained, mirroring ``torch.Tensor.to``
        (backward compatible: existing callers ignore the return value).
        """
        self.tensor = self.tensor.to(*args, **kwargs)
        return self

    def data_ptr(self):
        """Raw device pointer of the backing tensor (for ctypes kernel calls)."""
        return self.tensor.data_ptr()
class QuantizedLinear(Linear):
    """``nn.Linear`` variant storing its weight quantized to int8 or int4.

    The weight is kept as a frozen int8 tensor (for int4, two values are packed
    per byte via ``compress_int4_weight``) plus a per-row ``weight_scale``;
    ``forward`` goes through the ``W8A16Linear`` autograd function.
    """
    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, quantized_weight=None,
                 quantized_weight_scale=None, quantization_cache=None, empty_init=False, *args, **kwargs):
        # weight_bit_width: 8 or 4 — storage bit width of the weight
        # weight_tensor: fp weight to quantize (ignored when pre-quantized tensors are given)
        # bias_tensor: optional bias, kept in its original dtype (not quantized)
        # quantized_weight / quantized_weight_scale: pre-quantized weight + per-row scales
        # quantization_cache: optional shared CacheTensor buffer (see quantize_cpm_kernels)
        # empty_init: allocate uninitialized quantized storage instead of quantizing
        super(QuantizedLinear, self).__init__(*args, **kwargs)
        self.weight_bit_width = weight_bit_width
        self.quantization_cache = quantization_cache

        if (quantized_weight is not None) and (quantized_weight_scale is not None):
            # Already quantized: discard the fp weight created by Linear.__init__
            # and register the supplied tensors as frozen Parameters.
            del self.weight
            self.weight = Parameter(quantized_weight.to(kwargs["device"]), requires_grad=False)
            self.weight_scale = Parameter(quantized_weight_scale.to(kwargs["device"]), requires_grad=False)
        else:
            # Remember the fp weight's shape before dropping it.
            shape = self.weight.shape
            del self.weight

            if weight_tensor is None or empty_init:
                # Reserve storage only; int4 packs two values per int8 byte,
                # hence shape[1] * weight_bit_width // 8 columns.
                self.weight = torch.empty(
                    shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
                )
                self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
            else:
                # Symmetric per-row (absmax) quantization:
                # scale = row_absmax / (2**(bits-1) - 1); weight = round(w / scale).
                self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).to(kwargs["dtype"])
                self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)
                if weight_bit_width == 4:
                    self.weight = compress_int4_weight(self.weight)

            self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
            self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)

        if bias_tensor is not None:
            self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
        else:
            self.bias = None

    def reset_parameters(self):
        """To accelerate initialization"""
        # Intentionally a no-op: Linear.__init__ calls reset_parameters(), and
        # the fp weight it would initialize is deleted in __init__ anyway.
        pass

    def forward(self, input):
        # W8A16Linear consumes the quantized weight plus its per-row scales;
        # presumably it dequantizes and matmuls in one fused op — see its
        # definition for details. Bias is added afterwards in full precision.
        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
        if self.bias is not None:
            output = output + self.bias
        return output

    def _apply(self, fn):
        self_obj = super()._apply(fn)
        if self.quantization_cache is not None:
            # Keep the shared cache on the same device / scale dtype as the
            # (possibly moved or cast) parameters.
            self.quantization_cache.to(self_obj.weight.device)
            self.quantization_cache.to(self_obj.weight_scale.dtype)
        return self_obj
The provided code snippet includes necessary dependencies for implementing the `quantize_cpm_kernels` function. Write a Python function `def quantize_cpm_kernels(model, quantization_bit, use_quantization_cache=False, empty_init=False, target_modules=None, **kwargs)` to solve the following problem:
从chatglm-6b移植过来的量化,方便以int8和int4进行推理 源链接:https://huggingface.co/THUDM/chatglm-6b/blob/main/quantization.py Replace fp16 linear with quantized linear 这里修改了hard code, 可以适配其他模型 target_modules: str/list, 指定对某些层做量化
Here is the function:
def quantize_cpm_kernels(model, quantization_bit, use_quantization_cache=False, empty_init=False, target_modules=None, **kwargs):
    """Post-training quantization ported from chatglm-6b for int8/int4 inference.

    Source: https://huggingface.co/THUDM/chatglm-6b/blob/main/quantization.py
    Replaces fp16 ``nn.Linear`` modules with ``QuantizedLinear``; the original
    hard-coded layer names were removed so this adapts to other models.

    :param model: torch module whose Linear layers are replaced in place
    :param quantization_bit: target bit width, 8 or 4
    :param use_quantization_cache: share one dequantization buffer between
        layers whose names differ only by a numeric index
    :param empty_init: allocate uninitialized quantized weights instead of
        quantizing the existing fp weights
    :param target_modules: str (regex, matched with ``re.fullmatch``) or list of
        name suffixes selecting which Linear layers to quantize; ``None``
        quantizes every Linear layer
    :return: the same ``model`` object, modified in place
    """
    # Collect the Linear modules selected for replacement.
    modules_trans = {}
    for name, module in model.named_modules():
        if not isinstance(module, Linear):
            continue
        if target_modules is None:
            # No filter: replace every Linear layer.
            modules_trans[name] = module
        elif isinstance(target_modules, str):
            if re.fullmatch(target_modules, name):
                modules_trans[name] = module
        elif any(name.endswith(target_key) for target_key in target_modules):
            modules_trans[name] = module

    # TODO: only cuda is supported for now (the cpm kernels are CUDA-only).
    current_device = torch.cuda.current_device()
    dtype = torch.half
    QuantizedLinearWithPara = partial(
        QuantizedLinear,
        weight_bit_width=quantization_bit,
        bias=True,
        dtype=dtype,
        empty_init=empty_init
    )

    cache = dict()
    for name, module in tqdm(modules_trans.items(), desc='Quantize linear layers'):
        # Layers differing only by index (e.g. "layers.0.q" / "layers.1.q")
        # collapse to one cache key.  Raw string: '\.' is an invalid escape in
        # a normal string literal (SyntaxWarning on Python 3.12+).
        cache_name = re.sub(r'\.[0-9]+\.', '.', name)
        if use_quantization_cache and (cache_name not in cache):
            n, m = module.weight.size(0), module.weight.size(1)
            cache[cache_name] = CacheTensor(n, m, dtype=dtype, device=current_device, requires_grad=False)
        module_quant = QuantizedLinearWithPara(
            weight_tensor=module.weight.to(current_device),
            bias_tensor=module.bias,
            in_features=module.in_features,
            out_features=module.out_features,
            device=module.weight.device,
            quantization_cache=cache.get(cache_name)
        )
        del module
        # Replace the module on its parent.  get_submodule resolves nested and
        # indexed names (e.g. "encoder.layers.3.dense"), replacing the previous
        # string-building exec() hack.
        parent_name, _, attr_name = name.rpartition('.')
        parent = model.get_submodule(parent_name) if parent_name else model
        setattr(parent, attr_name, module_quant)
    return model
20,997 | from torch.nn import Linear, Embedding
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import bz2
import torch
import base64
import ctypes
from typing import List
import re
from tqdm import tqdm
from functools import partial
import inspect
from bert4torch.snippets import log_error
The provided code snippet includes necessary dependencies for implementing the `quantize_load_in_kbit` function. Write a Python function `def quantize_load_in_kbit(model, load_in_8bit=False, load_in_4bit=False, keep_in_fp32_modules=None, llm_int8_skip_modules=None, quantization_config=None, **kwargs)` to solve the following problem:
transformer的load_in_8bit, 源自transformer源代码
Here is the function:
def quantize_load_in_kbit(model, load_in_8bit=False, load_in_4bit=False, keep_in_fp32_modules=None, llm_int8_skip_modules=None, quantization_config=None, **kwargs):
    '''bitsandbytes load_in_8bit / load_in_4bit quantization, ported from the transformers source code.'''
    from transformers.utils.bitsandbytes import replace_with_bnb_linear, set_module_quantized_tensor_to_device
    from transformers.utils.quantization_config import BitsAndBytesConfig

    if quantization_config is None:
        # Build a config from the two boolean switches; unused kwargs pass through.
        quantization_config, kwargs = BitsAndBytesConfig.from_dict(
            config_dict={"load_in_8bit": load_in_8bit, "load_in_4bit": load_in_4bit}, return_unused_kwargs=True, **kwargs
        )
    else:
        # An explicit config wins; mirror its flags and reject conflicting kwargs.
        load_in_8bit = quantization_config.load_in_8bit
        load_in_4bit = quantization_config.load_in_4bit
        conflicting_kwargs = {
            k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters
        }
        if len(conflicting_kwargs) > 0:
            raise ValueError(
                "You can't pass `load_in_8bit` or any other `BitsAndBytesConfig` argument as a kwarg when passing "
                "`quantization_config` argument at the same time."
            )

    # Modules such as the lm_head stay in their original dtype for numerical stability.
    skip_modules = quantization_config.llm_int8_skip_modules or []
    modules_to_not_convert = skip_modules if isinstance(skip_modules, list) else [skip_modules]
    modules_to_not_convert.extend(keep_in_fp32_modules if keep_in_fp32_modules is not None else [])
    modules_to_not_convert.extend(llm_int8_skip_modules if llm_int8_skip_modules is not None else [])

    state_dict = model.state_dict()
    model = replace_with_bnb_linear(model, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config)
    # Re-materialize any parameter left on the meta device from the saved state dict.
    for key, param in model.named_parameters():
        if param.device == torch.device("meta"):
            set_module_quantized_tensor_to_device(model, key, 'cpu', value=state_dict[key], fp16_statistics=None)
    model.is_loaded_in_8bit = load_in_8bit
    model.is_loaded_in_4bit = load_in_4bit
    return model
20,998 | from torch.optim.lr_scheduler import LambdaLR
from torch.optim.optimizer import Optimizer
import torch
The provided code snippet includes necessary dependencies for implementing the `get_linear_schedule_with_warmup` function. Write a Python function `def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1)` to solve the following problem:
带warmup的schedule, 源自transformers包optimization.py中 :param num_warmup_steps: 需要warmup的步数, 一般为 num_training_steps * warmup_proportion(warmup的比例, 建议0.05-0.15) :param num_training_steps: 总的训练步数, 一般为 train_batches * num_epoch
Here is the function:
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay (from transformers' optimization.py).

    :param num_warmup_steps: number of warmup steps, typically
        num_training_steps * warmup_proportion (0.05-0.15 is a common range)
    :param num_training_steps: total training steps, typically
        train_batches * num_epoch
    :param last_epoch: index of the last epoch when resuming (default -1)
    """
    def lr_lambda(current_step: int):
        # Warmup phase: scale linearly from 0 up to the base lr.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay phase: scale linearly down to 0 at num_training_steps.
        remaining = float(num_training_steps - current_step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
20,999 | from torch.optim.lr_scheduler import LambdaLR
from torch.optim.optimizer import Optimizer
import torch
The provided code snippet includes necessary dependencies for implementing the `extend_with_exponential_moving_average` function. Write a Python function `def extend_with_exponential_moving_average(model, decay=0.999)` to solve the following problem:
模型权重的指数滑动平均, 不参加梯度更新,只是记录滑动平均的参数,给预测使用 注意区别于类似adam一类的自适应学习率优化器, 针对一阶二阶梯度的指数滑动平均, 两者完全不同 Example: >>> # 初始化 >>> ema = ExponentialMovingAverage(model, 0.999) >>> # 训练过程中, 更新完参数后, 同步update ema_weights weights >>> def train(): >>> optimizer.step() >>> ema.step() >>> # eval前, 调用apply_ema_weights(); eval之后, restore_raw_weights()恢复原来模型的参数 >>> def evaluate(): >>> ema.apply_ema_weights() >>> # evaluate >>> # 如果想保存ema后的模型, 请在restore方法之前调用torch.save() >>> ema.restore_raw_weights()
Here is the function:
def extend_with_exponential_moving_average(model, decay=0.999):
    ''' Exponential moving average (EMA) of model weights, for evaluation only.

    Unlike adaptive optimizers (e.g. Adam), which average first/second-order
    gradient moments, this tracks a moving average of the weights themselves
    without taking part in gradient updates.

    Example:
        >>> # setup
        >>> ema = ExponentialMovingAverage(model, 0.999)
        >>> # during training, call ema.step() right after the optimizer step
        >>> def train():
        >>>     optimizer.step()
        >>>     ema.step()
        >>> # before eval call apply_ema_weights(); afterwards restore_raw_weights()
        >>> def evaluate():
        >>>     ema.apply_ema_weights()
        >>>     # evaluate
        >>>     # to save the EMA model, call torch.save() before restoring
        >>>     ema.restore_raw_weights()
    '''
    class ExponentialMovingAverage():
        def __init__(self, model, decay):
            self.model = model
            self.decay = decay
            # Shadow copies, initialized from the current trainable weights
            # and updated after every optimizer step.
            self.ema_weights = {
                name: param.data.clone()
                for name, param in model.named_parameters()
                if param.requires_grad
            }
            # Backup of the raw weights while the EMA weights are applied.
            self.model_weights = {}

        def step(self):
            # shadow <- (1 - decay) * current + decay * shadow
            for name, param in self.model.named_parameters():
                if not param.requires_grad:
                    continue
                assert name in self.ema_weights
                blended = (1.0 - self.decay) * param.data + self.decay * self.ema_weights[name]
                self.ema_weights[name] = blended.clone()

        def apply_ema_weights(self):
            # Swap the EMA weights in, stashing the raw ones for restore.
            for name, param in self.model.named_parameters():
                if not param.requires_grad:
                    continue
                assert name in self.ema_weights
                self.model_weights[name] = param.data
                param.data = self.ema_weights[name]

        def restore_raw_weights(self):
            # Put the raw weights back and drop the backup.
            for name, param in self.model.named_parameters():
                if not param.requires_grad:
                    continue
                assert name in self.model_weights
                param.data = self.model_weights[name]
            self.model_weights = {}

    return ExponentialMovingAverage(model, decay)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.