| id | prompt | docstring |
|---|---|---|
19,473 | import logging
import math
import os
from collections import OrderedDict
import copy
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parameter import Parameter
def swish(x):
return x * torch.sigmoid(x) | null |
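For reference, `swish(x)` is the same function PyTorch ships as SiLU; a minimal check, assuming the `swish` definition above:

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, steps=7)
assert torch.allclose(swish(x), F.silu(x))  # F.silu also computes x * sigmoid(x)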
19,474 | import logging
import math
import os
from collections import OrderedDict
import copy
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parameter import Parameter
The provided code snippet includes necessary dependencies for implementing the `_gelu_python` function. Write a Python function `def _gelu_python(x)` to solve the following problem:
Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in torch.nn.functional Also see https://arxiv.org/abs/1606.08415
Here is the function:
def _gelu_python(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
This is now written in C in torch.nn.functional
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in torch.nn.functional Also see https://arxiv.org/abs/1606.08415 |
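A minimal numeric check, assuming the `_gelu_python` definition above: the erf-based form matches `torch.nn.functional.gelu`'s default exact mode, while the tanh approximation quoted in the docstring is close but not identical.

import math
import torch
import torch.nn.functional as F

x = torch.randn(1000)
exact = _gelu_python(x)
assert torch.allclose(exact, F.gelu(x), atol=1e-6)  # F.gelu defaults to the exact erf form
tanh_approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
print((exact - tanh_approx).abs().max())  # small but nonzero difference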
19,475 | import logging
import math
import os
from collections import OrderedDict
import argparse
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
def add_optimizer_params(parser: argparse.ArgumentParser):
parser.add_argument('--lr', default=0.00001, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=0.01, type=float, help='weight decay rate')
parser.add_argument('--correct_bias', action='store_true', help='correct adam bias term')
parser.add_argument('--adam_epislon', default=1e-6, type=float, help='adam epsilon')
    parser.add_argument('--no_decay_bias', action='store_true', help='no weight decay on bias weights')
parser.add_argument('--adam_beta1', default=0.9, type=float, help='adam beta1 term')
parser.add_argument('--adam_beta2', default=0.98, type=float, help='adam beta2 term')
parser.add_argument('--scheduler', default='linear', type=str,
choices=['cosine', 'inv_sqrt', 'dev_perf', 'constant', 'linear', 'cycle'],
help='lr scheduler to use.')
parser.add_argument('--max_step', type=int, default=None, help='upper epoch limit')
parser.add_argument('--max_epoch', type=int, default=None, help='max epoch of training')
    parser.add_argument('--warmup_step', type=int, default=0, help='number of warmup steps')
parser.add_argument('--i_steps', type=str, default='0', help='interval_steps')
parser.add_argument('--i_lrs', type=str, default='0.00025', help='interval_lrs') | null |
19,476 | import logging
import math
import os
from collections import OrderedDict
import argparse
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
class AdamW(Optimizer):
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.0, correct_bias=True):
        ...  # body omitted in this snippet
    def reset_state(self):
        ...  # body omitted in this snippet
    def step(self, closure=None):
        ...  # body omitted in this snippet
def create_grouped_parameters(model, no_decay_bias):
    ...  # body omitted in this snippet
def create_adam_optimizer(
model,
lr,
weight_decay,
optimizer_grouped_parameters=None,
beta1=0.9,
beta2=0.98,
correct_bias=True,
adam_epislon=1e-6,
no_decay_bias=False
):
if optimizer_grouped_parameters is None:
optimizer_grouped_parameters = create_grouped_parameters(model, no_decay_bias)
optimizer = AdamW(
optimizer_grouped_parameters,
lr=lr,
betas=(beta1, beta2),
eps=adam_epislon,
weight_decay=weight_decay,
correct_bias=correct_bias
)
return optimizer | null |
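Since `create_grouped_parameters` is only stubbed above, here is one plausible sketch consistent with how `create_adam_optimizer` calls it; this is an assumption about its behavior, not the repository's actual body, and `torch.optim.AdamW` stands in for the custom `AdamW` class in the usage lines.

import torch
import torch.nn as nn

def create_grouped_parameters(model, no_decay_bias):
    # Hedged sketch: split parameters so biases / 1-D params skip weight decay
    # when no_decay_bias is set. The real implementation may differ.
    if not no_decay_bias:
        return [{"params": [p for p in model.parameters() if p.requires_grad]}]
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if name.endswith("bias") or param.ndim == 1:
            no_decay.append(param)
        else:
            decay.append(param)
    return [{"params": decay}, {"params": no_decay, "weight_decay": 0.0}]

model = nn.Linear(16, 4)
groups = create_grouped_parameters(model, no_decay_bias=True)
optimizer = torch.optim.AdamW(groups, lr=1e-4, weight_decay=0.01)  # stand-in for the AdamW above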
19,477 | import logging
import math
import os
from collections import OrderedDict
import argparse
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
def create_sgd_optimizer(model, lr):
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.0)
return optimizer | null |
19,478 | import logging
import math
import os
from collections import OrderedDict
import argparse
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
class AdamW(Optimizer):
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.0, correct_bias=True):
        ...  # body omitted in this snippet
    def reset_state(self):
        ...  # body omitted in this snippet
    def step(self, closure=None):
        ...  # body omitted in this snippet
def create_grouped_parameters(model, no_decay_bias):
    ...  # body omitted in this snippet
def create_adam_optimizer_from_args(model, args, grouped_parameters=None):
if grouped_parameters is None:
grouped_parameters = create_grouped_parameters(model, args.no_decay_bias)
optimizer = AdamW(
grouped_parameters,
lr=args.lr,
betas=(args.adam_beta1, args.adam_beta2),
eps=args.adam_epislon,
weight_decay=args.weight_decay,
correct_bias=args.correct_bias
)
return optimizer | null |
19,479 | import logging
import math
import os
from collections import OrderedDict
import argparse
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
class CosineAnnealingWarmupRestarts(_LRScheduler):
"""
optimizer (Optimizer): Wrapped optimizer.
first_cycle_steps (int): First cycle step size.
cycle_mult(float): Cycle steps magnification. Default: -1.
max_lr(float): First cycle's max learning rate. Default: 0.1.
min_lr(float): Min learning rate. Default: 0.001.
warmup_steps(int): Linear warmup step size. Default: 0.
gamma(float): Decrease rate of max learning rate by cycle. Default: 1.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(
self,
optimizer : torch.optim.Optimizer,
max_lr : float = 0.1,
min_lr : float = 0.0,
warmup_steps : int = 0,
max_steps : int = 1,
alpha : float = 0.,
last_epoch : int = -1
):
self.max_lr = max_lr # max learning rate in the current cycle
self.min_lr = min_lr # min learning rate
self.warmup_steps = warmup_steps # warmup step size
        self.alpha = alpha # floor of the cosine decay, as a fraction of max_lr
self.max_steps = max_steps
super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)
self.init_lr()
def init_lr(self):
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.min_lr
def get_lr(self):
if self.last_epoch < self.warmup_steps:
curr_lr = self.max_lr * self.last_epoch / self.warmup_steps
return curr_lr
else:
_step = min(self.last_epoch, self.max_steps)
cosine_decay = 0.5 * (1 + math.cos(math.pi * _step / self.max_steps))
decayed = (1 - self.alpha) * cosine_decay + self.alpha
return self.max_lr * decayed # learning_rate * decayed
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = math.floor(epoch)
_lr = self.get_lr()
for param_group in self.optimizer.param_groups:
param_group['lr'] = _lr
class CyclicScheduler(_LRScheduler):
def __init__(
self,
optimizer,
interval_steps = [],
interval_lrs = [],
last_epoch = -1,
):
self.optimizer = optimizer
self.interval_steps = interval_steps
self.interval_lrs = interval_lrs
self.last_epoch = last_epoch
super(CyclicScheduler, self).__init__(optimizer, last_epoch)
self.init_lr()
def init_lr(self):
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.interval_lrs[0]
def get_lr(self):
for _i in range(0, len(self.interval_steps)-1):
if self.last_epoch >= self.interval_steps[_i] and self.last_epoch < self.interval_steps[_i + 1]:
_alpha = (self.last_epoch - self.interval_steps[_i]) / (self.interval_steps[_i + 1] - self.interval_steps[_i] + 1e-6)
if _alpha < 0:
_alpha = 0
if _alpha >= 1:
_alpha = 1
curr_lr = _alpha * self.interval_lrs[_i + 1] + (1.0 - _alpha) * self.interval_lrs[_i]
return curr_lr
return self.interval_lrs[-1]
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = math.floor(epoch)
_lr = self.get_lr()
        for param_group in self.optimizer.param_groups:
param_group['lr'] = _lr
def get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps,
num_training_steps,
last_epoch=-1
):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_constant_schedule_with_warmup(
optimizer,
num_warmup_steps,
num_training_steps,
last_epoch=-1
):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch)
def create_optimizer_scheduler(optimizer, args):
if args.scheduler == 'cosine':
scheduler = CosineAnnealingWarmupRestarts(
optimizer,
max_lr=args.lr,
min_lr=0.0,
warmup_steps=args.warmup_step,
max_steps=args.max_step, alpha=0
)
elif args.scheduler == 'linear':
scheduler = get_linear_schedule_with_warmup(
optimizer, args.warmup_step, args.max_step, last_epoch=-1
)
elif args.scheduler == 'cycle':
if args.i_steps is not None:
args.i_steps = [int(_i) for _i in args.i_steps.split(',')]
args.i_lrs = [float(_i) for _i in args.i_lrs.split(',')]
args.max_step = args.i_steps[-1]
            print('max_step is reset to', args.max_step)
scheduler = CyclicScheduler(
optimizer, interval_steps=args.i_steps, interval_lrs=args.i_lrs
)
elif args.scheduler == 'constant':
scheduler = get_constant_schedule_with_warmup(
optimizer, args.warmup_step, args.max_step, last_epoch=-1
)
else:
        # constant learning rate.
scheduler = None
return scheduler | null |
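A hedged end-to-end sketch of how the pieces in this row compose; `torch.optim.AdamW` stands in for the stubbed custom `AdamW`, and the argument names follow `add_optimizer_params` from row 19,475.

import argparse
import torch

parser = argparse.ArgumentParser()
add_optimizer_params(parser)
args = parser.parse_args(['--scheduler', 'linear', '--max_step', '1000', '--warmup_step', '100'])

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)  # stand-in for the custom AdamW
scheduler = create_optimizer_scheduler(optimizer, args)

for _ in range(args.max_step):
    optimizer.step()   # lr ramps linearly for 100 steps, then decays linearly to 0
    scheduler.step()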
19,480 | import argparse
import time
import math
import os, sys
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
def add_gpu_params(parser: argparse.ArgumentParser):
parser.add_argument("--platform", default='k8s', type=str, help='platform cloud')
parser.add_argument("--local_rank", default=0, type=int, help='local rank')
parser.add_argument("--rank", default=0, type=int, help='rank')
parser.add_argument("--device", default=0, type=int, help='device')
parser.add_argument("--world_size", default=0, type=int, help='world size')
parser.add_argument("--random_seed", default=10, type=int, help='random seed') | null |
19,481 | import argparse
import time
import math
import os, sys
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
def distributed_opt(args, model, opt, grad_acc=1):
if args.platform == 'azure':
args.hvd.broadcast_parameters(model.state_dict(), root_rank=0)
opt = args.hvd.DistributedOptimizer(
opt, named_parameters=model.named_parameters(), backward_passes_per_step=grad_acc
)
elif args.platform == 'philly' or args.platform == 'k8s' or args.platform == 'local':
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank,
find_unused_parameters=False, broadcast_buffers=False
)
return model, opt | null |
19,482 | import argparse
import time
import math
import os, sys
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
def parse_gpu(args):
torch.manual_seed(args.random_seed)
if args.platform == 'local':
dist.init_process_group(backend='nccl')
local_rank = torch.distributed.get_rank()
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
args.rank = local_rank
args.device = device
args.world_size = torch.distributed.get_world_size()
args.dist = dist
elif args.platform == 'azure':
import horovod.torch as hvd
hvd.init()
print('azure hvd rank', hvd.rank(), 'local rank', hvd.local_rank())
local_rank = hvd.local_rank()
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
rank = hvd.rank()
world_size = hvd.size()
args.local_rank = local_rank
args.rank = rank
args.device = device
args.world_size = world_size
args.hvd = hvd
elif args.platform == 'philly':
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='nccl')
rank = dist.get_rank()
world_size = torch.distributed.get_world_size()
device = torch.device('cuda', local_rank)
args.rank = rank
args.device = device
args.world_size = world_size
args.dist = dist
elif args.platform == 'k8s':
master_uri = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.local_rank = local_rank
world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
world_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
rank = world_rank
torch.cuda.set_device(local_rank)
dist.init_process_group(
backend='nccl',
init_method=master_uri,
world_size=world_size,
rank=world_rank,
)
device = torch.device("cuda", local_rank)
args.rank = rank
args.device = device
args.world_size = world_size
args.dist = dist
print(
'myrank:', args.rank,
'local_rank:', args.local_rank,
'device_count:', torch.cuda.device_count(),
'world_size:', args.world_size
) | null |
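A hedged sketch of the intended call order for the 'local' platform; it assumes the script is launched with `torchrun` so that `dist.init_process_group` can read its environment variables, and the model is a placeholder.

# Run with: torchrun --nproc_per_node=2 train_script.py --platform local
import argparse
import torch

parser = argparse.ArgumentParser()
add_gpu_params(parser)
args = parser.parse_args()

parse_gpu(args)  # seeds torch, initializes NCCL, and fills args.rank/device/world_size
model = torch.nn.Linear(8, 2).to(args.device)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
model, opt = distributed_opt(args, model, opt, grad_acc=1)  # wraps the model in DDP for 'local'
# ... training loop ...
cleanup(args)  # note: destroys the process group only for 'k8s' and 'philly'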
19,483 | import argparse
import time
import math
import os, sys
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
def cleanup(args):
if args.platform == 'k8s' or args.platform == 'philly':
args.dist.destroy_process_group() | null |
19,484 | import functools
import os, shutil
import numpy as np
import torch
def logging(s, log_path, print_=True, log_=True):
if print_:
print(s)
if log_:
with open(log_path, 'a+') as f_log:
f_log.write(s + '\n')
def get_logger(log_path, **kwargs):
return functools.partial(logging, log_path=log_path, **kwargs)
def create_exp_dir(dir_path, scripts_to_save=None, debug=False):
if debug:
print('Debug Mode : no experiment dir created')
return functools.partial(logging, log_path=None, log_=False)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
print('Experiment dir : {}'.format(dir_path))
if scripts_to_save is not None:
script_path = os.path.join(dir_path, 'scripts')
if not os.path.exists(script_path):
os.makedirs(script_path)
for script in scripts_to_save:
dst_file = os.path.join(dir_path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
return get_logger(log_path=os.path.join(dir_path, 'log.txt')) | null |
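A minimal usage sketch of the logging helpers above; the experiment directory and the script name are hypothetical.

log = create_exp_dir('./exp/run1', scripts_to_save=['train.py'])  # copies train.py into ./exp/run1/scripts
log('starting training')                                          # prints and appends to ./exp/run1/log.txt
quiet = get_logger(os.path.join('./exp/run1', 'log.txt'), print_=False)
quiet('written to the log file only')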
19,485 | import functools
import os, shutil
import numpy as np
import torch
def save_checkpoint(model, optimizer, path, epoch):
torch.save(model, os.path.join(path, 'model_{}.pt'.format(epoch)))
torch.save(optimizer.state_dict(), os.path.join(path, 'optimizer_{}.pt'.format(epoch))) | null |
19,486 | import os
import json
import regex as re
from functools import lru_cache
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
Here is the function:
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
    return dict(zip(bs, cs)) | Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. |
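A quick demonstration of the mapping produced by `bytes_to_unicode` above: printable bytes map to themselves, while control and whitespace bytes are shifted into unused unicode code points, and the table covers every byte exactly once.

byte_encoder = bytes_to_unicode()
assert byte_encoder[ord('A')] == 'A'       # printable bytes are unchanged
assert byte_encoder[ord(' ')] == '\u0120'  # space becomes 'Ġ', the familiar GPT-2 word-boundary mark
assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256  # bijective over all bytes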
19,487 | import os
import json
import regex as re
from functools import lru_cache
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). |
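A small example of `get_pairs` on the word "low", before and after a hypothetical BPE merge of ('l', 'o'); note that symbols are variable-length strings, so pairs are recomputed as merges happen.

assert get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}
assert get_pairs(('lo', 'w')) == {('lo', 'w')}  # after merging ('l', 'o') into 'lo'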
19,488 | import os
import json
import regex as re
from functools import lru_cache
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
try:
import regex as re
self.re = re
except ImportError:
raise ImportError('Please install regex with: pip install regex')
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` not found in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
if token:
tokens.append(token)
return bpe_tokens, tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def get_encoder(models_dir):
with open(os.path.join(models_dir, 'encoder.json'), 'r') as f:
encoder = json.load(f)
with open(os.path.join(models_dir, 'vocab.bpe'), 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
) | null |
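A usage sketch of the encoder above; the directory path is hypothetical, and `get_encoder` expects it to contain GPT-2's `encoder.json` and `vocab.bpe` files.

enc = get_encoder('models/117M')               # hypothetical directory with encoder.json and vocab.bpe
ids, byte_tokens = enc.encode("Hello world")   # this Encoder.encode returns (bpe ids, byte-level tokens)
assert enc.decode(ids) == "Hello world"        # byte-level BPE round-trips exactly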
19,489 | import sys
import argparse
import codecs
import copy
import os
import pyter
import logging
import nltk
import subprocess
import re
from bert_score import score
from metrics.chrF import computeChrF
from metrics.bleurt.bleurt import score as bleurt_score
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
from razdel import tokenize
from tabulate import tabulate
def parse(refs_path, hyps_path, num_refs, lng='en'):
logging.info('STARTING TO PARSE INPUTS...')
print('STARTING TO PARSE INPUTS...')
# references
references = []
for i in range(num_refs):
fname = refs_path + str(i) if num_refs > 1 else refs_path
with codecs.open(fname, 'r', 'utf-8') as f:
texts = f.read().split('\n')
for j, text in enumerate(texts):
if len(references) <= j:
references.append([text])
else:
references[j].append(text)
# references tokenized
references_tok = copy.copy(references)
for i, refs in enumerate(references_tok):
if lng == 'ru':
references_tok[i] = [' '.join([_.text for _ in tokenize(ref)]) for ref in refs]
else:
references_tok[i] = [' '.join(nltk.word_tokenize(ref)) for ref in refs]
# hypothesis
with codecs.open(hyps_path, 'r', 'utf-8') as f:
hypothesis = f.read().split('\n')
# hypothesis tokenized
hypothesis_tok = copy.copy(hypothesis)
if lng == 'ru':
hypothesis_tok = [' '.join([_.text for _ in tokenize(hyp)]) for hyp in hypothesis_tok]
else:
hypothesis_tok = [' '.join(nltk.word_tokenize(hyp)) for hyp in hypothesis_tok]
logging.info('FINISHING TO PARSE INPUTS...')
print('FINISHING TO PARSE INPUTS...')
return references, references_tok, hypothesis, hypothesis_tok
def bleu_score(refs_path, hyps_path, num_refs):
logging.info('STARTING TO COMPUTE BLEU...')
print('STARTING TO COMPUTE BLEU...')
ref_files = []
for i in range(num_refs):
if num_refs == 1:
ref_files.append(refs_path)
else:
ref_files.append(refs_path + str(i))
command = 'perl {0} {1} < {2}'.format(BLEU_PATH, ' '.join(ref_files), hyps_path)
result = subprocess.check_output(command, shell=True)
try:
bleu = float(re.findall('BLEU = (.+?),', str(result))[0])
except:
        logging.error('ERROR ON COMPUTING BLEU. MAKE SURE YOU HAVE PERL INSTALLED GLOBALLY ON YOUR MACHINE.')
        print('ERROR ON COMPUTING BLEU. MAKE SURE YOU HAVE PERL INSTALLED GLOBALLY ON YOUR MACHINE.')
bleu = -1
logging.info('FINISHING TO COMPUTE BLEU...')
print('FINISHING TO COMPUTE BLEU...')
return bleu
def bleu_nltk(references, hypothesis):
# check for empty lists
references_, hypothesis_ = [], []
for i, refs in enumerate(references):
refs_ = [ref for ref in refs if ref.strip() != '']
if len(refs_) > 0:
references_.append([ref.split() for ref in refs_])
hypothesis_.append(hypothesis[i].split())
chencherry = SmoothingFunction()
return corpus_bleu(references_, hypothesis_, smoothing_function=chencherry.method3)
def meteor_score(references, hypothesis, num_refs, lng='en'):
logging.info('STARTING TO COMPUTE METEOR...')
print('STARTING TO COMPUTE METEOR...')
hyps_tmp, refs_tmp = 'hypothesis_meteor', 'reference_meteor'
with codecs.open(hyps_tmp, 'w', 'utf-8') as f:
f.write('\n'.join(hypothesis))
linear_references = []
for refs in references:
for i in range(num_refs):
linear_references.append(refs[i])
with codecs.open(refs_tmp, 'w', 'utf-8') as f:
f.write('\n'.join(linear_references))
try:
command = 'java -Xmx2G -jar {0} '.format(METEOR_PATH)
command += '{0} {1} -l {2} -norm -r {3}'.format(hyps_tmp, refs_tmp, lng, num_refs)
result = subprocess.check_output(command, shell=True)
meteor = result.split(b'\n')[-2].split()[-1]
except:
logging.error('ERROR ON COMPUTING METEOR. MAKE SURE YOU HAVE JAVA INSTALLED GLOBALLY ON YOUR MACHINE.')
print('ERROR ON COMPUTING METEOR. MAKE SURE YOU HAVE JAVA INSTALLED GLOBALLY ON YOUR MACHINE.')
meteor = -1
try:
os.remove(hyps_tmp)
os.remove(refs_tmp)
except:
pass
logging.info('FINISHING TO COMPUTE METEOR...')
print('FINISHING TO COMPUTE METEOR...')
return float(meteor)
def chrF_score(references, hypothesis, num_refs, nworder, ncorder, beta):
logging.info('STARTING TO COMPUTE CHRF++...')
print('STARTING TO COMPUTE CHRF++...')
hyps_tmp, refs_tmp = 'hypothesis_chrF', 'reference_chrF'
# check for empty lists
references_, hypothesis_ = [], []
for i, refs in enumerate(references):
refs_ = [ref for ref in refs if ref.strip() != '']
if len(refs_) > 0:
references_.append(refs_)
hypothesis_.append(hypothesis[i])
with codecs.open(hyps_tmp, 'w', 'utf-8') as f:
f.write('\n'.join(hypothesis_))
linear_references = []
for refs in references_:
linear_references.append('*#'.join(refs[:num_refs]))
with codecs.open(refs_tmp, 'w', 'utf-8') as f:
f.write('\n'.join(linear_references))
rtxt = codecs.open(refs_tmp, 'r', 'utf-8')
htxt = codecs.open(hyps_tmp, 'r', 'utf-8')
try:
totalF, averageTotalF, totalPrec, totalRec = computeChrF(rtxt, htxt, nworder, ncorder, beta, None)
except:
logging.error('ERROR ON COMPUTING CHRF++.')
print('ERROR ON COMPUTING CHRF++.')
totalF, averageTotalF, totalPrec, totalRec = -1, -1, -1, -1
try:
os.remove(hyps_tmp)
os.remove(refs_tmp)
except:
pass
logging.info('FINISHING TO COMPUTE CHRF++...')
print('FINISHING TO COMPUTE CHRF++...')
return totalF, averageTotalF, totalPrec, totalRec
def ter_score(references, hypothesis, num_refs):
logging.info('STARTING TO COMPUTE TER...')
print('STARTING TO COMPUTE TER...')
ter_scores = []
for hyp, refs in zip(hypothesis, references):
candidates = []
for ref in refs[:num_refs]:
if len(ref) == 0:
ter_score = 1
else:
try:
ter_score = pyter.ter(hyp.split(), ref.split())
except:
ter_score = 1
candidates.append(ter_score)
ter_scores.append(min(candidates))
logging.info('FINISHING TO COMPUTE TER...')
print('FINISHING TO COMPUTE TER...')
return sum(ter_scores) / len(ter_scores)
def bert_score_(references, hypothesis, lng='en'):
logging.info('STARTING TO COMPUTE BERT SCORE...')
print('STARTING TO COMPUTE BERT SCORE...')
for i, refs in enumerate(references):
references[i] = [ref for ref in refs if ref.strip() != '']
try:
P, R, F1 = score(hypothesis, references, lang=lng)
logging.info('FINISHING TO COMPUTE BERT SCORE...')
# print('FINISHING TO COMPUTE BERT SCORE...')
P, R, F1 = list(P), list(R), list(F1)
F1 = float(sum(F1) / len(F1))
P = float(sum(P) / len(P))
R = float(sum(R) / len(R))
except:
P, R, F1 = 0, 0, 0
return P, R, F1
def bleurt(references, hypothesis, num_refs, checkpoint = "metrics/bleurt/bleurt-base-128"):
refs, cands = [], []
for i, hyp in enumerate(hypothesis):
for ref in references[i][:num_refs]:
cands.append(hyp)
refs.append(ref)
scorer = bleurt_score.BleurtScorer(checkpoint)
scores = scorer.score(refs, cands)
scores = [max(scores[i:i+num_refs]) for i in range(0, len(scores), num_refs)]
return round(sum(scores) / len(scores), 2)
def run(refs_path, hyps_path, num_refs, lng='en', metrics='bleu,meteor,chrf++,ter,bert,bleurt', ncorder=6, nworder=2, beta=2):
metrics = metrics.lower().split(',')
references, references_tok, hypothesis, hypothesis_tok = parse(refs_path, hyps_path, num_refs, lng)
result = {}
logging.info('STARTING EVALUATION...')
if 'bleu' in metrics:
bleu = bleu_score(refs_path, hyps_path, num_refs)
result['bleu'] = bleu
b = bleu_nltk(references_tok, hypothesis_tok)
result['bleu_nltk'] = b
if 'meteor' in metrics:
meteor = meteor_score(references_tok, hypothesis_tok, num_refs, lng=lng)
result['meteor'] = meteor
if 'chrf++' in metrics:
chrf, _, _, _ = chrF_score(references, hypothesis, num_refs, nworder, ncorder, beta)
result['chrf++'] = chrf
if 'ter' in metrics:
ter = ter_score(references_tok, hypothesis_tok, num_refs)
result['ter'] = ter
if 'bert' in metrics:
P, R, F1 = bert_score_(references, hypothesis, lng=lng)
result['bert_precision'] = P
result['bert_recall'] = R
result['bert_f1'] = F1
if 'bleurt' in metrics and lng == 'en':
s = bleurt(references, hypothesis, num_refs)
result['bleurt'] = s
logging.info('FINISHING EVALUATION...')
return result | null |
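A hedged usage sketch of `run` above. The file paths are hypothetical; `BLEU_PATH` and `METEOR_PATH` (referenced by `bleu_score` and `meteor_score`) must be defined elsewhere in the module, and the external perl/java tools must be installed for those metrics to succeed.

result = run(
    refs_path='data/references.txt',   # one reference file, hence num_refs=1
    hyps_path='data/hypotheses.txt',
    num_refs=1,
    lng='en',
    metrics='bleu,ter,bert',
)
print(result)  # e.g. {'bleu': ..., 'bleu_nltk': ..., 'ter': ..., 'bert_precision': ..., ...}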
import argparse
import json
import logging
import math
import os
import random
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from huggingface_hub import Repository
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name
from transformers.utils.versions import require_version
from pst.utils import convert_sparse_network, schedule_sparsity_ratio, update_network_sparsity, save_sparse_model
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
parser.add_argument(
"--task_name",
type=str,
default=None,
help="The name of the glue task to train on.",
choices=list(task_to_keys.keys()),
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
)
# Compression Arguments
parser.add_argument(
"--sparsity",
type=float,
default=0.9,
help="Target sparsity ratio.",
)
parser.add_argument(
"--initial_sparsity",
type=float,
default=0.0,
help="Initial sparsity ratio.",
)
parser.add_argument(
"--pruning_method",
type=str,
default='pst',
help="Pruning Method",
)
parser.add_argument(
"--weight_rank",
type=int,
default=8,
help="The rank of trainable weights.",
)
parser.add_argument(
"--weight_beta",
type=float,
default=1.0,
help="The scale of trainable weights.",
)
parser.add_argument(
"--mask_rank",
type=int,
default=8,
help="The rank of trainable mask scores.",
)
parser.add_argument(
"--mask_alpha1",
type=float,
default=1.0,
help="The scale of trainable low-rankness mask scores.",
)
parser.add_argument(
"--mask_alpha2",
type=float,
default=1.0,
help="The scale of trainable structuredness mask scores.",
)
parser.add_argument(
"--initial_warmup",
type=float,
default=0.1,
help="Run `initial_warmup` * `training_step` steps before increase sparsity ratio.",
)
parser.add_argument(
"--final_warmup",
type=float,
default=0.3,
help="Run `final_warmup` * `training_step` steps after sparsity ratio arrive target ratio.",
)
args = parser.parse_args()
# Sanity checks
if args.task_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args | null |
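A hypothetical invocation of `parse_args` above, driven through `sys.argv`; the script and model names are placeholders, and the arguments shown satisfy the sanity checks (a task name and a required model path).

import sys

sys.argv = [
    'run_glue_pst.py',                           # hypothetical script name
    '--task_name', 'sst2',
    '--model_name_or_path', 'bert-base-uncased', # placeholder model identifier
    '--sparsity', '0.9',
    '--pruning_method', 'pst',
    '--output_dir', './out/sst2_pst',
]
args = parse_args()
assert args.task_name == 'sst2' and args.sparsity == 0.9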
19,491 | import os
import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F  # needed by SparseLinear.forward below
from pst.sparse import SparseBinarizer  # assumed home of the straight-through binarizer used below
def _setattr(model, name, module):
name_list = name.split(".")
for name in name_list[:-1]:
model = getattr(model, name)
setattr(model, name_list[-1], module)
class SparseLinear(nn.Linear):
"""
Fully Connected layer with on the fly adaptive mask.
"""
def __init__(
self,
in_features,
out_features,
bias = True,
pruning_method = "pst",
weight_rank = 8,
weight_beta = 1.0,
mask_rank = 8,
mask_alpha1 = 1.0,
mask_alpha2 = 1.0
):
super().__init__(in_features=in_features, out_features=out_features, bias=bias)
self.pruning_method = pruning_method
self.weight_rank = weight_rank
self.weight_beta = weight_beta
self.mask_rank = mask_rank
self.mask_alpha1 = mask_alpha1
self.mask_alpha2 = mask_alpha2
self.cur_sparsity = 0.0
if self.pruning_method == "pst":
# create trainable params
self.weight_U = nn.Parameter(torch.randn(out_features, self.weight_rank))
self.weight_V = nn.Parameter(torch.zeros(self.weight_rank, in_features))
self.mask_scores_A = nn.Parameter(torch.randn(out_features, self.mask_rank))
self.mask_scores_B = nn.Parameter(torch.zeros(self.mask_rank, in_features))
self.mask_scores_R = nn.Parameter(torch.zeros(out_features))
self.mask_scores_C = nn.Parameter(torch.zeros(in_features))
self.weight.requires_grad = False
if self.bias is not None:
self.bias.requires_grad = False
def forward(self, inputs):
if self.pruning_method == "pst":
weight = self.weight + self.weight_beta * self.weight_U @ self.weight_V
mask_scores = weight.abs() + self.mask_alpha1 * self.mask_scores_A @ self.mask_scores_B + \
self.mask_alpha2 * (self.mask_scores_R.unsqueeze(1) + self.mask_scores_C.unsqueeze(0))
mask = SparseBinarizer.apply(mask_scores, self.cur_sparsity)
masked_weight = mask * weight
return F.linear(inputs, masked_weight, self.bias)
else:
return F.linear(inputs, self.weight, self.bias)
def convert(self):
if self.pruning_method == "pst":
weight = self.weight + self.weight_beta * self.weight_U @ self.weight_V
mask_scores = weight.abs() + self.mask_alpha1 * self.mask_scores_A @ self.mask_scores_B + \
self.mask_alpha2 * (self.mask_scores_R.unsqueeze(1) + self.mask_scores_C.unsqueeze(0))
mask = SparseBinarizer.apply(mask_scores, self.cur_sparsity)
masked_weight = mask * weight
self.old_weight = self.weight.data.clone()
self.weight.data = masked_weight.data
def restore(self):
if self.pruning_method == "pst":
self.weight.data = self.old_weight
del self.old_weight
def convert_sparse_network(
model,
pruning_method,
weight_rank,
weight_beta,
mask_rank,
mask_alpha1,
mask_alpha2,
logger=None
):
for name, module in model.named_modules():
if isinstance(module, nn.Linear):
new_module = SparseLinear(module.in_features, module.out_features,
module.bias is not None, pruning_method, weight_rank, weight_beta,
mask_rank, mask_alpha1, mask_alpha2)
new_module.weight.data = module.weight.data
if module.bias is not None:
new_module.bias.data = module.bias.data
# replace original module by new sparse module
_setattr(model, name, new_module)
if logger:
logger.info(f"convert {name} to sparse.")
else:
print(f"convert {name} to sparse.") | null |
19,492 | import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from emmental import MaskedBertConfig, MaskedBertForSequenceClassification, TeacherBertForSequenceClassification
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def schedule_threshold(
step: int,
total_step: int,
warmup_steps: int,
initial_threshold: float,
final_threshold: float,
initial_warmup: int,
final_warmup: int,
final_lambda: float,
):
if step <= initial_warmup * warmup_steps:
threshold = initial_threshold
elif step > (total_step - final_warmup * warmup_steps):
threshold = final_threshold
else:
spars_warmup_steps = initial_warmup * warmup_steps
spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps
mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps)
threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3)
regu_lambda = final_lambda * threshold / final_threshold
return threshold, regu_lambda
def regularization(model: nn.Module, mode: str):
regu, counter = 0, 0
for name, param in model.named_parameters():
if "mask_scores" in name:
if mode == "l1":
regu += torch.norm(torch.sigmoid(param), p=1) / param.numel()
elif mode == "l0":
regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel()
else:
ValueError("Don't know this mode.")
counter += 1
return regu / counter
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "/MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
        out_label_ids = None
        entropy = None  # only set in the classification branch below; checked after the loop
# Global TopK
if args.global_topk:
threshold_mem = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
if "masked" in args.model_type:
inputs["threshold"] = args.final_threshold
if args.global_topk:
if threshold_mem is None:
concat = torch.cat(
[param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name]
)
n = concat.numel()
kth = max(n - (int(n * args.final_threshold) + 1), 1)
threshold_mem = concat.kthvalue(kth).values.item()
inputs["threshold"] = threshold_mem
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
from scipy.special import softmax
probs = softmax(preds, axis=-1)
entropy = np.exp((-probs * np.log(probs)).sum(axis=-1).mean())
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
if entropy is not None:
result["eval_avg_entropy"] = entropy
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer, teacher=None)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer, teacher=None):
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(log_dir=args.output_dir)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad],
"lr": args.mask_scores_learning_rate,
},
{
"params": [
p
for n, p in model.named_parameters()
if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay)
],
"lr": args.learning_rate,
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay)
],
"lr": args.learning_rate,
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
# Distillation
if teacher is not None:
logger.info(" Training with distillation")
global_step = 0
# Global TopK
if args.global_topk:
threshold_mem = None
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
)
set_seed(args) # Added here for reproducibility
global_representations_bank_finetuned = None
global_representations_bank_pretrained = None
global_representations_bank_snaps = None
threshold = args.initial_threshold
contrastive_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
contrastive_dataloader = DataLoader(train_dataset, sampler=contrastive_sampler, batch_size=args.train_batch_size)
print('Let us encode examples!')
with torch.no_grad():
for batch in tqdm(contrastive_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "labels": batch[3], 'idx': batch[4]}
representations = teacher(encode_example=True, **inputs).cpu()
if global_representations_bank_finetuned is None:
global_representations_bank_finetuned = representations
else:
global_representations_bank_finetuned = torch.cat((global_representations_bank_finetuned, representations), dim=0)
global_representations_bank_finetuned = global_representations_bank_finetuned.unsqueeze(1)
if not args.use_distill:
teacher = None
for epoch_idx, _ in enumerate(train_iterator):
# calculate and store example representations and labels (for verification)
if args.use_contrastive_loss:
contrastive_model = model.module if hasattr(model, "module") else model
representations_bank = None
labels_bank = None
contrastive_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
contrastive_dataloader = DataLoader(train_dataset, sampler=contrastive_sampler, batch_size=args.train_batch_size)
print('Let us encode examples!')
with torch.no_grad():
for batch in tqdm(contrastive_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], 'idx': batch[4]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
if "masked" in args.model_type:
inputs["threshold"] = threshold
labels = inputs['labels'].cpu()
representations = contrastive_model(encode_example=True, **inputs).cpu()
if representations_bank is None:
representations_bank = representations
labels_bank = labels
else:
representations_bank = torch.cat((representations_bank, representations), dim=0)
labels_bank = torch.cat((labels_bank, labels), dim=0)
if epoch_idx == 0:
global_representations_bank_pretrained = representations_bank.unsqueeze(1)
else:
# add to global representations bank
if global_representations_bank_snaps is None:
global_representations_bank_snaps = representations_bank.unsqueeze(1)
else:
global_representations_bank_snaps = torch.cat((global_representations_bank_snaps, representations_bank.unsqueeze(1)), dim=1) # train_num * rep_num * hidsize
contrastive_model.global_representations_bank_finetuned = global_representations_bank_finetuned
contrastive_model.global_representations_bank_pretrained = global_representations_bank_pretrained
contrastive_model.global_representations_bank_snaps = global_representations_bank_snaps
contrastive_model.global_labels_bank = labels_bank
# Begin this epoch
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
threshold, regu_lambda = schedule_threshold(
step=global_step,
total_step=t_total,
warmup_steps=args.warmup_steps,
final_threshold=args.final_threshold,
initial_threshold=args.initial_threshold,
final_warmup=args.final_warmup,
initial_warmup=args.initial_warmup,
final_lambda=args.final_lambda,
)
# Global TopK
if args.global_topk:
if threshold == 1.0:
threshold = -1e2 # Or an indefinitely low quantity
else:
if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0):
# Sort all the values to get the global topK
concat = torch.cat(
[param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name]
)
n = concat.numel()
kth = max(n - (int(n * threshold) + 1), 1)
threshold_mem = concat.kthvalue(kth).values.item()
threshold = threshold_mem
else:
threshold = threshold_mem
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], 'idx': batch[4]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
if "masked" in args.model_type:
inputs["threshold"] = threshold
outputs = model(**inputs)
loss, logits_stu = outputs # model outputs are always tuple in transformers (see doc)
# Distillation loss
if teacher is not None:
if "token_type_ids" not in inputs:
inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2]
with torch.no_grad():
logits_tea = teacher(
input_ids=inputs["input_ids"],
token_type_ids=inputs["token_type_ids"],
attention_mask=inputs["attention_mask"],
).logits
loss_logits = (
F.kl_div(
input=F.log_softmax(logits_stu / args.temperature, dim=-1),
target=F.softmax(logits_tea / args.temperature, dim=-1),
reduction="batchmean",
)
* (args.temperature ** 2)
)
loss += args.distill_loss_weight * loss_logits
# Regularization
if args.regularization is not None:
regu_ = regularization(model=model, mode=args.regularization)
loss = loss + regu_lambda * regu_
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
len(epoch_iterator) <= args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_writer.add_scalar("threshold", threshold, global_step)
for name, param in model.named_parameters():
if not param.requires_grad:
continue
tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step)
tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step)
tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step)
tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step)
tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step)
tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step)
if args.regularization is not None and "mask_scores" in name:
if args.regularization == "l1":
perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel()
elif args.regularization == "l0":
perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel()
tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()
logs["learning_rate"] = learning_rate_scalar[0]
if len(learning_rate_scalar) > 1:
for idx, lr in enumerate(learning_rate_scalar[1:]):
logs[f"learning_rate/{idx+1}"] = lr
logs["loss"] = loss_scalar
if teacher is not None:
logs["loss/distil"] = loss_logits.item()
if args.regularization is not None:
logs["loss/regularization"] = regu_.item()
if (teacher is not None) or (args.regularization is not None):
if (teacher is not None) and (args.regularization is not None):
logs["loss/instant_ce"] = (
loss.item()
- regu_lambda * logs["loss/regularization"]
- args.distill_loss_weight * logs["loss/distil"]
) / args.ce_loss_weight
elif teacher is not None:
logs["loss/instant_ce"] = (
loss.item() - args.distill_loss_weight * logs["loss/distil"]
) / args.ce_loss_weight
else:
logs["loss/instant_ce"] = loss.item() - regu_lambda * logs["loss/regularization"]
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{"step": global_step}}))
# SAVE checkpoint
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step | Train the model |
19,493 | import argparse
import glob
import logging
import os
import random
import timeit
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from emmental import MaskedBertConfig, MaskedBertForQuestionAnswering, TeacherBertForQuestionAnswering
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForQuestionAnswering,
BertTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
squad_evaluate,
)
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def schedule_threshold(
step: int,
total_step: int,
warmup_steps: int,
initial_threshold: float,
final_threshold: float,
initial_warmup: int,
final_warmup: int,
final_lambda: float,
):
if step <= initial_warmup * warmup_steps:
threshold = initial_threshold
elif step > (total_step - final_warmup * warmup_steps):
threshold = final_threshold
else:
spars_warmup_steps = initial_warmup * warmup_steps
spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps
mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps)
threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3)
regu_lambda = final_lambda * threshold / final_threshold
return threshold, regu_lambda
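# A minimal sketch, for illustration only (not part of the original script):
# the schedule above holds `initial_threshold` during warmup, anneals with a
# cubic decay, then holds `final_threshold` for the last steps. All argument
# values below are made up for demonstration.
def _demo_schedule_threshold():
    for step in [0, 100, 300, 500, 700, 900]:
        thr, lam = schedule_threshold(
            step=step,
            total_step=1000,
            warmup_steps=100,
            initial_threshold=1.0,
            final_threshold=0.15,
            initial_warmup=1,
            final_warmup=2,
            final_lambda=1.0,
        )
        print(f"step={step:4d} threshold={thr:.3f} regu_lambda={lam:.3f}")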
def regularization(model: nn.Module, mode: str):
regu, counter = 0, 0
for name, param in model.named_parameters():
if "mask_scores" in name:
if mode == "l1":
regu += torch.norm(torch.sigmoid(param), p=1) / param.numel()
elif mode == "l0":
regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel()
else:
ValueError("Don't know this mode.")
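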
counter += 1
return regu / counter
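# A minimal sketch, for illustration only (not part of the original script):
# `regularization` only looks at parameters whose name contains "mask_scores",
# so a toy module with one such parameter is enough to exercise it.
def _demo_regularization():
    toy = nn.Linear(4, 4)
    toy.mask_scores = nn.Parameter(torch.randn(4, 4))
    # L1 mode: mean of sigmoid(mask_scores), i.e. the expected fraction of
    # retained weights under the sigmoid relaxation.
    print(regularization(model=toy, mode="l1").item())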
def evaluate(args, model, tokenizer, prefix=""):
dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
# Global TopK
if args.global_topk:
threshold_mem = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
example_indices = batch[3]
# XLNet and XLM use more arguments for their predictions
if args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": batch[4], "p_mask": batch[5]})
# for lang_id-sensitive xlm models
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
if "masked" in args.model_type:
inputs["threshold"] = args.final_threshold
if args.global_topk:
if threshold_mem is None:
concat = torch.cat(
[param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name]
)
n = concat.numel()
kth = max(n - (int(n * args.final_threshold) + 1), 1)
threshold_mem = concat.kthvalue(kth).values.item()
inputs["threshold"] = threshold_mem
outputs = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i]) for output in outputs]
# Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
# models only use two.
if len(output) >= 5:
start_logits = output[0]
start_top_index = output[1]
end_logits = output[2]
end_top_index = output[3]
cls_logits = output[4]
result = SquadResult(
unique_id,
start_logits,
end_logits,
start_top_index=start_top_index,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
else:
start_logits, end_logits = output
result = SquadResult(unique_id, start_logits, end_logits)
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix))
else:
output_null_log_odds_file = None
# XLNet and XLM use a more complex post-processing procedure
if args.model_type in ["xlnet", "xlm"]:
start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top
end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top
predictions = compute_predictions_log_probs(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
start_n_top,
end_n_top,
args.version_2_with_negative,
tokenizer,
args.verbose_logging,
)
else:
predictions = compute_predictions_logits(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
args.do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
args.verbose_logging,
args.version_2_with_negative,
args.null_score_diff_threshold,
tokenizer,
)
# Compute the F1 and exact scores.
results = squad_evaluate(examples, predictions)
return results
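# A minimal sketch, for illustration only (not part of the original script),
# of the global TopK rule used during evaluation above: keep the highest
# `final_threshold` fraction of all mask scores by thresholding at the
# k-th smallest value.
def _demo_global_topk_threshold(final_threshold=0.1):
    scores = torch.randn(10000)
    n = scores.numel()
    kth = max(n - (int(n * final_threshold) + 1), 1)
    cutoff = scores.kthvalue(kth).values.item()
    kept = (scores > cutoff).float().mean().item()
    print(f"cutoff={cutoff:.4f} kept_fraction={kept:.4f}")  # approximately final_threshold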
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer, teacher=None)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer, teacher=None):
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(log_dir=args.output_dir)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad],
"lr": args.mask_scores_learning_rate,
},
{
"params": [
p
for n, p in model.named_parameters()
if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay)
],
"lr": args.learning_rate,
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay)
],
"lr": args.learning_rate,
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
# Distillation
if teacher is not None:
logger.info(" Training with distillation")
global_step = 1
# Global TopK
if args.global_topk:
threshold_mem = None
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
# Added here for reproducibility
set_seed(args)
global_representations_bank_finetuned = None
global_representations_bank_pretrained = None
global_representations_bank_snaps = None
threshold = args.initial_threshold
assert teacher is not None
contrastive_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
contrastive_dataloader = DataLoader(train_dataset, sampler=contrastive_sampler, batch_size=args.train_batch_size)
print('Let us encode examples!')
with torch.no_grad():
for batch in tqdm(contrastive_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"start_positions": batch[3],
"end_positions": batch[4],
'idx': batch[-1],
}
representations = teacher(encode_example=True, **inputs).cpu()
if global_representations_bank_finetuned is None:
global_representations_bank_finetuned = representations
else:
global_representations_bank_finetuned = torch.cat((global_representations_bank_finetuned, representations), dim=0)
global_representations_bank_finetuned = global_representations_bank_finetuned.unsqueeze(1)
if not args.use_distill:
teacher = None
for epoch_idx, _ in enumerate(train_iterator):
# HACK: add representation for contrastive learning
# calculate and store example representations and labels (for verification)
if args.use_contrastive_loss:
contrastive_model = model.module if hasattr(model, "module") else model
representations_bank = None
contrastive_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
contrastive_dataloader = DataLoader(train_dataset, sampler=contrastive_sampler, batch_size=args.train_batch_size)
print('Let us encode examples!')
with torch.no_grad():
for batch in tqdm(contrastive_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"start_positions": batch[3],
"end_positions": batch[4],
'idx': batch[-1],
}
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
if args.version_2_with_negative:
inputs.update({"is_impossible": batch[7]})
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
if "masked" in args.model_type:
inputs["threshold"] = threshold
representations = contrastive_model(encode_example=True, **inputs).cpu()
if representations_bank is None:
representations_bank = representations
else:
representations_bank = torch.cat((representations_bank, representations), dim=0)
if epoch_idx == 0:
global_representations_bank_pretrained = representations_bank.unsqueeze(1)
else:
# add to global representations bank
if global_representations_bank_snaps is None:
global_representations_bank_snaps = representations_bank.unsqueeze(1)
else:
global_representations_bank_snaps = torch.cat((global_representations_bank_snaps, representations_bank.unsqueeze(1)), dim=1) # train_num * rep_num * hidsize
contrastive_model.global_representations_bank_finetuned = global_representations_bank_finetuned
contrastive_model.global_representations_bank_pretrained = global_representations_bank_pretrained
contrastive_model.global_representations_bank_snaps = global_representations_bank_snaps
# Begin this epoch
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
threshold, regu_lambda = schedule_threshold(
step=global_step,
total_step=t_total,
warmup_steps=args.warmup_steps,
final_threshold=args.final_threshold,
initial_threshold=args.initial_threshold,
final_warmup=args.final_warmup,
initial_warmup=args.initial_warmup,
final_lambda=args.final_lambda,
)
# Global TopK
if args.global_topk:
if threshold == 1.0:
threshold = -1e2 # Or an indefinitely low quantity
else:
if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0):
# Sort all the values to get the global topK
concat = torch.cat(
[param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name]
)
n = concat.numel()
kth = max(n - (int(n * threshold) + 1), 1)
threshold_mem = concat.kthvalue(kth).values.item()
threshold = threshold_mem
else:
threshold = threshold_mem
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"start_positions": batch[3],
"end_positions": batch[4],
'idx': batch[-1],
}
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
if args.version_2_with_negative:
inputs.update({"is_impossible": batch[7]})
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
if "masked" in args.model_type:
inputs["threshold"] = threshold
outputs = model(**inputs)
# model outputs are always tuple in transformers (see doc)
loss, start_logits_stu, end_logits_stu = outputs
# Distillation loss
if teacher is not None:
    # RoBERTa-style students drop token_type_ids above, but a BERT-style
    # teacher still needs them (same handling as the classification loop)
    if "token_type_ids" not in inputs:
        inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2]
    with torch.no_grad():
teacher_out = teacher(
input_ids=inputs["input_ids"],
token_type_ids=inputs["token_type_ids"],
attention_mask=inputs["attention_mask"],
)
start_logits_tea, end_logits_tea = teacher_out.start_logits, teacher_out.end_logits
loss_start = (
F.kl_div(
input=F.log_softmax(start_logits_stu / args.temperature, dim=-1),
target=F.softmax(start_logits_tea / args.temperature, dim=-1),
reduction="batchmean",
)
* (args.temperature ** 2)
)
loss_end = (
F.kl_div(
input=F.log_softmax(end_logits_stu / args.temperature, dim=-1),
target=F.softmax(end_logits_tea / args.temperature, dim=-1),
reduction="batchmean",
)
* (args.temperature ** 2)
)
loss_logits = (loss_start + loss_end) / 2.0
loss += args.distill_loss_weight * loss_logits
# Regularization
if args.regularization is not None:
regu_ = regularization(model=model, mode=args.regularization)
loss = loss + regu_lambda * regu_
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_writer.add_scalar("threshold", threshold, global_step)
for name, param in model.named_parameters():
if not param.requires_grad:
continue
tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step)
tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step)
tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step)
tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step)
if "pooler" in name:
continue
tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step)
tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step)
if args.regularization is not None and "mask_scores" in name:
if args.regularization == "l1":
perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel()
elif args.regularization == "l0":
perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel()
tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Log metrics
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
learning_rate_scalar = scheduler.get_lr()
tb_writer.add_scalar("lr", learning_rate_scalar[0], global_step)
if len(learning_rate_scalar) > 1:
for idx, lr in enumerate(learning_rate_scalar[1:]):
tb_writer.add_scalar(f"lr/{idx+1}", lr, global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
if teacher is not None:
tb_writer.add_scalar("loss/distil", loss_logits.item(), global_step)
if args.regularization is not None:
tb_writer.add_scalar("loss/regularization", regu_.item(), global_step)
if (teacher is not None) or (args.regularization is not None):
if (teacher is not None) and (args.regularization is not None):
tb_writer.add_scalar(
"loss/instant_ce",
(loss.item() - regu_lambda * regu_.item() - args.distill_loss_weight * loss_logits.item())
/ args.ce_loss_weight,
global_step,
)
elif teacher is not None:
tb_writer.add_scalar(
"loss/instant_ce",
(loss.item() - args.distill_loss_weight * loss_logits.item()) / args.ce_loss_weight,
global_step,
)
else:
tb_writer.add_scalar(
"loss/instant_ce", loss.item() - regu_lambda * regu_.item(), global_step
)
logging_loss = tr_loss
# Save model checkpoint
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step | Train the model |
19,494 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset, load_metric
import torch
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from utils_qa import postprocess_qa_predictions
from torch.utils.data import DataLoader
from model.PruneBert import PruneBertForQuestionAnswering
from model.TeacherBert import TeacherBertForQuestionAnswering
from prune.prune_utils import determine_pruning_sequence, what_to_prune_head, calculate_head_and_intermediate_importance, what_to_prune_mlp
from transformers.models.bert import BertPreTrainedModel, BertModel
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import QuestionAnsweringModelOutput
import torch.nn as nn
from tqdm import tqdm
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CAPTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name+'.py', data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files, field="data")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
teacher = TeacherBertForQuestionAnswering.from_pretrained(
training_args.teacher_path,
from_tf=bool(".ckpt" in training_args.teacher_path),
config=config,
alignrep=training_args.alignrep
)
teacher = teacher.cuda()
model = PruneBertForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
contrastive_temperature=training_args.contrastive_temperature,
ce_loss_weight=training_args.ce_loss_weight,
cl_unsupervised_loss_weight=training_args.cl_unsupervised_loss_weight,
distill_loss_weight=training_args.distill_loss_weight,
extra_examples=training_args.extra_examples,
alignrep=training_args.alignrep,
get_teacher_logits=teacher.get_teacher_logits if training_args.use_distill else None,
distill_temperature=training_args.distill_temperature
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
# Training preprocessing
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=data_args.max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what the context is and what the question is).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples
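# Note: with `return_overflowing_tokens=True` and `stride=doc_stride`, a long
# context is split into several overlapping features; any feature whose span
# does not contain the answer is labeled with the CLS index above.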
if training_args.do_train:
train_dataset = datasets["train"].map(
prepare_train_features,
batched=True,
with_indices=False,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
def add_id(examples, idx):
examples['idx'] = idx
return examples
train_dataset = train_dataset.map(
add_id,
batched=True,
with_indices=True,
)
# Validation preprocessing
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=data_args.max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what the context is and what the question is).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
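# Note: offsets outside the context are set to None above so that
# post-processing can tell context tokens from question tokens when mapping
# predicted spans back to character positions.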
if training_args.do_eval:
validation_dataset = datasets["validation"].map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
# We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data
# collator.
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Post-processing:
def post_processing_function(examples, features, predictions):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=data_args.version_2_with_negative,
n_best_size=data_args.n_best_size,
max_answer_length=data_args.max_answer_length,
null_score_diff_threshold=data_args.null_score_diff_threshold,
output_dir=training_args.output_dir,
is_world_process_zero=trainer.is_world_process_zero(),
)
# Format the result to the format the metric expects.
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in datasets["validation"]]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad_metric.py")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
# Initialize our Trainer
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=validation_dataset if training_args.do_eval else None,
eval_examples=datasets["validation"] if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
post_process_function=post_processing_function,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
global_representations_bank_finetuned = None
global_representations_bank_pretrained = None
global_representations_bank_snaps = None
# contrastive learning of finetuned model
if training_args.use_contrastive_loss:
dataloader = DataLoader(
train_dataset,
batch_size=trainer.args.train_batch_size,
shuffle=False,
collate_fn=trainer.data_collator,
drop_last=trainer.args.dataloader_drop_last,
num_workers=trainer.args.dataloader_num_workers,
pin_memory=trainer.args.dataloader_pin_memory,
)
with torch.no_grad():
for inputs in tqdm(dataloader):
inputs = trainer._prepare_inputs(inputs)
representations = teacher(encode_example=True, **inputs).cpu()
if global_representations_bank_finetuned is None:
global_representations_bank_finetuned = representations
else:
global_representations_bank_finetuned = torch.cat((global_representations_bank_finetuned, representations), dim=0)
global_representations_bank_finetuned = global_representations_bank_finetuned.unsqueeze(1)
if not training_args.use_distill:
teacher = None
if training_args.do_prune:
model = trainer._wrap_model(trainer.model, training=False)
model = model.module if hasattr(model, 'module') else model
# Determine the number of heads to prune
prune_percent = training_args.prune_percent
prune_percent = None if prune_percent == '' else [float(x) for x in prune_percent.split(',')]
prune_sequence_head, prune_sequence_intermediate = determine_pruning_sequence(
prune_percent,
config.num_attention_heads,
config.num_hidden_layers,
config.intermediate_size,
training_args.at_least_x_heads_per_layer,
)
prune_sequence = zip(prune_sequence_head, prune_sequence_intermediate)
for step, (n_to_prune_head, n_to_prune_intermediate) in enumerate(prune_sequence):
logger.info("We are going to prune {} heads and {} intermediate !!!".format(n_to_prune_head, n_to_prune_intermediate))
head_importance, intermediate_importance = calculate_head_and_intermediate_importance(
model,
train_dataset,
old_head_mask=model.head_mask,
old_intermediate_mask=model.intermediate_mask,
trainer=trainer,
normalize_scores_by_layer=training_args.normalize_pruning_by_layer,
subset_size=training_args.subset_ratio
)
for layer in range(len(head_importance)):
layer_scores = head_importance[layer].cpu().data
logger.info("head importance score")
logger.info("\t".join(f"{x:.5f}" for x in layer_scores))
# Determine which heads to prune
new_head_mask = what_to_prune_head(
head_importance,
n_to_prune=n_to_prune_head,
old_head_mask=model.head_mask,
at_least_x_heads_per_layer=training_args.at_least_x_heads_per_layer,
)
new_intermediate_mask = what_to_prune_mlp(
intermediate_importance,
n_to_prune=n_to_prune_intermediate,
old_intermediate_mask=model.intermediate_mask
)
for layer in range(len(new_head_mask)):
y = new_head_mask[layer].cpu().data
logger.info("head mask")
logger.info("\t".join("{}".format(int(x)) for x in y))
logger.info("intermediate mask")
for layer in range(len(new_intermediate_mask)):
y = new_intermediate_mask[layer]
logger.info("Layer {} has {} intermediate active.".format(layer, torch.sum(y)))
# calculate and store example representations and labels (for verification)
if training_args.use_contrastive_loss:
representations_bank = None
dataloader = DataLoader(
train_dataset,
batch_size=trainer.args.train_batch_size,
shuffle=False,
collate_fn=trainer.data_collator,
drop_last=trainer.args.dataloader_drop_last,
num_workers=trainer.args.dataloader_num_workers,
pin_memory=trainer.args.dataloader_pin_memory,
)
with torch.no_grad():
for inputs in tqdm(dataloader):
inputs = trainer._prepare_inputs(inputs)
representations = model(encode_example=True, **inputs).cpu()
if representations_bank is None:
representations_bank = representations
else:
representations_bank = torch.cat((representations_bank, representations), dim=0)
if step == 0:
# add to global representations bank for pretrained
global_representations_bank_pretrained = representations_bank
global_representations_bank_pretrained = global_representations_bank_pretrained.unsqueeze(1)
else:
# add to global representations bank for snaps
if global_representations_bank_snaps is None:
global_representations_bank_snaps = representations_bank.unsqueeze(1)
else:
global_representations_bank_snaps = torch.cat((global_representations_bank_snaps, representations_bank.unsqueeze(1)), dim=1)
# update bank
model.global_representations_bank_finetuned = global_representations_bank_finetuned
model.global_representations_bank_pretrained = global_representations_bank_pretrained
model.global_representations_bank_snaps = global_representations_bank_snaps
# apply structured pruning
model.head_mask[:] = new_head_mask.clone()
model.intermediate_mask[:] = new_intermediate_mask.clone()
# re-train
trainer.optimizer = trainer.lr_scheduler = None
trainer.args.num_train_epochs = training_args.retrain_num_train_epochs
trainer.train()
# re-eval
metrics = trainer.evaluate(eval_dataset=validation_dataset)
task = data_args.dataset_name
new_metrics = {}
for k in metrics:
new_metrics["{}_{}_{}".format(task, k, step+1)] = metrics[k]
metrics = new_metrics
trainer.log_metrics("eval_{}".format(step+1), metrics)
trainer.save_metrics("eval_{}".format(step+1), metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
results = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
19,498 | import torch
from torch.utils.data import DataLoader, SequentialSampler
import numpy as np
from itertools import islice
from tqdm import tqdm
from math import sqrt
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `determine_pruning_sequence` function. Write a Python function `def determine_pruning_sequence( prune_percents, n_heads, n_layers, n_intermediate, at_least_x_heads_per_layer=1, )` to solve the following problem:
Same ratio for attention heads and MLPs
Here is the function:
def determine_pruning_sequence(
prune_percents,
n_heads,
n_layers,
n_intermediate,
at_least_x_heads_per_layer=1,
):
'''
Same ratio for attention heads and MLPs
'''
# Compute the number of heads to prune on percentage if needed
all_n_to_prune = []
for prune_percent in prune_percents:
total_heads = n_heads * n_layers
n_to_prune = int(total_heads * prune_percent / 100)
# Make sure we keep at least `at_least_x_heads_per_layer` heads per layer
if at_least_x_heads_per_layer > 0:
    if n_to_prune > total_heads - at_least_x_heads_per_layer * n_layers:
        raise ValueError(
            f"Cannot prune {n_to_prune} heads while keeping at least "
            f"{at_least_x_heads_per_layer} head(s) in each of the {n_layers} layers."
        )
all_n_to_prune.append(n_to_prune)
# We'll incrementally prune layers and evaluate
all_n_to_prune = sorted(all_n_to_prune)
n_to_prune_sequence_head = all_n_to_prune[:]
for idx in range(1, len(all_n_to_prune)):
n_to_prune_sequence_head[idx] = all_n_to_prune[idx] - all_n_to_prune[idx-1]
# Verify that the total number of heads pruned stayed the same
assert all_n_to_prune[-1] == sum(n_to_prune_sequence_head)
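# Worked example (hypothetical numbers): with 12 layers x 12 heads = 144 heads
# and prune_percents = [10, 20, 30], the cumulative counts are [14, 28, 43],
# so the incremental sequence pruned per step is [14, 14, 15].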
# MLP
all_n_to_prune = []
for prune_percent in prune_percents:
total_intermediate = n_layers * n_intermediate
n_to_prune = int(total_intermediate * prune_percent / 100)
all_n_to_prune.append(n_to_prune)
n_to_prune_sequence_intermediate = [0 for _ in range(len(all_n_to_prune))]
n_to_prune_sequence_intermediate[0] = all_n_to_prune[0]
for idx in range(1, len(all_n_to_prune)):
n_to_prune_sequence_intermediate[idx] = all_n_to_prune[idx] - all_n_to_prune[idx-1]
assert len(n_to_prune_sequence_head) == len(n_to_prune_sequence_intermediate)
return n_to_prune_sequence_head, n_to_prune_sequence_intermediate | Same ratio for attention heads and MLPs |
19,499 | import torch
from torch.utils.data import DataLoader, SequentialSampler
import numpy as np
from itertools import islice
from tqdm import tqdm
from math import sqrt
from collections import defaultdict
def calculate_head_and_intermediate_importance(
model,
dataset,
old_head_mask,
old_intermediate_mask,
trainer,
normalize_scores_by_layer=True,
disable_progress_bar=False,
subset_size=1.0,
):
training_flag = model.training
model = model.module if hasattr(model, 'module') else model
model.eval()
n_layers, n_heads, n_intermediate = model.config.num_hidden_layers, model.config.num_attention_heads, model.config.intermediate_size
head_importance = torch.zeros(n_layers, n_heads).to(old_head_mask)
head_mask = old_head_mask.clone()  # start from the current mask
head_mask.requires_grad_(requires_grad=True)
intermediate_importance = torch.zeros(n_layers, n_intermediate).to(old_intermediate_mask)
intermediate_mask = old_intermediate_mask.clone()  # start from the current mask
intermediate_mask.requires_grad_(requires_grad=True)
batch_size = trainer.args.train_batch_size
if subset_size <= 1:
subset_size *= len(dataset)
n_prune_steps = int(np.ceil(int(subset_size) / batch_size))
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=trainer.data_collator,
drop_last=trainer.args.dataloader_drop_last,
num_workers=trainer.args.dataloader_num_workers,
pin_memory=trainer.args.dataloader_pin_memory,
)
dataloader = islice(dataloader, n_prune_steps)
prune_iterator = tqdm(
dataloader,
desc="Iteration",
disable=disable_progress_bar,
total=n_prune_steps
)
for inputs in prune_iterator:
# key: add head mask and intermediate mask, so we can get the gradients from them
inputs['head_mask'] = head_mask
inputs['intermediate_mask'] = intermediate_mask
inputs = trainer._prepare_inputs(inputs)
loss = trainer.compute_loss(model, inputs)
loss.backward()
head_importance += head_mask.grad.abs().detach()
intermediate_importance += intermediate_mask.grad.abs().detach()
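# clip_grad_norm_ with max_norm=0 scales every gradient to zero, which
# resets the accumulated grads on the model and on both masks before the
# next batch.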
torch.nn.utils.clip_grad_norm_(model.parameters(), 0)
torch.nn.utils.clip_grad_norm_(head_mask, 0)
torch.nn.utils.clip_grad_norm_(intermediate_mask, 0)
if normalize_scores_by_layer:
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
norm_by_layer = torch.pow(torch.pow(intermediate_importance, exponent).sum(-1), 1/exponent)
intermediate_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if training_flag:
model.train()
return head_importance, intermediate_importance | null |
19,500 | import torch
from torch.utils.data import DataLoader, SequentialSampler
import numpy as np
from itertools import islice
from tqdm import tqdm
from math import sqrt
from collections import defaultdict
def what_to_prune_head(
head_importance,
n_to_prune,
old_head_mask,
at_least_x_heads_per_layer=1,
):
head_importance = head_importance.clone()
n_layers, n_heads = head_importance.size()
already_prune = {}
for layer in range(old_head_mask.size(0)):
for head in range(old_head_mask.size(1)):
if old_head_mask[layer][head].item() == 0:
if layer not in already_prune:
already_prune[layer] = []
already_prune[layer].append(head)
# Sort heads by score
heads_and_score = [
((layer, head), head_importance[layer][head].item())
for layer in range(n_layers)
for head in range(n_heads)
]
heads_and_score = sorted(heads_and_score, key=lambda x: x[1])
sorted_heads = [head_and_score[0]
for head_and_score in heads_and_score]
# Ensure we don't delete all heads in a layer
if at_least_x_heads_per_layer:
# Keep the top-scoring head(s) of each layer out of the prune candidates
to_protect = {l: 0 for l in range(n_layers)}
filtered_sorted_heads = []
for layer, head in reversed(sorted_heads):
if layer in to_protect:
if to_protect[layer] < at_least_x_heads_per_layer:
to_protect[layer] += 1
continue
else:
to_protect.pop(layer)
filtered_sorted_heads.insert(0, (layer, head))
sorted_heads = filtered_sorted_heads
# Filter out layer/heads that were already pruned
sorted_heads = [
(layer, head)
for (layer, head) in sorted_heads
if layer not in already_prune or head not in already_prune[layer]
]
old_head_mask = old_head_mask.clone()
new_head_mask = old_head_mask.clone()
# Prune the lowest-scoring remaining heads
for layer, head in sorted_heads[:n_to_prune]:
new_head_mask[layer][head] = 0
return new_head_mask | null |
19,501 | import torch
from torch.utils.data import DataLoader, SequentialSampler
import numpy as np
from itertools import islice
from tqdm import tqdm
from math import sqrt
from collections import defaultdict
def what_to_prune_mlp(
intermediate_importance,
n_to_prune,
old_intermediate_mask
):
intermediate_importance = intermediate_importance.clone()
n_layers, n_intermediate = intermediate_importance.size()
already_prune = defaultdict(list)
for layer in range(n_layers):
for intermediate_idx in range(n_intermediate):
if old_intermediate_mask[layer][intermediate_idx].item() == 0:
already_prune[layer].append(intermediate_idx)
score = [
((layer, intermediate_idx), intermediate_importance[layer][intermediate_idx].item())
for layer in range(n_layers) for intermediate_idx in range(n_intermediate)
]
score.sort(key=lambda x:x[-1])
filter_score = [
    ((layer, intermediate_idx), s)
    for ((layer, intermediate_idx), s) in score
    if layer not in already_prune or intermediate_idx not in already_prune[layer]
]
old_intermediate_mask = old_intermediate_mask.clone()
new_intermediate_mask = old_intermediate_mask.clone()
for (layer, intermediate_idx), _ in filter_score[:n_to_prune]:
new_intermediate_mask[layer][intermediate_idx] = 0
return new_intermediate_mask | null |
19,502 | import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from torch.utils.data import DataLoader
from model.PruneBert import PruneBertForSequenceClassification
from model.TeacherBert import TeacherBertForSequenceClassification
from prune.prune_utils import determine_pruning_sequence, what_to_prune_head, calculate_head_and_intermediate_importance, what_to_prune_mlp
from transformers.models.bert import BertPreTrainedModel, BertModel
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import SequenceClassifierOutput
import torch.nn as nn
from tqdm import tqdm
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CAPTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("glue.py", data_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
teacher = TeacherBertForSequenceClassification.from_pretrained(
training_args.teacher_path,
from_tf=bool(".ckpt" in training_args.teacher_path),
config=config,
alignrep=training_args.alignrep,
)
teacher = teacher.cuda()
model = PruneBertForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
contrastive_temperature=training_args.contrastive_temperature,
ce_loss_weight=training_args.ce_loss_weight,
cl_unsupervised_loss_weight=training_args.cl_unsupervised_loss_weight,
cl_supervised_loss_weight=training_args.cl_supervised_loss_weight,
distill_loss_weight=training_args.distill_loss_weight,
extra_examples=training_args.extra_examples,
alignrep=training_args.alignrep,
get_teacher_logits=teacher.get_teacher_logits if training_args.use_distill else None,
distill_temperature=training_args.distill_temperature,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
    "Your model seems to have been trained with labels, but they don't match the dataset: "
    f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
    "\nIgnoring the model labels as a result."
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
    f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
def add_id(examples, idx):
examples['idx'] = idx
return examples
train_dataset = train_dataset.map(
add_id,
batched=True,
with_indices=True,
)
if training_args.do_eval:
if "validation" not in datasets and "validation_matched" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
if "test" not in datasets and "test_matched" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("metric_glue.py", data_args.task_name)
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
global_representations_bank_finetuned = None
global_representations_bank_pretrained = None
global_representations_bank_snaps = None
# encode training examples using fine-tuned model (teacher)
if training_args.use_contrastive_loss:
dataloader = DataLoader(
train_dataset,
batch_size=trainer.args.train_batch_size,
shuffle=False,
collate_fn=trainer.data_collator,
drop_last=trainer.args.dataloader_drop_last,
num_workers=trainer.args.dataloader_num_workers,
pin_memory=trainer.args.dataloader_pin_memory,
)
with torch.no_grad():
for inputs in tqdm(dataloader):
inputs = trainer._prepare_inputs(inputs)
representations = teacher(encode_example=True, **inputs).cpu()
if global_representations_bank_finetuned is None:
global_representations_bank_finetuned = representations
else:
global_representations_bank_finetuned = torch.cat((global_representations_bank_finetuned, representations), dim=0)
global_representations_bank_finetuned = global_representations_bank_finetuned.unsqueeze(1)
if not training_args.use_distill:
teacher = None
if training_args.do_prune:
model = trainer._wrap_model(trainer.model, training=False)
model = model.module if hasattr(model, 'module') else model
# Determine the number of heads to prune
prune_percent = training_args.prune_percent
prune_percent = None if prune_percent == '' else [float(x) for x in prune_percent.split(',')]
prune_sequence_head, prune_sequence_intermediate = determine_pruning_sequence(
prune_percent,
config.num_hidden_layers,
config.num_attention_heads,
config.intermediate_size,
training_args.at_least_x_heads_per_layer,
)
prune_sequence = zip(prune_sequence_head, prune_sequence_intermediate)
for step, (n_to_prune_head, n_to_prune_intermediate) in enumerate(prune_sequence):
logger.info("We are going to prune {} heads and {} intermediate !!!".format(n_to_prune_head, n_to_prune_intermediate))
head_importance, intermediate_importance = calculate_head_and_intermediate_importance(
model,
train_dataset,
old_head_mask=model.head_mask,
old_intermediate_mask=model.intermediate_mask,
trainer=trainer,
normalize_scores_by_layer=training_args.normalize_pruning_by_layer,
subset_size=training_args.subset_ratio
)
for layer in range(len(head_importance)):
layer_scores = head_importance[layer].cpu().data
logger.info("head importance score")
logger.info("\t".join(f"{x:.5f}" for x in layer_scores))
# Determine which heads to prune
new_head_mask = what_to_prune_head(
head_importance,
n_to_prune=n_to_prune_head,
old_head_mask=model.head_mask,
at_least_x_heads_per_layer=training_args.at_least_x_heads_per_layer,
)
new_intermediate_mask = what_to_prune_mlp(
intermediate_importance,
n_to_prune=n_to_prune_intermediate,
old_intermediate_mask=model.intermediate_mask
)
for layer in range(len(new_head_mask)):
y = new_head_mask[layer].cpu().data
logger.info("head mask")
logger.info("\t".join("{}".format(int(x)) for x in y))
logger.info("intermediate mask")
for layer in range(len(new_intermediate_mask)):
y = new_intermediate_mask[layer]
logger.info("Layer {} has {} intermediate active.".format(layer, torch.sum(y)))
# calculate and store example representations and labels (for verification)
if training_args.use_contrastive_loss:
representations_bank = None
labels_bank = None
dataloader = DataLoader(
train_dataset,
batch_size=trainer.args.train_batch_size,
shuffle=False,
collate_fn=trainer.data_collator,
drop_last=trainer.args.dataloader_drop_last,
num_workers=trainer.args.dataloader_num_workers,
pin_memory=trainer.args.dataloader_pin_memory,
)
with torch.no_grad():
for inputs in tqdm(dataloader):
inputs = trainer._prepare_inputs(inputs)
labels = inputs['labels'].cpu()
representations = model(encode_example=True, **inputs).cpu()
if representations_bank is None:
representations_bank = representations
labels_bank = labels
else:
representations_bank = torch.cat((representations_bank, representations), dim=0)
labels_bank = torch.cat((labels_bank, labels), dim=0)
if step == 0:
# add to global representations bank for pretrained
global_representations_bank_pretrained = representations_bank
global_representations_bank_pretrained = global_representations_bank_pretrained.unsqueeze(1)
else:
# add to global representations bank for snaps
if global_representations_bank_snaps is None:
global_representations_bank_snaps = representations_bank.unsqueeze(1)
else:
global_representations_bank_snaps = torch.cat((global_representations_bank_snaps, representations_bank.unsqueeze(1)), dim=1)
# update bank
model.global_representations_bank_finetuned = global_representations_bank_finetuned
model.global_representations_bank_pretrained = global_representations_bank_pretrained
model.global_representations_bank_snaps = global_representations_bank_snaps
model.global_labels_bank = labels_bank
# apply structured pruning
model.head_mask[:] = new_head_mask.clone()
model.intermediate_mask[:] = new_intermediate_mask.clone()
# re-train
trainer.optimizer = trainer.lr_scheduler = None
trainer.args.num_train_epochs = training_args.retrain_num_train_epochs
trainer.train()
# re-eval
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
for eval_d, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_d)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_d)
metrics["eval_samples"] = min(max_val_samples, len(eval_d))
new_metrics = {}
for k in metrics:
new_metrics["{}_{}_{}".format(task, k, step+1)] = metrics[k]
metrics = new_metrics
trainer.log_metrics("{}_eval_{}".format(task, step+1), metrics)
trainer.save_metrics("{}_eval_{}".format(task, step+1), metrics)
trainer.save_model()
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
for eval_d, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_d)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_d)
metrics["eval_samples"] = min(max_val_samples, len(eval_d))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
test_datasets.append(datasets["test_mismatched"])
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` column because it contains -1 and Trainer won't like that.
test_dataset.remove_columns_("label")
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
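The teacher-encoding loops in the script all follow the same accumulate-then-unsqueeze pattern for building a representation bank. A toy sketch of that pattern, with a plain linear layer standing in for the teacher model (all names here are illustrative, not part of the script):

import torch
from torch.utils.data import DataLoader, TensorDataset

encoder = torch.nn.Linear(16, 8)             # stands in for teacher(encode_example=True, ...)
dataset = TensorDataset(torch.randn(10, 16))
bank = None
with torch.no_grad():
    for (batch,) in DataLoader(dataset, batch_size=4, shuffle=False):
        reps = encoder(batch).cpu()
        bank = reps if bank is None else torch.cat((bank, reps), dim=0)
bank = bank.unsqueeze(1)  # [num_examples, 1, hidden], matching the banks built above
print(bank.shape)         # torch.Size([10, 1, 8])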
19,503 | import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `postprocess_qa_predictions` function. Write a Python function `def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, is_world_process_zero: bool = True, )` to solve the following problem:
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this process is the main process or not (used to determine if logging/saves should be done).
Here is the function:
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
is_world_process_zero: bool = True,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing function for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this process is the main process or not (used to determine if logging/saves should be done).
"""
assert len(predictions) == 2, "`predictions` should be a tuple with two elements (start_logits, end_logits)."
all_start_logits, all_end_logits = predictions
assert len(predictions[0]) == len(features), f"Got {len(predictions[0])} predictions and {len(features)} features."
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` largest start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case where we don't have a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"predictions_{prefix}".json
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"nbest_predictions_{prefix}".json
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"null_odds_{prefix}".json
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions | Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this process is the main process or not (used to determine if logging/saves should be done). |
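A toy invocation sketch, assuming the `datasets` library is available; the ids, context, offsets, and logits below are made up, with the highest start/end logits placed on the span covering 'Paris':

import numpy as np
from datasets import Dataset

examples = Dataset.from_dict({"id": ["q0"], "context": ["Paris is the capital of France."]})
features = Dataset.from_dict({
    "example_id": ["q0"],
    # character offsets for [CLS], 'Paris', 'is', 'the', 'capital', 'of', 'France'
    "offset_mapping": [[[0, 0], [0, 5], [6, 8], [9, 12], [13, 20], [21, 23], [24, 30]]],
})
start_logits = np.array([[0.0, 5.0, 0.1, 0.1, 0.1, 0.1, 0.1]])
end_logits = np.array([[0.0, 5.0, 0.1, 0.1, 0.1, 0.1, 0.1]])
answers = postprocess_qa_predictions(examples, features, (start_logits, end_logits))
print(answers)  # OrderedDict([('q0', 'Paris')])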
19,504 | import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `postprocess_qa_predictions_with_beam_search` function. Write a Python function `def postprocess_qa_predictions_with_beam_search( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, start_n_top: int = 5, end_n_top: int = 5, output_dir: Optional[str] = None, prefix: Optional[str] = None, is_world_process_zero: bool = True, )` to solve the following problem:
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this process is the main process or not (used to determine if logging/saves should be done).
Here is the function:
def postprocess_qa_predictions_with_beam_search(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
start_n_top: int = 5,
end_n_top: int = 5,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
is_world_process_zero: bool = True,
):
"""
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the
original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as
cls token predictions.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
start_n_top (:obj:`int`, `optional`, defaults to 5):
    The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
end_n_top (:obj:`int`, `optional`, defaults to 5):
    The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this process is the main process or not (used to determine if logging/saves should be done).
"""
assert len(predictions) == 5, "`predictions` should be a tuple with five elements."
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
assert len(predictions[0]) == len(
features
), f"Got {len(predictions[0])} predicitions and {len(features)} features."
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
# Logging.
logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_log_prob = start_top_log_probs[feature_index]
start_indexes = start_top_index[feature_index]
end_log_prob = end_top_log_probs[feature_index]
end_indexes = end_top_index[feature_index]
feature_null_score = cls_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction
if min_null_score is None or feature_null_score < min_null_score:
min_null_score = feature_null_score
# Go through all possibilities for the `n_start_top`/`n_end_top` largest start and end logits.
for i in range(start_n_top):
for j in range(end_n_top):
start_index = start_indexes[i]
j_index = i * end_n_top + j
end_index = end_indexes[j_index]
# Don't consider out-of-scope answers (last part of the test should be unnecessary because of the
# p_mask but let's not take any risk)
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a negative length or one longer than max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_log_prob[i] + end_log_prob[j_index],
"start_log_prob": start_log_prob[i],
"end_log_prob": end_log_prob[j_index],
}
)
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case where we don't have a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0:
predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction and set the probability for the null answer.
all_predictions[example["id"]] = predictions[0]["text"]
if version_2_with_negative:
scores_diff_json[example["id"]] = float(min_null_score)
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"predictions_{prefix}".json
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"nbest_predictions_{prefix}".json
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"null_odds_{prefix}".json
)
print(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
print(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
print(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json | Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this process is the main process or not (used to determine if logging/saves should be done). |
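The beam-search variant consumes a 5-tuple instead; with start_n_top=1 and end_n_top=1 the index/log-prob arrays reduce to shape [num_features, 1]. A made-up single-feature sketch, reusing the toy example/feature layout from the sketch above:

import numpy as np
from datasets import Dataset

examples = Dataset.from_dict({"id": ["q0"], "context": ["Paris is the capital of France."]})
features = Dataset.from_dict({
    "example_id": ["q0"],
    "offset_mapping": [[[0, 0], [0, 5], [6, 8], [9, 12], [13, 20], [21, 23], [24, 30]]],
})
start_top_log_probs = np.array([[0.9]])  # [num_features, start_n_top]
start_top_index = np.array([[1]])        # best start token: 'Paris'
end_top_log_probs = np.array([[0.8]])    # [num_features, start_n_top * end_n_top]
end_top_index = np.array([[1]])          # best end token: 'Paris'
cls_logits = np.array([0.1])
answers, null_odds = postprocess_qa_predictions_with_beam_search(
    examples, features,
    (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits),
    start_n_top=1, end_n_top=1,
)
print(answers)  # OrderedDict([('q0', 'Paris')]); null_odds is None without version_2_with_negative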
19,505 | import torch.nn as nn
import torch
import inspect
from transformers.models.bert import BertPreTrainedModel
from transformers.models.bert.modeling_bert import BertEmbeddings, BertPooler, BaseModelOutputWithPoolingAndCrossAttentions, BertAttention, BertIntermediate, BaseModelOutputWithPastAndCrossAttentions
from typing import Callable
The provided code snippet includes necessary dependencies for implementing the `apply_chunking_to_forward` function. Write a Python function `def apply_chunking_to_forward( forward_fn: Callable[..., torch.Tensor], intermediate_mask, chunk_size: int, chunk_dim: int, *input_tensors ) -> torch.Tensor` to solve the following problem:
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory. If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as directly applying :obj:`forward_fn` to :obj:`input_tensors`. Args: forward_fn (:obj:`Callable[..., torch.Tensor]`): The forward function of the model. chunk_size (:obj:`int`): The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`. chunk_dim (:obj:`int`): The dimension over which the :obj:`input_tensors` should be chunked. input_tensors (:obj:`Tuple[torch.Tensor]`): The input tensors of ``forward_fn`` which will be chunked Returns: :obj:`torch.Tensor`: A tensor with the same shape as the :obj:`forward_fn` would have given if applied`. Examples:: # rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
Here is the function:
def apply_chunking_to_forward(
forward_fn: Callable[..., torch.Tensor], intermediate_mask, chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked
Returns:
:obj:`torch.Tensor`: A tensor with the same shape as the :obj:`forward_fn` would have given if applied.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
tensor_shape = input_tensors[0].shape[chunk_dim]
assert all(
input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
# inspect.signature has existed since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
if num_args_in_forward_chunk_fn - 1 != len(input_tensors):
raise ValueError(
f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
"tensors are given"
)
if chunk_size > 0:
if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
raise ValueError(
f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
f"size {chunk_size}"
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn (with the shared intermediate mask) to every chunk
output_chunks = tuple(forward_fn(intermediate_mask, *input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(intermediate_mask, *input_tensors) | This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory. If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as directly applying :obj:`forward_fn` to :obj:`input_tensors`. Args: forward_fn (:obj:`Callable[..., torch.Tensor]`): The forward function of the model. chunk_size (:obj:`int`): The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`. chunk_dim (:obj:`int`): The dimension over which the :obj:`input_tensors` should be chunked. input_tensors (:obj:`Tuple[torch.Tensor]`): The input tensors of ``forward_fn`` which will be chunked Returns: :obj:`torch.Tensor`: A tensor with the same shape as the :obj:`forward_fn` would have given if applied`. Examples:: # rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) |
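A quick sanity check of the non-chunked path (chunk_size=0), where the intermediate mask is forwarded to the chunk function; ffn_chunk is a toy stand-in that simply gates its hidden units with the mask:

import torch

def ffn_chunk(intermediate_mask, hidden_states):
    return hidden_states * intermediate_mask  # gate intermediate units

mask = torch.tensor([1.0, 1.0, 0.0, 1.0])
hidden = torch.randn(2, 8, 4)
out = apply_chunking_to_forward(ffn_chunk, mask, 0, 1, hidden)
print(out.shape, bool((out[..., 2] == 0).all()))  # torch.Size([2, 8, 4]) True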
19,506 | import argparse
import json
import re
import string
import sys
from collections import Counter
def f1_score(prediction, ground_truth):
def exact_match_score(prediction, ground_truth):
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1} | null |
19,507 | import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
return 0.5 * (1.0 + math.cos(math.pi * x)) | null
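With the math.cos fix above (x is a plain Python float, so torch.cos would raise), the schedule ramps linearly to 1.0 over the first warmup fraction of training and then decays along a half cosine; a quick spot check:

for x in (0.0, 0.001, 0.25, 0.5, 1.0):
    print(x, round(warmup_cosine(x), 4))
# 0.0 0.0 | 0.001 0.5 | 0.25 0.8536 | 0.5 0.5 | 1.0 0.0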
19,508 | import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 | null |
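Same warmup ramp, flat afterwards:

print(warmup_constant(0.001), warmup_constant(0.5), warmup_constant(1.0))  # 0.5 1.0 1.0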
19,509 | import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 - x
def schedule_func(sch):
try:
f = eval(sch)
except (NameError, SyntaxError):
f = warmup_linear
return f | null |
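schedule_func resolves a schedule by name via eval() and falls back to warmup_linear on failure; for example:

f = schedule_func("warmup_linear")
print(f(0.001), f(0.5))                # 0.5 0.5 (still in warmup, then linear decay)
g = schedule_func("no_such_schedule")  # NameError inside eval -> fallback
print(g is warmup_linear)              # True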
19,510 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
The provided code snippet includes necessary dependencies for implementing the `printable_text` function. Write a Python function `def printable_text(text)` to solve the following problem:
Returns text encoded in a way suitable for print or `tf.logging`.
Here is the function:
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?") | Returns text encoded in a way suitable for print or `tf.logging`. |
19,511 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab | Loads a vocabulary file into a dictionary. |
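A round-trip sketch with a temporary vocab file (one token per line, index = line number):

import os, tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("[PAD]\n[UNK]\nhello\nworld\n")
    path = f.name
vocab = load_vocab(path)
print(vocab["hello"], len(vocab))  # 2 4
os.unlink(path)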
19,512 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens | Runs basic whitespace cleaning and splitting on a piece of text.
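For example:

print(whitespace_tokenize("  hello\tworld \n"))  # ['hello', 'world']
print(whitespace_tokenize("   "))                # []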
19,513 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
19,514 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
19,515 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
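A quick check of the two branches above: ASCII symbols such as "^" fall inside the hard-coded ranges even though Unicode classifies them as modifier symbols ("Sk"), while non-ASCII punctuation like the fullwidth comma is caught by the "P" category test:

assert _is_punctuation("^")       # cp 94 lies in [91, 96] despite category "Sk"
assert _is_punctuation("\uff0c")  # fullwidth comma, Unicode category "Po"
assert not _is_punctuation("a")   # letters pass through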
19,516 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import sys
import torch
import json
from torch.utils.data import Dataset, Sampler, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import scipy.stats as sp
from multiprocessing import Pool
import multiprocessing as mp
from itertools import repeat
import tokenization
from modeling import BertConfig, BertForSequenceClassificationMultiTask
from optimization import BERTAdam, Adamax
def batchify(batch):
seq_len = max([len(feature.input_ids) for feature in batch])
input_ids, input_mask, segment_ids, label_id, label_index = \
list(), list(), list(), list(), list()
for feature in batch:
padding = [0 for _ in range(seq_len - len(feature.input_ids))]
input_ids_ins = feature.input_ids
input_mask_ins = feature.input_mask
segment_ids_ins = feature.segment_ids
input_ids_ins.extend(padding), input_mask_ins.extend(padding), segment_ids_ins.extend(padding)
input_ids.append(torch.tensor(input_ids_ins, dtype=torch.long))
input_mask.append(torch.tensor(input_mask_ins, dtype=torch.long))
segment_ids.append(torch.tensor(segment_ids_ins, dtype=torch.long))
label_id.append(torch.tensor(feature.label_id, dtype=torch.float))
label_index.append(torch.tensor(feature.index, dtype=torch.long))
input_ids = torch.stack(input_ids, 0)
input_mask = torch.stack(input_mask, 0)
segment_ids = torch.stack(segment_ids, 0)
label_id = torch.stack(label_id, 0)
label_index = torch.stack(label_index, 0)
return input_ids, input_mask, segment_ids, label_id, label_index | null |
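Because batchify pads each mini-batch only up to its longest member, it is meant to be passed as a DataLoader collate_fn. A minimal sketch, using SimpleNamespace as a stand-in for the InputFeatures objects the real pipeline produces (the field values below are made up):

from types import SimpleNamespace
from torch.utils.data import DataLoader

features = [
    SimpleNamespace(input_ids=[101, 7, 102], input_mask=[1, 1, 1],
                    segment_ids=[0, 0, 0], label_id=[1.0], index=0),
    SimpleNamespace(input_ids=[101, 102], input_mask=[1, 1],
                    segment_ids=[0, 0], label_id=[0.0], index=0),
]
loader = DataLoader(features, batch_size=2, collate_fn=batchify)
input_ids, input_mask, segment_ids, label_id, label_index = next(iter(loader))
assert input_ids.shape == (2, 3)  # second example padded to the batch maximum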
19,517 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import sys
import torch
import json
from torch.utils.data import Dataset, Sampler, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import scipy.stats as sp
from multiprocessing import Pool
import multiprocessing as mp
from itertools import repeat
import tokenization
from modeling import BertConfig, BertForSequenceClassificationMultiTask
from optimization import BERTAdam, Adamax
logger = logging.getLogger(__name__)
def examples_to_features_worker(example, max_seq_length, tokenizer, label_map, index, max_index, is_training, args):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
if not (is_training and args.fast_train):
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
else:
assert len(input_ids) == len(input_mask) == len(segment_ids)
label_id = [0] * max_index
if len(label_map[index]) != 0:
label_id[index] = label_map[index][example.label]
else:
label_id[index] = example.label
return InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
index=index,)
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(args, examples, label_lists, max_seq_length, tokenizer, index, max_index, is_training=False)` to solve the following problem:
Loads a data file into a list of `InputBatch`s.
Here is the function:
def convert_examples_to_features(args, examples, label_lists, max_seq_length, tokenizer, index, max_index, is_training=False):
"""Loads a data file into a list of `InputBatch`s."""
label_map_lst = []
for label_list in label_lists:
label_map = {}
if len(label_list) != 1:
for (i, label) in enumerate(label_list):
label_map[label] = i
label_map_lst.append(label_map)
pool = Pool(mp.cpu_count())
logger.info('start tokenize')
features = pool.starmap(examples_to_features_worker, zip(examples, repeat(max_seq_length), repeat(tokenizer), repeat(label_map_lst), repeat(index), repeat(max_index), repeat(is_training), repeat(args)))
pool.close()
pool.join()
return features | Loads a data file into a list of `InputBatch`s. |
19,518 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import sys
import torch
import json
from torch.utils.data import Dataset, Sampler, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import scipy.stats as sp
from multiprocessing import Pool
import multiprocessing as mp
from itertools import repeat
import tokenization
from modeling import BertConfig, BertForSequenceClassificationMultiTask
from optimization import BERTAdam, Adamax
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels) | null |
19,519 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import sys
import torch
import json
from torch.utils.data import Dataset, Sampler, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import scipy.stats as sp
from multiprocessing import Pool
import multiprocessing as mp
from itertools import repeat
import tokenization
from modeling import BertConfig, BertForSequenceClassificationMultiTask
from optimization import BERTAdam, Adamax
def matthew_corr(tp, tn, fp, fn):
denominator = np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
if denominator == 0:
return 0
else:
return (tp*tn-fp*fn)/denominator | null |
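A worked check of matthew_corr: with tp=6, tn=3, fp=1, fn=2 the numerator is 6*3 - 1*2 = 16 and the denominator is sqrt(7*8*4*5) = sqrt(1120) ≈ 33.47, giving an MCC of about 0.478; a degenerate confusion matrix (for example, no predicted positives) returns 0 instead of dividing by zero:

import numpy as np

assert abs(matthew_corr(6, 3, 1, 2) - 16 / np.sqrt(1120)) < 1e-12
assert matthew_corr(0, 5, 0, 5) == 0  # tp + fp == 0 makes the denominator 0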
19,520 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import sys
import torch
import json
from torch.utils.data import Dataset, Sampler, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import scipy.stats as sp
from multiprocessing import Pool
import multiprocessing as mp
from itertools import repeat
import tokenization
from modeling import BertConfig, BertForSequenceClassificationMultiTask
from optimization import BERTAdam, Adamax
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `copy_optimizer_params_to_model` function. Write a Python function `def copy_optimizer_params_to_model(named_params_model, named_params_optimizer)` to solve the following problem:
Utility function for optimize_on_cpu and 16-bits training. Copy the parameters optimized on CPU/RAM back to the model on GPU
Here is the function:
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
""" Utility function for optimize_on_cpu and 16-bits training.
Copy the parameters optimized on CPU/RAM back to the model on GPU
"""
for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
if name_opti != name_model:
logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
raise ValueError
param_model.data.copy_(param_opti.data) | Utility function for optimize_on_cpu and 16-bits training. Copy the parameters optimized on CPU/RAM back to the model on GPU |
19,521 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import sys
import torch
import json
from torch.utils.data import Dataset, Sampler, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import numpy as np
import scipy.stats as sp
from multiprocessing import Pool
import multiprocessing as mp
from itertools import repeat
import tokenization
from modeling import BertConfig, BertForSequenceClassificationMultiTask
from optimization import BERTAdam, Adamax
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `set_optimizer_params_grad` function. Write a Python function `def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False)` to solve the following problem:
Utility function for optimize_on_cpu and 16-bits training. Copy the gradient of the GPU parameters to the CPU/RAM copy of the model
Here is the function:
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
""" Utility function for optimize_on_cpu and 16-bits training.
Copy the gradient of the GPU parameters to the CPU/RAM copy of the model
"""
is_nan = False
for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
if name_opti != name_model:
logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
raise ValueError
if test_nan and torch.isnan(param_model.grad).sum() > 0:
is_nan = True
if param_opti.grad is None:
param_opti.grad = torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size()))
param_opti.grad.data.copy_(param_model.grad.data)
return is_nan | Utility function for optimize_on_cpu and 16-bits training. Copy the gradient of the GPU parameters to the CPU/RAM copy of the model |
19,522 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
import six
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Here is the function:
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) |
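The exact erf form above and the tanh approximation quoted in its docstring agree to within roughly 1e-3 over typical activation ranges, which is why the two are used interchangeably in practice. A quick numerical comparison:

import math
import torch

x = torch.linspace(-4, 4, steps=101)
exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
assert (exact - approx).abs().max() < 1e-3  # the two forms nearly coincide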
19,523 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
import six
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
import numpy as np
def dim_dropout(x, p=0, dim=-1, training=False):
if training == False or p == 0:
return x
dropout_mask = torch.bernoulli((1 - p) * (x.data.new(x.size()).zero_() + 1))
return dropout_mask * (dropout_mask.size(dim) / torch.sum(dropout_mask, dim=dim, keepdim=True)) * x | null |
19,524 | import collections.abc as collections
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple, Union
import cv2
import kornia
import numpy as np
import torch
def read_image(path: Path, grayscale: bool = False) -> np.ndarray:
"""Read an image from path as RGB or grayscale"""
if not Path(path).exists():
raise FileNotFoundError(f"No image at path {path}.")
mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_COLOR
image = cv2.imread(str(path), mode)
if image is None:
raise IOError(f"Could not read image at {path}.")
if not grayscale:
image = image[..., ::-1]
return image
def numpy_image_to_torch(image: np.ndarray) -> torch.Tensor:
"""Normalize the image tensor and reorder the dimensions."""
if image.ndim == 3:
image = image.transpose((2, 0, 1)) # HxWxC to CxHxW
elif image.ndim == 2:
image = image[None] # add channel axis
else:
raise ValueError(f"Not an image: {image.shape}")
return torch.tensor(image / 255.0, dtype=torch.float)
def resize_image(
image: np.ndarray,
size: Union[List[int], int],
fn: str = "max",
interp: Optional[str] = "area",
) -> Tuple[np.ndarray, Tuple[float, float]]:
"""Resize an image to a fixed size, or according to max or min edge."""
h, w = image.shape[:2]
fn = {"max": max, "min": min}[fn]
if isinstance(size, int):
scale = size / fn(h, w)
h_new, w_new = int(round(h * scale)), int(round(w * scale))
scale = (w_new / w, h_new / h)
elif isinstance(size, (tuple, list)):
h_new, w_new = size
scale = (w_new / w, h_new / h)
else:
raise ValueError(f"Incorrect new size: {size}")
mode = {
"linear": cv2.INTER_LINEAR,
"cubic": cv2.INTER_CUBIC,
"nearest": cv2.INTER_NEAREST,
"area": cv2.INTER_AREA,
}[interp]
return cv2.resize(image, (w_new, h_new), interpolation=mode), scale
def load_image(path: Path, resize: int = None, **kwargs) -> torch.Tensor:
image = read_image(path)
if resize is not None:
image, _ = resize_image(image, resize, **kwargs)
return numpy_image_to_torch(image) | null |
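A short usage sketch of the loading helpers above (the image path is hypothetical): read_image yields an RGB uint8 array, resize_image rescales so the max edge becomes 512, and numpy_image_to_torch produces a float CxHxW tensor in [0, 1]:

import torch

image = load_image("assets/example.jpg", resize=512)  # hypothetical path
assert image.dtype == torch.float32 and image.dim() == 3  # (3, H, W)
assert max(image.shape[-2:]) == 512  # resized by the default "max" edge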
19,525 | import collections.abc as collections
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple, Union
import cv2
import kornia
import numpy as np
import torch
def batch_to_device(batch: dict, device: str = "cpu", non_blocking: bool = True):
"""Move batch (dict) to device"""
def _func(tensor):
return tensor.to(device=device, non_blocking=non_blocking).detach()
return map_tensor(batch, _func)
def rbd(data: dict) -> dict:
"""Remove batch dimension from elements in data"""
return {
k: v[0] if isinstance(v, (torch.Tensor, np.ndarray, list)) else v
for k, v in data.items()
}
The provided code snippet includes necessary dependencies for implementing the `match_pair` function. Write a Python function `def match_pair( extractor, matcher, image0: torch.Tensor, image1: torch.Tensor, device: str = "cpu", **preprocess, )` to solve the following problem:
Match a pair of images (image0, image1) with an extractor and matcher
Here is the function:
def match_pair(
extractor,
matcher,
image0: torch.Tensor,
image1: torch.Tensor,
device: str = "cpu",
**preprocess,
):
"""Match a pair of images (image0, image1) with an extractor and matcher"""
feats0 = extractor.extract(image0, **preprocess)
feats1 = extractor.extract(image1, **preprocess)
matches01 = matcher({"image0": feats0, "image1": feats1})
data = [feats0, feats1, matches01]
# remove batch dim and move to target device
feats0, feats1, matches01 = [batch_to_device(rbd(x), device) for x in data]
return feats0, feats1, matches01 | Match a pair of images (image0, image1) with an extractor and matcher |
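A usage sketch, assuming the surrounding package is LightGlue with the constructor signatures below (the image paths are hypothetical):

import torch
from lightglue import LightGlue, SuperPoint
from lightglue.utils import load_image

device = "cuda" if torch.cuda.is_available() else "cpu"
extractor = SuperPoint(max_num_keypoints=2048).eval().to(device)
matcher = LightGlue(features="superpoint").eval().to(device)

image0 = load_image("assets/im0.jpg").to(device)  # hypothetical paths
image1 = load_image("assets/im1.jpg").to(device)
feats0, feats1, matches01 = match_pair(extractor, matcher, image0, image1)
matches = matches01["matches"]                 # (K, 2) indices into the two keypoint sets
points0 = feats0["keypoints"][matches[..., 0]]
points1 = feats1["keypoints"][matches[..., 1]]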
19,526 | from typing import Callable, Optional
import torch
import torch.nn.functional as F
import torchvision
from kornia.color import grayscale_to_rgb
from torch import nn
from torch.nn.modules.utils import _pair
from torchvision.models import resnet
from .utils import Extractor
def get_patches(
tensor: torch.Tensor, required_corners: torch.Tensor, ps: int
) -> torch.Tensor:
c, h, w = tensor.shape
corner = (required_corners - ps / 2 + 1).long()
corner[:, 0] = corner[:, 0].clamp(min=0, max=w - 1 - ps)
corner[:, 1] = corner[:, 1].clamp(min=0, max=h - 1 - ps)
offset = torch.arange(0, ps)
kw = {"indexing": "ij"} if torch.__version__ >= "1.10" else {}
x, y = torch.meshgrid(offset, offset, **kw)
patches = torch.stack((x, y)).permute(2, 1, 0).unsqueeze(2)
patches = patches.to(corner) + corner[None, None]
pts = patches.reshape(-1, 2)
sampled = tensor.permute(1, 2, 0)[tuple(pts.T)[::-1]]
sampled = sampled.reshape(ps, ps, -1, c)
assert sampled.shape[:3] == patches.shape[:3]
return sampled.permute(2, 3, 0, 1) | null |
19,527 | from typing import Callable, Optional
import torch
import torch.nn.functional as F
import torchvision
from kornia.color import grayscale_to_rgb
from torch import nn
from torch.nn.modules.utils import _pair
from torchvision.models import resnet
from .utils import Extractor
The provided code snippet includes necessary dependencies for implementing the `simple_nms` function. Write a Python function `def simple_nms(scores: torch.Tensor, nms_radius: int)` to solve the following problem:
Fast Non-maximum suppression to remove nearby points
Here is the function:
def simple_nms(scores: torch.Tensor, nms_radius: int):
"""Fast Non-maximum suppression to remove nearby points"""
zeros = torch.zeros_like(scores)
max_mask = scores == torch.nn.functional.max_pool2d(
scores, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius
)
for _ in range(2):
supp_mask = (
torch.nn.functional.max_pool2d(
max_mask.float(),
kernel_size=nms_radius * 2 + 1,
stride=1,
padding=nms_radius,
)
> 0
)
supp_scores = torch.where(supp_mask, zeros, scores)
new_max_mask = supp_scores == torch.nn.functional.max_pool2d(
supp_scores, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius
)
max_mask = max_mask | (new_max_mask & (~supp_mask))
return torch.where(max_mask, scores, zeros) | Fast Non-maximum suppression to remove nearby points |
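A small sanity check of the suppression above: within any (2*nms_radius+1)-sized window only the local maximum survives and every weaker neighbor is zeroed:

import torch

scores = torch.zeros(1, 1, 5, 5)
scores[0, 0, 2, 2] = 1.0  # strong detection
scores[0, 0, 2, 3] = 0.5  # weaker neighbor inside the radius
kept = simple_nms(scores, nms_radius=2)
assert kept[0, 0, 2, 2] == 1.0 and kept[0, 0, 2, 3] == 0.0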
19,528 | from typing import Callable, Optional
import torch
import torch.nn.functional as F
import torchvision
from kornia.color import grayscale_to_rgb
from torch import nn
from torch.nn.modules.utils import _pair
from torchvision.models import resnet
from .utils import Extractor
class DeformableConv2d(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
mask=False,
):
super(DeformableConv2d, self).__init__()
self.padding = padding
self.mask = mask
self.channel_num = (
3 * kernel_size * kernel_size if mask else 2 * kernel_size * kernel_size
)
self.offset_conv = nn.Conv2d(
in_channels,
self.channel_num,
kernel_size=kernel_size,
stride=stride,
padding=self.padding,
bias=True,
)
self.regular_conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=self.padding,
bias=bias,
)
def forward(self, x):
h, w = x.shape[2:]
max_offset = max(h, w) / 4.0
out = self.offset_conv(x)
if self.mask:
o1, o2, mask = torch.chunk(out, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
else:
offset = out
mask = None
offset = offset.clamp(-max_offset, max_offset)
x = torchvision.ops.deform_conv2d(
input=x,
offset=offset,
weight=self.regular_conv.weight,
bias=self.regular_conv.bias,
padding=self.padding,
mask=mask,
)
return x
def get_conv(
inplanes,
planes,
kernel_size=3,
stride=1,
padding=1,
bias=False,
conv_type="conv",
mask=False,
):
if conv_type == "conv":
conv = nn.Conv2d(
inplanes,
planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias,
)
elif conv_type == "dcn":
conv = DeformableConv2d(
inplanes,
planes,
kernel_size=kernel_size,
stride=stride,
padding=_pair(padding),
bias=bias,
mask=mask,
)
else:
raise TypeError
return conv | null |
19,529 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `cm_RdGn` function. Write a Python function `def cm_RdGn(x)` to solve the following problem:
Custom colormap: red (0) -> yellow (0.5) -> green (1).
Here is the function:
def cm_RdGn(x):
"""Custom colormap: red (0) -> yellow (0.5) -> green (1)."""
x = np.clip(x, 0, 1)[..., None] * 2
c = x * np.array([[0, 1.0, 0]]) + (2 - x) * np.array([[1.0, 0, 0]])
return np.clip(c, 0, 1) | Custom colormap: red (0) -> yellow (0.5) -> green (1). |
19,530 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
def cm_BlRdGn(x_):
"""Custom colormap: blue (-1) -> red (0.0) -> green (1)."""
x = np.clip(x_, 0, 1)[..., None] * 2
c = x * np.array([[0, 1.0, 0, 1.0]]) + (2 - x) * np.array([[1.0, 0, 0, 1.0]])
xn = -np.clip(x_, -1, 0)[..., None] * 2
cn = xn * np.array([[0, 0.1, 1, 1.0]]) + (2 - xn) * np.array([[1.0, 0, 0, 1.0]])
out = np.clip(np.where(x_[..., None] < 0, cn, c), 0, 1)
return out
The provided code snippet includes necessary dependencies for implementing the `cm_prune` function. Write a Python function `def cm_prune(x_)` to solve the following problem:
Custom colormap to visualize pruning
Here is the function:
def cm_prune(x_):
"""Custom colormap to visualize pruning"""
if isinstance(x_, torch.Tensor):
x_ = x_.cpu().numpy()
max_i = max(x_)
norm_x = np.where(x_ == max_i, -1, (x_ - 1) / 9)
return cm_BlRdGn(norm_x) | Custom colormap to visualize pruning |
19,531 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `plot_images` function. Write a Python function `def plot_images(imgs, titles=None, cmaps="gray", dpi=100, pad=0.5, adaptive=True)` to solve the following problem:
Plot a set of images horizontally. Args: imgs: list of NumPy RGB (H, W, 3) or PyTorch RGB (3, H, W) or mono (H, W). titles: a list of strings, as titles for each image. cmaps: colormaps for monochrome images. adaptive: whether the figure size should fit the image aspect ratios.
Here is the function:
def plot_images(imgs, titles=None, cmaps="gray", dpi=100, pad=0.5, adaptive=True):
"""Plot a set of images horizontally.
Args:
imgs: list of NumPy RGB (H, W, 3) or PyTorch RGB (3, H, W) or mono (H, W).
titles: a list of strings, as titles for each image.
cmaps: colormaps for monochrome images.
adaptive: whether the figure size should fit the image aspect ratios.
"""
# conversion to (H, W, 3) for torch.Tensor
imgs = [
img.permute(1, 2, 0).cpu().numpy()
if (isinstance(img, torch.Tensor) and img.dim() == 3)
else img
for img in imgs
]
n = len(imgs)
if not isinstance(cmaps, (list, tuple)):
cmaps = [cmaps] * n
if adaptive:
ratios = [i.shape[1] / i.shape[0] for i in imgs] # W / H
else:
ratios = [4 / 3] * n
figsize = [sum(ratios) * 4.5, 4.5]
fig, ax = plt.subplots(
1, n, figsize=figsize, dpi=dpi, gridspec_kw={"width_ratios": ratios}
)
if n == 1:
ax = [ax]
for i in range(n):
ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmaps[i]))
ax[i].get_yaxis().set_ticks([])
ax[i].get_xaxis().set_ticks([])
ax[i].set_axis_off()
for spine in ax[i].spines.values(): # remove frame
spine.set_visible(False)
if titles:
ax[i].set_title(titles[i])
fig.tight_layout(pad=pad) | Plot a set of images horizontally. Args: imgs: list of NumPy RGB (H, W, 3) or PyTorch RGB (3, H, W) or mono (H, W). titles: a list of strings, as titles for each image. cmaps: colormaps for monochrome images. adaptive: whether the figure size should fit the image aspect ratios. |
19,532 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `plot_keypoints` function. Write a Python function `def plot_keypoints(kpts, colors="lime", ps=4, axes=None, a=1.0)` to solve the following problem:
Plot keypoints for existing images. Args: kpts: list of ndarrays of size (N, 2). colors: string, or list of list of tuples (one for each keypoints). ps: size of the keypoints as float.
Here is the function:
def plot_keypoints(kpts, colors="lime", ps=4, axes=None, a=1.0):
"""Plot keypoints for existing images.
Args:
kpts: list of ndarrays of size (N, 2).
colors: string, or list of list of tuples (one for each keypoints).
ps: size of the keypoints as float.
"""
if not isinstance(colors, list):
colors = [colors] * len(kpts)
if not isinstance(a, list):
a = [a] * len(kpts)
if axes is None:
axes = plt.gcf().axes
for ax, k, c, alpha in zip(axes, kpts, colors, a):
if isinstance(k, torch.Tensor):
k = k.cpu().numpy()
ax.scatter(k[:, 0], k[:, 1], c=c, s=ps, linewidths=0, alpha=alpha) | Plot keypoints for existing images. Args: kpts: list of ndarrays of size (N, 2). colors: string, or list of list of tuples (one for each keypoints). ps: size of the keypoints as float. |
19,533 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `plot_matches` function. Write a Python function `def plot_matches(kpts0, kpts1, color=None, lw=1.5, ps=4, a=1.0, labels=None, axes=None)` to solve the following problem:
Plot matches for a pair of existing images. Args: kpts0, kpts1: corresponding keypoints of size (N, 2). color: color of each match, string or RGB tuple. Random if not given. lw: width of the lines. ps: size of the end points (no endpoint if ps=0) indices: indices of the images to draw the matches on. a: alpha opacity of the match lines.
Here is the function:
def plot_matches(kpts0, kpts1, color=None, lw=1.5, ps=4, a=1.0, labels=None, axes=None):
"""Plot matches for a pair of existing images.
Args:
kpts0, kpts1: corresponding keypoints of size (N, 2).
color: color of each match, string or RGB tuple. Random if not given.
lw: width of the lines.
ps: size of the end points (no endpoint if ps=0)
indices: indices of the images to draw the matches on.
a: alpha opacity of the match lines.
"""
fig = plt.gcf()
if axes is None:
ax = fig.axes
ax0, ax1 = ax[0], ax[1]
else:
ax0, ax1 = axes
if isinstance(kpts0, torch.Tensor):
kpts0 = kpts0.cpu().numpy()
if isinstance(kpts1, torch.Tensor):
kpts1 = kpts1.cpu().numpy()
assert len(kpts0) == len(kpts1)
if color is None:
color = matplotlib.cm.hsv(np.random.rand(len(kpts0))).tolist()
elif len(color) > 0 and not isinstance(color[0], (tuple, list)):
color = [color] * len(kpts0)
if lw > 0:
for i in range(len(kpts0)):
line = matplotlib.patches.ConnectionPatch(
xyA=(kpts0[i, 0], kpts0[i, 1]),
xyB=(kpts1[i, 0], kpts1[i, 1]),
coordsA=ax0.transData,
coordsB=ax1.transData,
axesA=ax0,
axesB=ax1,
zorder=1,
color=color[i],
linewidth=lw,
clip_on=True,
alpha=a,
label=None if labels is None else labels[i],
picker=5.0,
)
line.set_annotation_clip(True)
fig.add_artist(line)
# freeze the axes to prevent the transform to change
ax0.autoscale(enable=False)
ax1.autoscale(enable=False)
if ps > 0:
ax0.scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps)
ax1.scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps) | Plot matches for a pair of existing images. Args: kpts0, kpts1: corresponding keypoints of size (N, 2). color: color of each match, string or RGB tuple. Random if not given. lw: width of the lines. ps: size of the end points (no endpoint if ps=0) indices: indices of the images to draw the matches on. a: alpha opacity of the match lines. |
19,534 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
def add_text(
idx,
text,
pos=(0.01, 0.99),
fs=15,
color="w",
lcolor="k",
lwidth=2,
ha="left",
va="top",
):
ax = plt.gcf().axes[idx]
t = ax.text(
*pos, text, fontsize=fs, ha=ha, va=va, color=color, transform=ax.transAxes
)
if lcolor is not None:
t.set_path_effects(
[
path_effects.Stroke(linewidth=lwidth, foreground=lcolor),
path_effects.Normal(),
]
) | null |
19,535 | import matplotlib
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `save_plot` function. Write a Python function `def save_plot(path, **kw)` to solve the following problem:
Save the current figure without any white margin.
Here is the function:
def save_plot(path, **kw):
"""Save the current figure without any white margin."""
plt.savefig(path, bbox_inches="tight", pad_inches=0, **kw) | Save the current figure without any white margin. |
19,536 | import torch
from kornia.color import rgb_to_grayscale
from torch import nn
from .utils import Extractor
The provided code snippet includes necessary dependencies for implementing the `simple_nms` function. Write a Python function `def simple_nms(scores, nms_radius: int)` to solve the following problem:
Fast Non-maximum suppression to remove nearby points
Here is the function:
def simple_nms(scores, nms_radius: int):
"""Fast Non-maximum suppression to remove nearby points"""
assert nms_radius >= 0
def max_pool(x):
return torch.nn.functional.max_pool2d(
x, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius
)
zeros = torch.zeros_like(scores)
max_mask = scores == max_pool(scores)
for _ in range(2):
supp_mask = max_pool(max_mask.float()) > 0
supp_scores = torch.where(supp_mask, zeros, scores)
new_max_mask = supp_scores == max_pool(supp_scores)
max_mask = max_mask | (new_max_mask & (~supp_mask))
return torch.where(max_mask, scores, zeros) | Fast Non-maximum suppression to remove nearby points |
19,537 | import torch
from kornia.color import rgb_to_grayscale
from torch import nn
from .utils import Extractor
def top_k_keypoints(keypoints, scores, k):
if k >= len(keypoints):
return keypoints, scores
scores, indices = torch.topk(scores, k, dim=0, sorted=True)
return keypoints[indices], scores | null |
19,538 | import torch
from kornia.color import rgb_to_grayscale
from torch import nn
from .utils import Extractor
The provided code snippet includes necessary dependencies for implementing the `sample_descriptors` function. Write a Python function `def sample_descriptors(keypoints, descriptors, s: int = 8)` to solve the following problem:
Interpolate descriptors at keypoint locations
Here is the function:
def sample_descriptors(keypoints, descriptors, s: int = 8):
"""Interpolate descriptors at keypoint locations"""
b, c, h, w = descriptors.shape
keypoints = keypoints - s / 2 + 0.5
keypoints /= torch.tensor(
[(w * s - s / 2 - 0.5), (h * s - s / 2 - 0.5)],
).to(
keypoints
)[None]
keypoints = keypoints * 2 - 1 # normalize to (-1, 1)
args = {"align_corners": True} if torch.__version__ >= "1.3" else {}
descriptors = torch.nn.functional.grid_sample(
descriptors, keypoints.view(b, 1, -1, 2), mode="bilinear", **args
)
descriptors = torch.nn.functional.normalize(
descriptors.reshape(b, c, -1), p=2, dim=1
)
return descriptors | Interpolate descriptors at keypoint locations |
19,539 | import warnings
import cv2
import numpy as np
import torch
from kornia.color import rgb_to_grayscale
from packaging import version
from .utils import Extractor
def filter_dog_point(points, scales, angles, image_shape, nms_radius, scores=None):
h, w = image_shape
ij = np.round(points - 0.5).astype(int).T[::-1]
# Remove duplicate points (identical coordinates).
# Pick highest scale or score
s = scales if scores is None else scores
buffer = np.zeros((h, w))
np.maximum.at(buffer, tuple(ij), s)
keep = np.where(buffer[tuple(ij)] == s)[0]
# Pick lowest angle (arbitrary).
ij = ij[:, keep]
buffer[:] = np.inf
o_abs = np.abs(angles[keep])
np.minimum.at(buffer, tuple(ij), o_abs)
mask = buffer[tuple(ij)] == o_abs
ij = ij[:, mask]
keep = keep[mask]
if nms_radius > 0:
# Apply NMS on the remaining points
buffer[:] = 0
buffer[tuple(ij)] = s[keep] # scores or scale
local_max = torch.nn.functional.max_pool2d(
torch.from_numpy(buffer).unsqueeze(0),
kernel_size=nms_radius * 2 + 1,
stride=1,
padding=nms_radius,
).squeeze(0)
is_local_max = buffer == local_max.numpy()
keep = keep[is_local_max[tuple(ij)]]
return keep | null |
19,540 | import warnings
import cv2
import numpy as np
import torch
from kornia.color import rgb_to_grayscale
from packaging import version
from .utils import Extractor
def sift_to_rootsift(x: torch.Tensor, eps=1e-6) -> torch.Tensor:
x = torch.nn.functional.normalize(x, p=1, dim=-1, eps=eps)
x.clip_(min=eps).sqrt_()
return torch.nn.functional.normalize(x, p=2, dim=-1, eps=eps) | null |
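sift_to_rootsift above implements RootSIFT: L1-normalize, take an element-wise square root, then L2-normalize, so dot products between the outputs approximate the Hellinger kernel on the raw histograms. A quick check that the result is unit-length:

import torch

desc = torch.rand(10, 128)  # stand-in SIFT descriptors (non-negative histograms)
root = sift_to_rootsift(desc)
assert torch.allclose(root.norm(p=2, dim=-1), torch.ones(10), atol=1e-5)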
19,541 | import warnings
import cv2
import numpy as np
import torch
from kornia.color import rgb_to_grayscale
from packaging import version
from .utils import Extractor
The provided code snippet includes necessary dependencies for implementing the `run_opencv_sift` function. Write a Python function `def run_opencv_sift(features: cv2.Feature2D, image: np.ndarray) -> tuple` to solve the following problem:
Detect keypoints using OpenCV Detector. Optionally, perform description. Args: features: OpenCV based keypoints detector and descriptor image: Grayscale image of uint8 data type Returns: keypoints: 1D array of detected cv2.KeyPoint scores: 1D array of responses descriptors: 1D array of descriptors
Here is the function:
def run_opencv_sift(features: cv2.Feature2D, image: np.ndarray) -> tuple:
"""
Detect keypoints using OpenCV Detector.
Optionally, perform description.
Args:
features: OpenCV based keypoints detector and descriptor
image: Grayscale image of uint8 data type
Returns:
keypoints: 1D array of detected cv2.KeyPoint
scores: 1D array of responses
descriptors: 1D array of descriptors
"""
detections, descriptors = features.detectAndCompute(image, None)
points = np.array([k.pt for k in detections], dtype=np.float32)
scores = np.array([k.response for k in detections], dtype=np.float32)
scales = np.array([k.size for k in detections], dtype=np.float32)
angles = np.deg2rad(np.array([k.angle for k in detections], dtype=np.float32))
return points, scores, scales, angles, descriptors | Detect keypoints using OpenCV Detector. Optionally, perform description. Args: features: OpenCV based keypoints detector and descriptor image: Grayscale image of uint8 data type Returns: keypoints: 1D array of detected cv2.KeyPoint scores: 1D array of responses descriptors: 1D array of descriptors |
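A usage sketch with OpenCV's built-in SIFT (the image path is hypothetical; any grayscale uint8 array works):

import cv2

sift = cv2.SIFT_create(nfeatures=512)
image = cv2.imread("assets/example.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical path
points, scores, scales, angles, descriptors = run_opencv_sift(sift, image)
# points: (N, 2) float32 (x, y); scores/scales/angles: (N,); descriptors: (N, 128)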
19,542 | import warnings
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
torch.backends.cudnn.deterministic = True
def normalize_keypoints(
kpts: torch.Tensor, size: Optional[torch.Tensor] = None
) -> torch.Tensor:
if size is None:
size = 1 + kpts.max(-2).values - kpts.min(-2).values
elif not isinstance(size, torch.Tensor):
size = torch.tensor(size, device=kpts.device, dtype=kpts.dtype)
size = size.to(kpts)
shift = size / 2
scale = size.max(-1).values / 2
kpts = (kpts - shift[..., None, :]) / scale[..., None, None]
return kpts | null |
19,543 | import warnings
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
torch.backends.cudnn.deterministic = True
def pad_to_length(x: torch.Tensor, length: int) -> Tuple[torch.Tensor]:
if length <= x.shape[-2]:
return x, torch.ones_like(x[..., :1], dtype=torch.bool)
pad = torch.ones(
*x.shape[:-2], length - x.shape[-2], x.shape[-1], device=x.device, dtype=x.dtype
)
y = torch.cat([x, pad], dim=-2)
mask = torch.zeros(*y.shape[:-1], 1, dtype=torch.bool, device=x.device)
mask[..., : x.shape[-2], :] = True
return y, mask | null |
19,544 | import warnings
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
torch.backends.cudnn.deterministic = True
def rotate_half(x: torch.Tensor) -> torch.Tensor:
x = x.unflatten(-1, (-1, 2))
x1, x2 = x.unbind(dim=-1)
return torch.stack((-x2, x1), dim=-1).flatten(start_dim=-2)
def apply_cached_rotary_emb(freqs: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
return (t * freqs[0]) + (rotate_half(t) * freqs[1]) | null |
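rotate_half pairs up the last dimension and rotates each (x1, x2) pair by 90 degrees, so apply_cached_rotary_emb computes t * cos + rotate_half(t) * sin, i.e. a rotary positional embedding with precomputed tables. A sketch with hand-built tables (the freqs layout, cos stacked over sin, is an assumption; the real tables come from the cached positional encoding module):

import torch

t = torch.randn(1, 4, 8)      # (batch, tokens, head_dim)
theta = torch.zeros(1, 4, 8)  # rotation angle per channel pair
freqs = torch.stack([theta.cos(), theta.sin()])  # freqs[0] = cos, freqs[1] = sin
out = apply_cached_rotary_emb(freqs, t)
assert torch.allclose(out, t)  # zero angle -> identity rotation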
19,545 | import warnings
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
torch.backends.cudnn.deterministic = True
The provided code snippet includes necessary dependencies for implementing the `sigmoid_log_double_softmax` function. Write a Python function `def sigmoid_log_double_softmax( sim: torch.Tensor, z0: torch.Tensor, z1: torch.Tensor ) -> torch.Tensor` to solve the following problem:
create the log assignment matrix from logits and similarity
Here is the function:
def sigmoid_log_double_softmax(
sim: torch.Tensor, z0: torch.Tensor, z1: torch.Tensor
) -> torch.Tensor:
"""create the log assignment matrix from logits and similarity"""
b, m, n = sim.shape
certainties = F.logsigmoid(z0) + F.logsigmoid(z1).transpose(1, 2)
scores0 = F.log_softmax(sim, 2)
scores1 = F.log_softmax(sim.transpose(-1, -2).contiguous(), 2).transpose(-1, -2)
scores = sim.new_full((b, m + 1, n + 1), 0)
scores[:, :m, :n] = scores0 + scores1 + certainties
scores[:, :-1, -1] = F.logsigmoid(-z0.squeeze(-1))
scores[:, -1, :-1] = F.logsigmoid(-z1.squeeze(-1))
return scores | create the log assignment matrix from logits and similarity |
19,546 | import warnings
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
torch.backends.cudnn.deterministic = True
The provided code snippet includes necessary dependencies for implementing the `filter_matches` function. Write a Python function `def filter_matches(scores: torch.Tensor, th: float)` to solve the following problem:
obtain matches from a log assignment matrix [Bx M+1 x N+1]
Here is the function:
def filter_matches(scores: torch.Tensor, th: float):
"""obtain matches from a log assignment matrix [Bx M+1 x N+1]"""
max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1)
m0, m1 = max0.indices, max1.indices
indices0 = torch.arange(m0.shape[1], device=m0.device)[None]
indices1 = torch.arange(m1.shape[1], device=m1.device)[None]
mutual0 = indices0 == m1.gather(1, m0)
mutual1 = indices1 == m0.gather(1, m1)
max0_exp = max0.values.exp()
zero = max0_exp.new_tensor(0)
mscores0 = torch.where(mutual0, max0_exp, zero)
mscores1 = torch.where(mutual1, mscores0.gather(1, m1), zero)
valid0 = mutual0 & (mscores0 > th)
valid1 = mutual1 & valid0.gather(1, m1)
m0 = torch.where(valid0, m0, -1)
m1 = torch.where(valid1, m1, -1)
return m0, m1, mscores0, mscores1 | obtain matches from a log assignment matrix [Bx M+1 x N+1] |
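A small demonstration of the mutual check in filter_matches: build a 2x2 log-assignment matrix (plus the dustbin row/column) in which point 0 matches point 1 and vice versa, and confirm both directions agree:

import math
import torch

scores = torch.full((1, 3, 3), math.log(1e-6))  # (B, M+1, N+1) log assignments
scores[0, 0, 1] = math.log(0.9)  # kpts0[0] <-> kpts1[1]
scores[0, 1, 0] = math.log(0.8)  # kpts0[1] <-> kpts1[0]
m0, m1, mscores0, mscores1 = filter_matches(scores, th=0.1)
assert m0.tolist() == [[1, 0]] and m1.tolist() == [[1, 0]]  # mutual nearest neighbors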
19,547 | import argparse
import time
from collections import defaultdict
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch._dynamo
from lightglue import LightGlue, SuperPoint
from lightglue.utils import load_image
torch.set_grad_enabled(False)
def measure(matcher, data, device="cuda", r=100):
timings = np.zeros((r, 1))
if device.type == "cuda":
starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
# warmup
for _ in range(10):
_ = matcher(data)
# measurements
with torch.no_grad():
for rep in range(r):
if device.type == "cuda":
starter.record()
_ = matcher(data)
ender.record()
# sync gpu
torch.cuda.synchronize()
curr_time = starter.elapsed_time(ender)
else:
start = time.perf_counter()
_ = matcher(data)
curr_time = (time.perf_counter() - start) * 1e3
timings[rep] = curr_time
mean_syn = np.sum(timings) / r
std_syn = np.std(timings)
return {"mean": mean_syn, "std": std_syn} | null |
19,548 | import argparse
import time
from collections import defaultdict
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch._dynamo
from lightglue import LightGlue, SuperPoint
from lightglue.utils import load_image
def print_as_table(d, title, cnames):
print()
header = f"{title:30} " + " ".join([f"{x:>7}" for x in cnames])
print(header)
print("-" * len(header))
for k, l in d.items():
print(f"{k:30}", " ".join([f"{x:>7.1f}" for x in l])) | null |
19,549 | from collections import Counter
from pathlib import Path
from loguru import logger
from eaio import __electron_source__
from eaio.function.link import link as link_, unlink as unlink_
from eaio.function.check import get_repos_status, find_app_entries, get_files_link_status
from eaio.function.download import download_electron
from eaio.util.error import TargetError, ScanError, RepoError, DownloadError
from eaio.util.status import LinkStatus, RepoRootStatus, RepoStatus, RepoChildStatus
from eaio.util.utils import to_drive, str_size
def __dir_pre(target: Path):
try:
app_entries = find_app_entries(target)
except TargetError as e:
logger.error(e)
exit(1)
except ScanError as e:
logger.error(e)
exit(1)
if len(app_entries) > 1:
logger.info(f'Multiple likely app entry points found: {app_entries}')
app_entry_index_str = input('Enter the index of the correct app entry point: ')
if not app_entry_index_str:
logger.error('No input given')
exit(1)
app_entry_index = int(app_entry_index_str)
if not (0 <= app_entry_index < len(app_entries)):
logger.error('Input out of range')
exit(1)
else:
app_entry_index = 0
app_entry, electron_arch, electron_version = app_entries[app_entry_index]
try:
return app_entry, electron_arch, electron_version, [i for i in get_files_link_status(target, app_entry, electron_arch, electron_version)]
except TargetError as e:
logger.error(e)
exit(1)
except RepoError as e:
logger.error(e)
logger.info("正在下载")
download(to_drive(target.drive), electron_version, electron_arch)
link(target) # 重新执行本次操作
class LinkStatus(enum.Enum):
Linked = "已链接"
CanLink = "可链接"
NoMatch = "内容不一致"
NoTarget = "无目标"
IsDir = "文件夹"
def str_size(size_bytes: int) -> str:
"""
Convert a byte count into a str with a unit
From: https://stackoverflow.com/a/14822210
:param size_bytes: number of bytes
:return: str with a unit
"""
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
if size_bytes == 0:
i = 0
s = 0
else:
i = int(math.floor(math.log(size_bytes, 1024)))
s = round(size_bytes / math.pow(1024, i), 2)
return "%s %s" % (s, size_name[i])
def unlink(target: Path):
app_entry, electron_arch, electron_version, files = __dir_pre(target)
can_unlink = []
for path, depth, link_status in files:
if link_status == LinkStatus.Linked:
can_unlink.append(path)
logger.info(f'{len(can_unlink)} files can be unlinked, {str_size(sum(i.stat().st_size for i in can_unlink))} in total')
unlink_(app_entry, can_unlink)
logger.info('Unlinking finished') | null |
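A couple of worked values for the str_size helper defined above (the unit index is floor(log_1024(size))):

assert str_size(0) == "0 B"
assert str_size(1536) == "1.5 KB"        # 1536 / 1024 = 1.5
assert str_size(5 * 1024 ** 3) == "5.0 GB"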
19,550 | from collections import Counter
from pathlib import Path
from loguru import logger
from eaio import __electron_source__
from eaio.function.link import link as link_, unlink as unlink_
from eaio.function.check import get_repos_status, find_app_entries, get_files_link_status
from eaio.function.download import download_electron
from eaio.util.error import TargetError, ScanError, RepoError, DownloadError
from eaio.util.status import LinkStatus, RepoRootStatus, RepoStatus, RepoChildStatus
from eaio.util.utils import to_drive, str_size
def __dir_pre(target: Path): # redo this operation
class LinkStatus(enum.Enum):
def str_size(size_bytes: int) -> str:
def check(target: Path):
app_entry, electron_arch, electron_version, files = __dir_pre(target)
logger.info(f'App entry point: {app_entry}')
logger.info(f'Electron version: {electron_version}')
logger.info(f'CPU architecture: {electron_arch}')
logger.info('')
total_sum: Counter[LinkStatus] = Counter()
total_size: Counter[LinkStatus] = Counter()
for path, depth, link_status in files:
logger.info(' '*depth+f'{path.name}'+' '+link_status.value)
total_sum[link_status] += 1
total_size[link_status] += path.stat().st_size
logger.info('')
logger.info(f"已链接 {total_sum[LinkStatus.Linked]} 个, 共 {str_size(total_size[LinkStatus.Linked])}")
logger.info(f"可链接 {total_sum[LinkStatus.CanLink]} 个, 共 {str_size(total_size[LinkStatus.CanLink])}")
logger.info(f"内容不一致 {total_sum[LinkStatus.NoMatch]} 个, 共 {str_size(total_size[LinkStatus.NoMatch])}")
logger.info(f"无目标 {total_sum[LinkStatus.NoTarget]} 个, 共 {str_size(total_size[LinkStatus.NoTarget])}") | null |
19,551 | from collections import Counter
from pathlib import Path
from loguru import logger
from eaio import __electron_source__
from eaio.function.link import link as link_, unlink as unlink_
from eaio.function.check import get_repos_status, find_app_entries, get_files_link_status
from eaio.function.download import download_electron
from eaio.util.error import TargetError, ScanError, RepoError, DownloadError
from eaio.util.status import LinkStatus, RepoRootStatus, RepoStatus, RepoChildStatus
from eaio.util.utils import to_drive, str_size
def get_repos_status() -> Generator[tuple[Path, int, RepoRootStatus | RepoStatus | RepoChildStatus], None, None]:
for drive in get_all_drives():
repo_root = drive.joinpath(__electron_repo_root__)
if not repo_root.exists():
logger.warning(f"{repo_root} 不存在")
yield repo_root, 0, RepoRootStatus.NotExist
continue
if not repo_root.is_dir():
logger.warning(f"{repo_root} 不为文件夹")
yield repo_root, 0, RepoRootStatus.NotDir
continue
yield repo_root, 0, RepoRootStatus.AllRight
for repo in repo_root.iterdir():
yield from check_repo_status(repo, 1)
class RepoRootStatus(enum.Enum):
NotExist = "Does not exist"
NotDir = "Not a directory"
AllRight = "Link repo root"
class RepoStatus(enum.Enum):
NotDownload = "Not downloaded"
DownloadFailed = "Download failed"
AllRight = "Link repo"
class RepoChildStatus(enum.Enum):
Downloaded = "Downloaded"
Deleted = "Deleted"
Modified = "Modified"
IsDir = "Directory"
def status():
for path, depth, repo_status in get_repos_status():
match depth, repo_status:
case 0, RepoRootStatus.AllRight:
logger.info(f"{path}")
case 0, RepoRootStatus.NotExist:
logger.info(f"{path} 未创建")
case 1, RepoStatus.AllRight:
logger.info(' '*depth*2+f"{path.name}")
case 1, _:
logger.error(' '*depth*2+f"{path.name} {repo_status.value}")
case _, RepoChildStatus.Downloaded | RepoChildStatus.IsDir if depth > 1:
pass # logger.debug(' '*2*2+f"{path.relative_to(path.parents[depth-2])} {repo_status.value}")
case _, _ if depth > 1:
logger.error(' '*2*2+f"{path.relative_to(path.parents[depth-2])} {repo_status.value}")
case _, _:
logger.error('Unknown: '+' '*depth*2+f"{path.name} {repo_status.value}") | null |
19,552 | import argparse
from pathlib import Path
import sys
from loguru import logger
from eaio import __fullname__, __description__, __electron_repo_root__, __electron_source__
from eaio.entry.gui import gui
from eaio.entry.cli import link, unlink, check, status, download
from eaio.util.utils import to_drive, log
import io
log = io.StringIO()
def log_config(verbose: bool = False):
logger.remove()
log_format = "<level>{level: ^8}</level> | <level>{message}</level>"
logger.add(log, filter=lambda log_instance: log_instance['level'].name == "DEBUG")
logger.add(log, filter=lambda log_instance: log_instance['level'].name == "WARNING")
logger.add(sys.stdout, format=log_format, filter=lambda log_instance: log_instance['level'].name == "INFO")
logger.add(sys.stderr, format=log_format, filter=lambda log_instance: log_instance['level'].name == "ERROR")
if verbose:
logger.add(sys.stdout, format=log_format, filter=lambda log_instance: log_instance['level'].name == "DEBUG")
logger.add(sys.stderr, format=log_format, filter=lambda log_instance: log_instance['level'].name == "WARNING") | null |
19,553 | import sys
import json
from typing import Optional
import cv2
import torch
from PIL import Image
import mmcv
from mmdet.core.visualization.image import imshow_det_bboxes
import numpy as np
import pycocotools.mask as maskUtils
from transformers import CLIPProcessor, CLIPModel
from transformers import AutoProcessor, CLIPSegForImageSegmentation
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
from transformers import BlipProcessor, BlipForConditionalGeneration
from cog import BasePredictor, Input, Path, BaseModel
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
from configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL
from configs.coco_id2label import CONFIG as CONFIG_COCO_ID2LABEL
from clip import clip_classification
from clipseg import clipseg_segmentation
from oneformer import oneformer_coco_segmentation, oneformer_ade20k_segmentation
from blip import open_vocabulary_classification_blip
def clip_classification(image, class_list, top_k, clip_processor, clip_model, rank):
def clipseg_segmentation(image, class_list, clipseg_processor, clipseg_model, rank):
def oneformer_coco_segmentation(image, oneformer_coco_processor, oneformer_coco_model, rank):
def oneformer_ade20k_segmentation(image, oneformer_ade20k_processor, oneformer_ade20k_model, rank):
def open_vocabulary_classification_blip(raw_image, blip_processor, blip_model, rank):
def semantic_annotation_pipeline(
seg_json,
image,
json_out,
seg_out,
rank=0,
scale_small=1.2,
scale_large=1.6,
clip_processor=None,
clip_model=None,
oneformer_ade20k_processor=None,
oneformer_ade20k_model=None,
oneformer_coco_processor=None,
oneformer_coco_model=None,
blip_processor=None,
blip_model=None,
clipseg_processor=None,
clipseg_model=None,
):
anns = mmcv.load(seg_json)
img = mmcv.imread(image)
bitmasks, class_names = [], []
class_ids_from_oneformer_coco = oneformer_coco_segmentation(
Image.fromarray(img), oneformer_coco_processor, oneformer_coco_model, 0
)
class_ids_from_oneformer_ade20k = oneformer_ade20k_segmentation(
Image.fromarray(img), oneformer_ade20k_processor, oneformer_ade20k_model, 0
)
for ann in anns:
valid_mask = torch.tensor(maskUtils.decode(ann["segmentation"])).bool()
# get the class ids of the valid pixels
coco_propose_classes_ids = class_ids_from_oneformer_coco[valid_mask]
ade20k_propose_classes_ids = class_ids_from_oneformer_ade20k[valid_mask]
top_k_coco_propose_classes_ids = (
torch.bincount(coco_propose_classes_ids.flatten()).topk(1).indices
)
top_k_ade20k_propose_classes_ids = (
torch.bincount(ade20k_propose_classes_ids.flatten()).topk(1).indices
)
local_class_names = set()
local_class_names = set.union(
local_class_names,
set(
[
CONFIG_ADE20K_ID2LABEL["id2label"][str(class_id.item())]
for class_id in top_k_ade20k_propose_classes_ids
]
),
)
local_class_names = set.union(
local_class_names,
set(
(
[
CONFIG_COCO_ID2LABEL["refined_id2label"][str(class_id.item())]
for class_id in top_k_coco_propose_classes_ids
]
)
),
)
patch_small = mmcv.imcrop(
img,
np.array(
[
ann["bbox"][0],
ann["bbox"][1],
ann["bbox"][0] + ann["bbox"][2],
ann["bbox"][1] + ann["bbox"][3],
]
),
scale=scale_small,
)
patch_large = mmcv.imcrop(
img,
np.array(
[
ann["bbox"][0],
ann["bbox"][1],
ann["bbox"][0] + ann["bbox"][2],
ann["bbox"][1] + ann["bbox"][3],
]
),
scale=scale_large,
)
patch_huge = mmcv.imcrop(
img,
np.array(
[
ann["bbox"][0],
ann["bbox"][1],
ann["bbox"][0] + ann["bbox"][2],
ann["bbox"][1] + ann["bbox"][3],
]
),
scale=scale_large,
)
valid_mask_huge_crop = mmcv.imcrop(
valid_mask.numpy(),
np.array(
[
ann["bbox"][0],
ann["bbox"][1],
ann["bbox"][0] + ann["bbox"][2],
ann["bbox"][1] + ann["bbox"][3],
]
),
scale=scale_large,
)
op_class_list = open_vocabulary_classification_blip(
patch_large, blip_processor, blip_model, rank
)
local_class_list = list(
set.union(local_class_names, set(op_class_list))
) # , set(refined_imagenet_class_names)
mask_categories = clip_classification(
patch_small,
local_class_list,
3 if len(local_class_list) > 3 else len(local_class_list),
clip_processor,
clip_model,
rank,
)
class_ids_patch_huge = clipseg_segmentation(
patch_huge, mask_categories, clipseg_processor, clipseg_model, rank
).argmax(0)
top_1_patch_huge = (
torch.bincount(
class_ids_patch_huge[torch.tensor(valid_mask_huge_crop)].flatten()
)
.topk(1)
.indices
)
top_1_mask_category = mask_categories[top_1_patch_huge.item()]
ann["class_name"] = str(top_1_mask_category)
ann["class_proposals"] = mask_categories
class_names.append(ann["class_name"])
bitmasks.append(maskUtils.decode(ann["segmentation"]))
mmcv.dump(anns, json_out)
imshow_det_bboxes(
img,
bboxes=None,
labels=np.arange(len(bitmasks)),
segms=np.stack(bitmasks),
class_names=class_names,
font_size=25,
show=False,
out_file=seg_out,
) | null |
19,554 | import torch.nn.functional as F
def segformer_segmentation(image, processor, model, rank):
h, w, _ = image.shape
inputs = processor(images=image, return_tensors="pt").to(rank)
outputs = model(**inputs)
logits = outputs.logits
logits = F.interpolate(logits, size=(h, w), mode='bilinear', align_corners=True)
predicted_semantic_map = logits.argmax(dim=1).squeeze(0)
return predicted_semantic_map | null |
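A usage sketch assuming the Hugging Face SegFormer checkpoint and processor classes below (the model id and input image are placeholders):

import numpy as np
import torch
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

rank = "cuda" if torch.cuda.is_available() else "cpu"
name = "nvidia/segformer-b0-finetuned-ade-512-512"  # assumed checkpoint id
processor = SegformerImageProcessor.from_pretrained(name)
model = SegformerForSemanticSegmentation.from_pretrained(name).to(rank)

image = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder RGB image
semantic_map = segformer_segmentation(image, processor, model, rank)
assert tuple(semantic_map.shape) == (480, 640)  # one class id per pixel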
19,555 | import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from mmseg.core import add_prefix
from mmseg.ops import resize
from mmcv.utils import print_log
import os
import mmcv
import argparse
import numpy as np
from collections import OrderedDict
import pycocotools.mask as maskUtils
from prettytable import PrettyTable
from torchvision.utils import save_image, make_grid
from mmseg.core import eval_metrics, intersect_and_union, pre_eval_to_metrics
args = parse_args()
if args.dataset == 'cityscapes' or args.dataset == 'foggy_driving':
class_names = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
elif args.dataset == 'ade20k':
class_names = ('wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag')
if args.dataset == 'cityscapes':
prefixs = ['frankfurt','lindau','munster']
elif args.dataset == 'foggy_driving':
prefixs = ['public', 'pedestrian']
elif args.dataset == 'ade20k':
prefixs = ['']
else:
raise NotImplementedError
def parse_args():
parser = argparse.ArgumentParser(description='Semantically segment anything.')
parser.add_argument('--gt_path', help='the directory of gt annotations')
parser.add_argument('--result_path', help='the directory of semantic predictions')
parser.add_argument('--dataset', type=str, default='cityscapes', choices=['ade20k', 'cityscapes', 'foggy_driving'], help='specify the dataset')
args = parser.parse_args()
return args | null |
19,556 | import torch
import torch.nn.functional as F
def oneformer_cityscapes_segmentation(image, oneformer_cityscapes_processor, oneformer_cityscapes_model, rank):
inputs = oneformer_cityscapes_processor(images=image, task_inputs=["semantic"], return_tensors="pt").to(rank)
outputs = oneformer_cityscapes_model(**inputs)
predicted_semantic_map = oneformer_cityscapes_processor.post_process_semantic_segmentation(
outputs, target_sizes=[image.size[::-1]])[0]
return predicted_semantic_map | null |
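A corresponding hedged sketch for the Cityscapes branch; the checkpoint name is an assumption, and the input is a PIL image to match the image.size[::-1] call above.
import torch
from PIL import Image
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

rank = "cuda" if torch.cuda.is_available() else "cpu"
ckpt = "shi-labs/oneformer_cityscapes_swin_large"  # assumed checkpoint name
processor = OneFormerProcessor.from_pretrained(ckpt)
model = OneFormerForUniversalSegmentation.from_pretrained(ckpt).to(rank)
image = Image.new("RGB", (2048, 1024))  # stand-in for a real Cityscapes frame
semantic_map = oneformer_cityscapes_segmentation(image, processor, model, rank)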
19,557 | import os
import argparse
import torch
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
from pipeline import semantic_segment_anything_inference, eval_pipeline, img_load
from configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL
from configs.cityscapes_id2label import CONFIG as CONFIG_CITYSCAPES_ID2LABEL
import torch.distributed as dist
import torch.multiprocessing as mp
def parse_args():
parser = argparse.ArgumentParser(description='Semantically segment anything.')
parser.add_argument('--data_dir', help='specify the root path of images and masks')
parser.add_argument('--ckpt_path', default='ckp/sam_vit_h_4b8939.pth', help='specify the root path of SAM checkpoint')
parser.add_argument('--out_dir', help='the dir to save semantic annotations')
parser.add_argument('--save_img', default=False, action='store_true', help='whether to save annotated images')
parser.add_argument('--world_size', type=int, default=0, help='number of nodes')
parser.add_argument('--dataset', type=str, default='ade20k', choices=['ade20k', 'cityscapes', 'foggy_driving'], help='specify the set of class names')
    parser.add_argument('--eval', default=False, action='store_true', help='whether to execute evaluation')
parser.add_argument('--gt_path', default=None, help='specify the path to gt annotations')
parser.add_argument('--model', type=str, default='segformer', choices=['oneformer', 'segformer'], help='specify the semantic branch model')
args = parser.parse_args()
return args | null |
19,558 | import os
import torch
import torch.nn.functional as F
from PIL import Image
import mmcv
from tqdm import tqdm
from mmcv.utils import print_log
from mmdet.core.visualization.image import imshow_det_bboxes
from mmseg.core import intersect_and_union, pre_eval_to_metrics
from collections import OrderedDict
from prettytable import PrettyTable
import numpy as np
import pycocotools.mask as maskUtils
from configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL
from configs.coco_id2label import CONFIG as CONFIG_COCO_ID2LABEL
from clip import clip_classification
from clipseg import clipseg_segmentation
from oneformer import oneformer_coco_segmentation, oneformer_ade20k_segmentation, oneformer_cityscapes_segmentation
from blip import open_vocabulary_classification_blip
from segformer import segformer_segmentation as segformer_func
def load_filename_with_extensions(data_path, filename):
"""
    Returns the file with the extension corresponding to the given json filename.
    Raises an error if no such file is found.
Args:
filename (str): Filename (without extension).
Returns:
filename with the right extension.
"""
full_file_path = os.path.join(data_path, filename)
# List of image file extensions to attempt
image_extensions = ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.tiff']
    # Iterate through the image file extensions and look for an existing file
    for ext in image_extensions:
        # Check if the file with current extension exists
        if os.path.exists(full_file_path + ext):
            return full_file_path + ext  # return the path with the matching extension
raise FileNotFoundError(f"No such file {full_file_path}, checked for the following extensions {image_extensions}")
def clip_classification(image, class_list, top_k, clip_processor, clip_model, rank):
inputs = clip_processor(text=class_list, images=image, return_tensors="pt", padding=True).to(rank)
outputs = clip_model(**inputs)
logits_per_image = outputs.logits_per_image
probs = logits_per_image.softmax(dim=1)
if top_k == 1:
class_name = class_list[probs.argmax().item()]
return class_name
else:
top_k_indices = probs.topk(top_k, dim=1).indices[0]
top_k_class_names = [class_list[index] for index in top_k_indices]
return top_k_class_names
def clipseg_segmentation(image, class_list, clipseg_processor, clipseg_model, rank):
inputs = clipseg_processor(
text=class_list, images=[image] * len(class_list),
padding=True, return_tensors="pt").to(rank)
    # resize inputs['pixel_values'] to a fixed 512x512 resolution
h, w = inputs['pixel_values'].shape[-2:]
fixed_scale = (512, 512)
inputs['pixel_values'] = F.interpolate(
inputs['pixel_values'],
size=fixed_scale,
mode='bilinear',
align_corners=False)
outputs = clipseg_model(**inputs)
logits = F.interpolate(outputs.logits[None], size=(h, w), mode='bilinear', align_corners=False)[0]
return logits
def oneformer_coco_segmentation(image, oneformer_coco_processor, oneformer_coco_model, rank):
inputs = oneformer_coco_processor(images=image, task_inputs=["semantic"], return_tensors="pt").to(rank)
outputs = oneformer_coco_model(**inputs)
predicted_semantic_map = oneformer_coco_processor.post_process_semantic_segmentation(
outputs, target_sizes=[image.size[::-1]])[0]
return predicted_semantic_map
def oneformer_ade20k_segmentation(image, oneformer_ade20k_processor, oneformer_ade20k_model, rank):
inputs = oneformer_ade20k_processor(images=image, task_inputs=["semantic"], return_tensors="pt").to(rank)
outputs = oneformer_ade20k_model(**inputs)
predicted_semantic_map = oneformer_ade20k_processor.post_process_semantic_segmentation(
outputs, target_sizes=[image.size[::-1]])[0]
return predicted_semantic_map
def open_vocabulary_classification_blip(raw_image, blip_processor, blip_model, rank):
# unconditional image captioning
captioning_inputs = blip_processor(raw_image, return_tensors="pt").to(rank)
out = blip_model.generate(**captioning_inputs)
caption = blip_processor.decode(out[0], skip_special_tokens=True)
ov_class_list = get_noun_phrases(caption)
return ov_class_list
def semantic_annotation_pipeline(filename, data_path, output_path, rank, save_img=False, scale_small=1.2, scale_large=1.6, scale_huge=1.6,
clip_processor=None,
clip_model=None,
oneformer_ade20k_processor=None,
oneformer_ade20k_model=None,
oneformer_coco_processor=None,
oneformer_coco_model=None,
blip_processor=None,
blip_model=None,
clipseg_processor=None,
clipseg_model=None,
mask_generator=None):
img = mmcv.imread(load_filename_with_extensions(data_path, filename))
if mask_generator is None:
anns = mmcv.load(os.path.join(data_path, filename+'.json'))
else:
anns = {'annotations': mask_generator.generate(img)}
bitmasks, class_names = [], []
class_ids_from_oneformer_coco = oneformer_coco_segmentation(Image.fromarray(img),oneformer_coco_processor,oneformer_coco_model, rank)
class_ids_from_oneformer_ade20k = oneformer_ade20k_segmentation(Image.fromarray(img),oneformer_ade20k_processor,oneformer_ade20k_model, rank)
for ann in anns['annotations']:
valid_mask = torch.tensor(maskUtils.decode(ann['segmentation'])).bool()
# get the class ids of the valid pixels
coco_propose_classes_ids = class_ids_from_oneformer_coco[valid_mask]
ade20k_propose_classes_ids = class_ids_from_oneformer_ade20k[valid_mask]
top_k_coco_propose_classes_ids = torch.bincount(coco_propose_classes_ids.flatten()).topk(1).indices
top_k_ade20k_propose_classes_ids = torch.bincount(ade20k_propose_classes_ids.flatten()).topk(1).indices
local_class_names = set()
local_class_names = set.union(local_class_names, set([CONFIG_ADE20K_ID2LABEL['id2label'][str(class_id.item())] for class_id in top_k_ade20k_propose_classes_ids]))
local_class_names = set.union(local_class_names, set(([CONFIG_COCO_ID2LABEL['refined_id2label'][str(class_id.item())] for class_id in top_k_coco_propose_classes_ids])))
patch_small = mmcv.imcrop(img, np.array(
[ann['bbox'][0], ann['bbox'][1], ann['bbox'][0] + ann['bbox'][2], ann['bbox'][1] + ann['bbox'][3]]),
scale=scale_small)
patch_large = mmcv.imcrop(img, np.array(
[ann['bbox'][0], ann['bbox'][1], ann['bbox'][0] + ann['bbox'][2], ann['bbox'][1] + ann['bbox'][3]]),
scale=scale_large)
patch_huge = mmcv.imcrop(img, np.array(
[ann['bbox'][0], ann['bbox'][1], ann['bbox'][0] + ann['bbox'][2], ann['bbox'][1] + ann['bbox'][3]]),
scale=scale_huge)
valid_mask_huge_crop = mmcv.imcrop(valid_mask.numpy(), np.array(
[ann['bbox'][0], ann['bbox'][1], ann['bbox'][0] + ann['bbox'][2], ann['bbox'][1] + ann['bbox'][3]]),
scale=scale_huge)
op_class_list = open_vocabulary_classification_blip(patch_large,blip_processor, blip_model, rank)
local_class_list = list(set.union(local_class_names, set(op_class_list))) # , set(refined_imagenet_class_names)
mask_categories = clip_classification(patch_small, local_class_list, 3 if len(local_class_list)> 3 else len(local_class_list), clip_processor, clip_model, rank)
class_ids_patch_huge = clipseg_segmentation(patch_huge, mask_categories, clipseg_processor, clipseg_model, rank).argmax(0)
valid_mask_huge_crop = torch.tensor(valid_mask_huge_crop)
if valid_mask_huge_crop.shape != class_ids_patch_huge.shape:
valid_mask_huge_crop = F.interpolate(
valid_mask_huge_crop.unsqueeze(0).unsqueeze(0).float(),
size=(class_ids_patch_huge.shape[-2], class_ids_patch_huge.shape[-1]),
mode='nearest').squeeze(0).squeeze(0).bool()
top_1_patch_huge = torch.bincount(class_ids_patch_huge[valid_mask_huge_crop].flatten()).topk(1).indices
top_1_mask_category = mask_categories[top_1_patch_huge.item()]
ann['class_name'] = str(top_1_mask_category)
ann['class_proposals'] = mask_categories
class_names.append(str(top_1_mask_category))
# bitmasks.append(maskUtils.decode(ann['segmentation']))
# Delete variables that are no longer needed
del coco_propose_classes_ids
del ade20k_propose_classes_ids
del top_k_coco_propose_classes_ids
del top_k_ade20k_propose_classes_ids
del patch_small
del patch_large
del patch_huge
del valid_mask_huge_crop
del op_class_list
del mask_categories
del class_ids_patch_huge
mmcv.dump(anns, os.path.join(output_path, filename + '_semantic.json'))
print('[Save] save SSA-engine annotation results: ', os.path.join(output_path, filename + '_semantic.json'))
if save_img:
for ann in anns['annotations']:
bitmasks.append(maskUtils.decode(ann['segmentation']))
imshow_det_bboxes(img,
bboxes=None,
labels=np.arange(len(bitmasks)),
segms=np.stack(bitmasks),
class_names=class_names,
font_size=25,
show=False,
out_file=os.path.join(output_path, filename+'_semantic.png'))
# Delete variables that are no longer needed
del img
del anns
del class_ids_from_oneformer_coco
del class_ids_from_oneformer_ade20k | null |
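The per-mask class decision above is a majority vote over pixel-level class ids; a self-contained sketch of that bincount + topk step with made-up values:
import torch

propose_classes_ids = torch.tensor([2, 2, 7, 2, 7])  # class ids of pixels inside one mask
top_1 = torch.bincount(propose_classes_ids.flatten()).topk(1).indices
print(top_1.item())  # 2 -- the most frequent class id wins the vote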
19,559 | import os
import torch
import torch.nn.functional as F
from PIL import Image
import mmcv
from tqdm import tqdm
from mmcv.utils import print_log
from mmdet.core.visualization.image import imshow_det_bboxes
from mmseg.core import intersect_and_union, pre_eval_to_metrics
from collections import OrderedDict
from prettytable import PrettyTable
import numpy as np
import pycocotools.mask as maskUtils
from configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL
from configs.coco_id2label import CONFIG as CONFIG_COCO_ID2LABEL
from clip import clip_classification
from clipseg import clipseg_segmentation
from oneformer import oneformer_coco_segmentation, oneformer_ade20k_segmentation, oneformer_cityscapes_segmentation
from blip import open_vocabulary_classification_blip
from segformer import segformer_segmentation as segformer_func
def img_load(data_path, filename, dataset):
# load image
if dataset == 'ade20k':
img = mmcv.imread(os.path.join(data_path, filename+'.jpg'))
elif dataset == 'cityscapes' or dataset == 'foggy_driving':
img = mmcv.imread(os.path.join(data_path, filename+'.png'))
else:
raise NotImplementedError()
return img | null |
19,560 | import os
import torch
import torch.nn.functional as F
from PIL import Image
import mmcv
from tqdm import tqdm
from mmcv.utils import print_log
from mmdet.core.visualization.image import imshow_det_bboxes
from mmseg.core import intersect_and_union, pre_eval_to_metrics
from collections import OrderedDict
from prettytable import PrettyTable
import numpy as np
import pycocotools.mask as maskUtils
from configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL
from configs.coco_id2label import CONFIG as CONFIG_COCO_ID2LABEL
from clip import clip_classification
from clipseg import clipseg_segmentation
from oneformer import oneformer_coco_segmentation, oneformer_ade20k_segmentation, oneformer_cityscapes_segmentation
from blip import open_vocabulary_classification_blip
from segformer import segformer_segmentation as segformer_func
oneformer_func = {
'ade20k': oneformer_ade20k_segmentation,
'coco': oneformer_coco_segmentation,
'cityscapes': oneformer_cityscapes_segmentation,
'foggy_driving': oneformer_cityscapes_segmentation
}
def semantic_segment_anything_inference(filename, output_path, rank, img=None, save_img=False,
semantic_branch_processor=None,
semantic_branch_model=None,
mask_branch_model=None,
dataset=None,
id2label=None,
model='segformer'):
anns = {'annotations': mask_branch_model.generate(img)}
h, w, _ = img.shape
class_names = []
if model == 'oneformer':
class_ids = oneformer_func[dataset](Image.fromarray(img), semantic_branch_processor,
semantic_branch_model, rank)
elif model == 'segformer':
class_ids = segformer_func(img, semantic_branch_processor, semantic_branch_model, rank)
else:
raise NotImplementedError()
semantc_mask = class_ids.clone()
anns['annotations'] = sorted(anns['annotations'], key=lambda x: x['area'], reverse=True)
for ann in anns['annotations']:
valid_mask = torch.tensor(maskUtils.decode(ann['segmentation'])).bool()
# get the class ids of the valid pixels
propose_classes_ids = class_ids[valid_mask]
num_class_proposals = len(torch.unique(propose_classes_ids))
if num_class_proposals == 1:
semantc_mask[valid_mask] = propose_classes_ids[0]
ann['class_name'] = id2label['id2label'][str(propose_classes_ids[0].item())]
ann['class_proposals'] = id2label['id2label'][str(propose_classes_ids[0].item())]
class_names.append(ann['class_name'])
# bitmasks.append(maskUtils.decode(ann['segmentation']))
continue
top_1_propose_class_ids = torch.bincount(propose_classes_ids.flatten()).topk(1).indices
top_1_propose_class_names = [id2label['id2label'][str(class_id.item())] for class_id in top_1_propose_class_ids]
semantc_mask[valid_mask] = top_1_propose_class_ids
ann['class_name'] = top_1_propose_class_names[0]
ann['class_proposals'] = top_1_propose_class_names[0]
class_names.append(ann['class_name'])
# bitmasks.append(maskUtils.decode(ann['segmentation']))
del valid_mask
del propose_classes_ids
del num_class_proposals
del top_1_propose_class_ids
del top_1_propose_class_names
sematic_class_in_img = torch.unique(semantc_mask)
semantic_bitmasks, semantic_class_names = [], []
# semantic prediction
anns['semantic_mask'] = {}
for i in range(len(sematic_class_in_img)):
class_name = id2label['id2label'][str(sematic_class_in_img[i].item())]
class_mask = semantc_mask == sematic_class_in_img[i]
class_mask = class_mask.cpu().numpy().astype(np.uint8)
semantic_class_names.append(class_name)
semantic_bitmasks.append(class_mask)
anns['semantic_mask'][str(sematic_class_in_img[i].item())] = maskUtils.encode(np.array((semantc_mask == sematic_class_in_img[i]).cpu().numpy(), order='F', dtype=np.uint8))
anns['semantic_mask'][str(sematic_class_in_img[i].item())]['counts'] = anns['semantic_mask'][str(sematic_class_in_img[i].item())]['counts'].decode('utf-8')
if save_img:
imshow_det_bboxes(img,
bboxes=None,
labels=np.arange(len(sematic_class_in_img)),
segms=np.stack(semantic_bitmasks),
class_names=semantic_class_names,
font_size=25,
show=False,
out_file=os.path.join(output_path, filename + '_semantic.png'))
print('[Save] save SSA prediction: ', os.path.join(output_path, filename + '_semantic.png'))
mmcv.dump(anns, os.path.join(output_path, filename + '_semantic.json'))
    # manually clean up variables that are no longer needed
del img
del anns
del class_ids
del semantc_mask
# del bitmasks
del class_names
del semantic_bitmasks
del semantic_class_names
# gc.collect() | null |
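Because the RLE counts are stored as UTF-8 strings in the dumped JSON, reading a prediction back requires re-encoding them to bytes before pycocotools can decode them; a hedged sketch (the file name is illustrative):
import mmcv
import pycocotools.mask as maskUtils

anns = mmcv.load("example_semantic.json")  # assumed output of the function above
for class_id, rle in anns["semantic_mask"].items():
    rle = dict(rle, counts=rle["counts"].encode("utf-8"))  # pycocotools expects bytes
    binary_mask = maskUtils.decode(rle)  # H x W uint8 mask for this semantic class
    print(class_id, int(binary_mask.sum()))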
19,561 | import os
import torch
import torch.nn.functional as F
from PIL import Image
import mmcv
from tqdm import tqdm
from mmcv.utils import print_log
from mmdet.core.visualization.image import imshow_det_bboxes
from mmseg.core import intersect_and_union, pre_eval_to_metrics
from collections import OrderedDict
from prettytable import PrettyTable
import numpy as np
import pycocotools.mask as maskUtils
from configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL
from configs.coco_id2label import CONFIG as CONFIG_COCO_ID2LABEL
from clip import clip_classification
from clipseg import clipseg_segmentation
from oneformer import oneformer_coco_segmentation, oneformer_ade20k_segmentation, oneformer_cityscapes_segmentation
from blip import open_vocabulary_classification_blip
from segformer import segformer_segmentation as segformer_func
def eval_pipeline(gt_path, res_path, dataset):
logger = None
if dataset == 'cityscapes' or dataset == 'foggy_driving':
class_names = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
elif dataset == 'ade20k':
class_names = ('wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag')
file_client = mmcv.FileClient(**{'backend': 'disk'})
pre_eval_results = []
if dataset == 'cityscapes':
prefixs = ['frankfurt','lindau','munster']
elif dataset == 'foggy_driving':
prefixs = ['public', 'pedestrian']
elif dataset == 'ade20k':
prefixs = ['']
else:
raise NotImplementedError
for split in tqdm(prefixs, desc="Split loop"):
gt_path_split = os.path.join(gt_path, split)
res_path_split = os.path.join(res_path, split)
filenames = [fn_ for fn_ in os.listdir(res_path_split) if '.json' in fn_]
for i, fn_ in enumerate(tqdm(filenames, desc="File loop")):
pred_fn = os.path.join(res_path_split, fn_)
result = mmcv.load(pred_fn)
num_classes = len(class_names)
init_flag = True
for id_str, mask in result['semantic_mask'].items():
mask_ = maskUtils.decode(mask)
h, w = mask_.shape
if init_flag:
seg_mask = torch.zeros((1, 1, h, w))
init_flag = False
                mask_ = torch.from_numpy(mask_).unsqueeze(0).unsqueeze(0).bool()
seg_mask[mask_] = int(id_str)
seg_logit = torch.zeros((1, num_classes, h, w))
seg_logit.scatter_(1, seg_mask.long(), 1)
seg_logit = seg_logit.float()
seg_pred = F.softmax(seg_logit, dim=1).argmax(dim=1).squeeze(0).numpy()
if dataset == 'cityscapes' or dataset == 'foggy_driving':
gt_fn_ = os.path.join(gt_path_split, fn_.replace('_leftImg8bit_semantic.json','_gtFine_labelTrainIds.png'))
elif dataset == 'ade20k':
gt_fn_ = os.path.join(gt_path, fn_.replace('_semantic.json','.png'))
img_bytes = file_client.get(gt_fn_)
seg_map = mmcv.imfrombytes(
img_bytes, flag='unchanged',
backend='pillow').squeeze().astype(np.uint8)
if dataset=='ade20k':
seg_map = seg_map - 1
pre_eval_results.append(intersect_and_union(
seg_pred,
seg_map,
num_classes,
255,
label_map=dict(),
reduce_zero_label=False))
ret_metrics = pre_eval_to_metrics(pre_eval_results, ['mIoU'])
ret_metrics_summary = OrderedDict({
ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
# each class table
ret_metrics.pop('aAcc', None)
ret_metrics_class = OrderedDict({
ret_metric: np.round(ret_metric_value * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
ret_metrics_class.update({'Class': class_names})
ret_metrics_class.move_to_end('Class', last=False)
# for logger
class_table_data = PrettyTable()
for key, val in ret_metrics_class.items():
class_table_data.add_column(key, val)
summary_table_data = PrettyTable()
for key, val in ret_metrics_summary.items():
if key == 'aAcc':
summary_table_data.add_column(key, [val])
else:
summary_table_data.add_column('m' + key, [val])
print_log('per class results:', logger)
print_log('\n' + class_table_data.get_string(), logger=logger)
print_log('Summary:', logger)
print_log('\n' + summary_table_data.get_string(), logger=logger) | null |
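The scatter_ call above turns the integer label map into one-hot logits; a tiny worked example with made-up shapes:
import torch

num_classes = 3
seg_mask = torch.tensor([[[[0, 2], [1, 1]]]], dtype=torch.long)  # (1, 1, 2, 2) label map
seg_logit = torch.zeros((1, num_classes, 2, 2))
seg_logit.scatter_(1, seg_mask, 1)  # set channel `label` to 1 at every pixel
assert seg_logit.argmax(dim=1).equal(seg_mask.squeeze(1))  # argmax recovers the labels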
19,562 | import os
import torch
import argparse
from pipeline import semantic_annotation_pipeline
from transformers import CLIPProcessor, CLIPModel
from transformers import AutoProcessor, CLIPSegForImageSegmentation
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
from transformers import BlipProcessor, BlipForConditionalGeneration
import torch.distributed as dist
import torch.multiprocessing as mp
def parse_args():
parser = argparse.ArgumentParser(description='Semantically segment anything.')
parser.add_argument('--data_dir', help='specify the root path of images and masks')
parser.add_argument('--out_dir', help='the dir to save semantic annotations')
parser.add_argument('--save_img', default=False, action='store_true', help='whether to save annotated images')
parser.add_argument('--world_size', type=int, default=0, help='number of nodes')
    parser.add_argument('--sam', default=False, action='store_true', help='run SAM to generate masks instead of loading a given annotation json, default is False')
parser.add_argument('--ckpt_path', default='ckp/sam_vit_h_4b8939.pth', help='specify the root path of SAM checkpoint')
parser.add_argument('--light_mode', default=False, action='store_true', help='use light mode')
args = parser.parse_args()
return args | null |
19,563 | import os
import numpy
from setuptools import find_packages, setup, Extension
def read(rel_path: str) -> str:
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path), encoding="utf-8") as fp:
return fp.read()
def get_version(rel_path: str) -> str:
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.") | null |
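For illustration, the quote-splitting logic in get_version reduces to the following (the version line is made up):
line = '__version__ = "0.1.0"'  # the kind of line get_version scans for
delim = '"' if '"' in line else "'"
print(line.split(delim)[1])  # 0.1.0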
19,564 | from ...data.dataset.handler import DataHandlerLP
from ...data.dataset.processor import Processor
from ...utils import get_callable_kwargs
from ...data.dataset import processor as processor_module
from inspect import getfullargspec
class Processor(Serializable):
def fit(self, df: pd.DataFrame = None):
"""
learn data processing parameters
Parameters
----------
df : pd.DataFrame
            When we fit and process data with processors one by one, the fit function relies on the output of the
            previous processor, i.e. `df`.
"""
def __call__(self, df: pd.DataFrame):
"""
process the data
        NOTE: **The processor could change the content of `df` in place!**
        Users should keep a copy of the data outside.
Parameters
----------
df : pd.DataFrame
The raw_df of handler or result from previous processor.
"""
def is_for_infer(self) -> bool:
"""
Is this processor usable for inference
Some processors are not usable for inference.
Returns
-------
bool:
            if it is usable for inference.
"""
return True
def readonly(self) -> bool:
"""
        Does the processor treat the input data as read-only (i.e. does not write the input data) when processing?
        Knowing the read-only information helps the Handler avoid unnecessary copies.
"""
return False
def config(self, **kwargs):
attr_list = {"fit_start_time", "fit_end_time"}
for k, v in kwargs.items():
if k in attr_list and hasattr(self, k):
setattr(self, k, v)
for attr in attr_list:
if attr in kwargs:
kwargs.pop(attr)
super().config(**kwargs)
def check_transform_proc(proc_l, fit_start_time, fit_end_time):
new_l = []
for p in proc_l:
if not isinstance(p, Processor):
klass, pkwargs = get_callable_kwargs(p, processor_module)
args = getfullargspec(klass).args
if "fit_start_time" in args and "fit_end_time" in args:
assert (
fit_start_time is not None and fit_end_time is not None
), "Make sure `fit_start_time` and `fit_end_time` are not None."
pkwargs.update(
{
"fit_start_time": fit_start_time,
"fit_end_time": fit_end_time,
}
)
proc_config = {"class": klass.__name__, "kwargs": pkwargs}
if isinstance(p, dict) and "module_path" in p:
proc_config["module_path"] = p["module_path"]
new_l.append(proc_config)
else:
new_l.append(p)
return new_l | null |
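A hedged sketch of what check_transform_proc normalizes; the processor class names are assumptions about qlib.data.dataset.processor and the dates are illustrative:
proc_l = [
    {"class": "MinMaxNorm", "kwargs": {"fields_group": "feature"}},  # accepts fit_start_time/fit_end_time
    {"class": "DropnaProcessor", "kwargs": {}},  # does not, so nothing is injected
]
new_l = check_transform_proc(proc_l, "2008-01-01", "2014-12-31")
# Entries whose class accepts fit_start_time/fit_end_time get those kwargs filled in.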
19,565 | import copy
import torch
import warnings
import numpy as np
import pandas as pd
from qlib.data.dataset import DatasetH
device = "cuda" if torch.cuda.is_available() else "cpu"
def _to_tensor(x):
if not isinstance(x, torch.Tensor):
return torch.tensor(x, dtype=torch.float, device=device) # pylint: disable=E1101
return x | null |
19,566 | import copy
import torch
import warnings
import numpy as np
import pandas as pd
from qlib.data.dataset import DatasetH
The provided code snippet includes necessary dependencies for implementing the `_create_ts_slices` function. Write a Python function `def _create_ts_slices(index, seq_len)` to solve the following problem:
create time series slices from pandas index Args: index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order seq_len (int): sequence length
Here is the function:
def _create_ts_slices(index, seq_len):
"""
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
"""
assert isinstance(index, pd.MultiIndex), "unsupported index type"
assert seq_len > 0, "sequence length should be larger than 0"
assert index.is_monotonic_increasing, "index should be sorted"
# number of dates for each instrument
sample_count_by_insts = index.to_series().groupby(level=0).size().values
# start index for each instrument
start_index_of_insts = np.roll(np.cumsum(sample_count_by_insts), 1)
start_index_of_insts[0] = 0
# all the [start, stop) indices of features
# features between [start, stop) will be used to predict label at `stop - 1`
slices = []
for cur_loc, cur_cnt in zip(start_index_of_insts, sample_count_by_insts):
for stop in range(1, cur_cnt + 1):
end = cur_loc + stop
start = max(end - seq_len, 0)
slices.append(slice(start, end))
slices = np.array(slices, dtype="object")
assert len(slices) == len(index) # the i-th slice = index[i]
return slices | create time series slices from pandas index Args: index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order seq_len (int): sequence length |
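A worked example with a single made-up instrument, three dates, and seq_len=2:
import pandas as pd

index = pd.MultiIndex.from_product([["SH600000"], pd.date_range("2020-01-01", periods=3)])
print(_create_ts_slices(index, seq_len=2))
# [slice(0, 1, None) slice(0, 2, None) slice(1, 3, None)]
# i.e. the i-th slice selects up to seq_len rows ending at (and predicting) row i.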
19,567 | import copy
import torch
import warnings
import numpy as np
import pandas as pd
from qlib.data.dataset import DatasetH
The provided code snippet includes necessary dependencies for implementing the `_get_date_parse_fn` function. Write a Python function `def _get_date_parse_fn(target)` to solve the following problem:
get date parse function This method is used to parse date arguments as target type. Example: get_date_parse_fn('20120101')('2017-01-01') => '20170101' get_date_parse_fn(20120101)('2017-01-01') => 20170101
Here is the function:
def _get_date_parse_fn(target):
"""get date parse function
This method is used to parse date arguments as target type.
Example:
get_date_parse_fn('20120101')('2017-01-01') => '20170101'
get_date_parse_fn(20120101)('2017-01-01') => 20170101
"""
if isinstance(target, int):
def _fn(x):
return int(str(x).replace("-", "")[:8]) # 20200201
elif isinstance(target, str) and len(target) == 8:
def _fn(x):
return str(x).replace("-", "")[:8] # '20200201'
else:
def _fn(x):
return x # '2021-01-01'
return _fn | get date parse function This method is used to parse date arguments as target type. Example: get_date_parse_fn('20120101')('2017-01-01') => '20170101' get_date_parse_fn(20120101)('2017-01-01') => 20170101 |
19,568 | import copy
import torch
import warnings
import numpy as np
import pandas as pd
from qlib.data.dataset import DatasetH
The provided code snippet includes necessary dependencies for implementing the `_maybe_padding` function. Write a Python function `def _maybe_padding(x, seq_len, zeros=None)` to solve the following problem:
padding 2d <time * feature> data with zeros Args: x (np.ndarray): 2d data with shape <time * feature> seq_len (int): target sequence length zeros (np.ndarray): zeros with shape <seq_len * feature>
Here is the function:
def _maybe_padding(x, seq_len, zeros=None):
"""padding 2d <time * feature> data with zeros
Args:
x (np.ndarray): 2d data with shape <time * feature>
seq_len (int): target sequence length
zeros (np.ndarray): zeros with shape <seq_len * feature>
"""
assert seq_len > 0, "sequence length should be larger than 0"
if zeros is None:
zeros = np.zeros((seq_len, x.shape[1]), dtype=np.float32)
else:
assert len(zeros) >= seq_len, "zeros matrix is not large enough for padding"
if len(x) != seq_len: # padding zeros
x = np.concatenate([zeros[: seq_len - len(x), : x.shape[1]], x], axis=0)
return x | padding 2d <time * feature> data with zeros Args: x (np.ndarray): 2d data with shape <time * feature> seq_len (int): target sequence length zeros (np.ndarray): zeros with shape <seq_len * feature> |
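For example, padding a 2-step window up to seq_len=4 prepends two rows of zeros (data is made up):
import numpy as np

x = np.ones((2, 3), dtype=np.float32)
padded = _maybe_padding(x, seq_len=4)
print(padded.shape)  # (4, 3)
print(padded[:2].sum())  # 0.0 -- the two prepended rows are zeros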
19,569 | import pandas as pd
from typing import Dict, Iterable, Union
import builtins
def align_index(df_dict, join):
res = {}
for k, df in df_dict.items():
if join is not None and k != join:
df = df.reindex(df_dict[join].index)
res[k] = df
return res | null |
19,570 | import pandas as pd
from typing import Dict, Iterable, Union
class SepDataFrame:
"""
(Sep)erate DataFrame
    We usually concat multiple dataframes to be processed together (such as feature, label, weight, filter).
    However, they are usually used separately in the end.
    This results in extra cost for concatenating and splitting data (reshaping and copying data in memory is very expensive).
    SepDataFrame tries to act like a DataFrame whose columns have a MultiIndex.
"""
# TODO:
    # SepDataFrame tries to behave like a pandas DataFrame, but it is still not the same
# Contributions are welcome to make it more complete.
def __init__(self, df_dict: Dict[str, pd.DataFrame], join: str, skip_align=False):
"""
initialize the data based on the dataframe dictionary
Parameters
----------
df_dict : Dict[str, pd.DataFrame]
dataframe dictionary
join : str
how to join the data
It will reindex the dataframe based on the join key.
If join is None, the reindex step will be skipped
skip_align :
for some cases, we can improve performance by skipping aligning index
"""
self.join = join
if skip_align:
self._df_dict = df_dict
else:
self._df_dict = align_index(df_dict, join)
    @property
    def loc(self):
return SDFLoc(self, join=self.join)
    @property
    def index(self):
return self._df_dict[self.join].index
def apply_each(self, method: str, skip_align=True, *args, **kwargs):
"""
Assumptions:
- inplace methods will return None
"""
inplace = False
df_dict = {}
for k, df in self._df_dict.items():
df_dict[k] = getattr(df, method)(*args, **kwargs)
if df_dict[k] is None:
inplace = True
if not inplace:
return SepDataFrame(df_dict=df_dict, join=self.join, skip_align=skip_align)
def sort_index(self, *args, **kwargs):
return self.apply_each("sort_index", True, *args, **kwargs)
def copy(self, *args, **kwargs):
return self.apply_each("copy", True, *args, **kwargs)
def _update_join(self):
if self.join not in self:
if len(self._df_dict) > 0:
self.join = next(iter(self._df_dict.keys()))
else:
# NOTE: this will change the behavior of previous reindex when all the keys are empty
self.join = None
def __getitem__(self, item):
# TODO: behave more like pandas when multiindex
return self._df_dict[item]
def __setitem__(self, item: str, df: Union[pd.DataFrame, pd.Series]):
# TODO: consider the join behavior
if not isinstance(item, tuple):
self._df_dict[item] = df
else:
# NOTE: corner case of MultiIndex
_df_dict_key, *col_name = item
col_name = tuple(col_name)
if _df_dict_key in self._df_dict:
if len(col_name) == 1:
col_name = col_name[0]
self._df_dict[_df_dict_key][col_name] = df
else:
if isinstance(df, pd.Series):
if len(col_name) == 1:
col_name = col_name[0]
self._df_dict[_df_dict_key] = df.to_frame(col_name)
else:
df_copy = df.copy() # avoid changing df
df_copy.columns = pd.MultiIndex.from_tuples([(*col_name, *idx) for idx in df.columns.to_list()])
self._df_dict[_df_dict_key] = df_copy
def __delitem__(self, item: str):
del self._df_dict[item]
self._update_join()
def __contains__(self, item):
return item in self._df_dict
def __len__(self):
return len(self._df_dict[self.join])
def droplevel(self, *args, **kwargs):
raise NotImplementedError(f"Please implement the `droplevel` method")
    @property
    def columns(self):
dfs = []
for k, df in self._df_dict.items():
df = df.head(0)
df.columns = pd.MultiIndex.from_product([[k], df.columns])
dfs.append(df)
return pd.concat(dfs, axis=1).columns
# Useless methods
    @staticmethod
    def merge(df_dict: Dict[str, pd.DataFrame], join: str):
all_df = df_dict[join]
for k, df in df_dict.items():
if k != join:
all_df = all_df.join(df)
return all_df
import builtins
def _isinstance(instance, cls):
if isinstance_orig(instance, SepDataFrame): # pylint: disable=E0602 # noqa: F821
if isinstance(cls, Iterable):
for c in cls:
if c is pd.DataFrame:
return True
elif cls is pd.DataFrame:
return True
return isinstance_orig(instance, cls) # pylint: disable=E0602 # noqa: F821 | null |
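A minimal usage sketch with made-up feature/label frames; the label frame is reindexed to the join frame's index on construction:
import pandas as pd

feature = pd.DataFrame({"f0": [1.0, 2.0]}, index=["a", "b"])
label = pd.DataFrame({"y": [0, 1]}, index=["b", "a"])
sdf = SepDataFrame({"feature": feature, "label": label}, join="feature")
print(sdf["label"])  # reindexed to feature's index: rows 'a', 'b'
print(len(sdf))  # 2 -- the length of the join frame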
19,571 | import argparse
import importlib
import os
import yaml
from .config import TunerConfigManager
TUNER_CONFIG_MANAGER = TunerConfigManager(args.config_path)
def run():
# 1. Get pipeline class.
tuner_pipeline_class = getattr(importlib.import_module(".pipeline", package="qlib.contrib.tuner"), "Pipeline")
# 2. Init tuner pipeline.
tuner_pipeline = tuner_pipeline_class(TUNER_CONFIG_MANAGER)
# 3. Begin to tune
tuner_pipeline.run() | null |
19,572 | import pathlib
import pickle
import yaml
import pandas as pd
from ...data import D
from ...config import C
from ...log import get_module_logger
from ...utils import get_next_trading_date
from ...backtest.exchange import Exchange
The provided code snippet includes necessary dependencies for implementing the `load_instance` function. Write a Python function `def load_instance(file_path)` to solve the following problem:
load a pickle file Parameter file_path : string / pathlib.Path() path of file to be loaded :return An instance loaded from file
Here is the function:
def load_instance(file_path):
"""
load a pickle file
Parameter
file_path : string / pathlib.Path()
path of file to be loaded
:return
An instance loaded from file
"""
file_path = pathlib.Path(file_path)
if not file_path.exists():
raise ValueError("Cannot find file {}".format(file_path))
with file_path.open("rb") as fr:
instance = pickle.load(fr)
return instance | load a pickle file Parameter file_path : string / pathlib.Path() path of file to be loaded :return An instance loaded from file |
19,573 | import pathlib
import pickle
import yaml
import pandas as pd
from ...data import D
from ...config import C
from ...log import get_module_logger
from ...utils import get_next_trading_date
from ...backtest.exchange import Exchange
C = QlibConfig(_default_config)
The provided code snippet includes necessary dependencies for implementing the `save_instance` function. Write a Python function `def save_instance(instance, file_path)` to solve the following problem:
save(dump) an instance to a pickle file Parameter instance : data to be dumped file_path : string / pathlib.Path() path of file to be dumped
Here is the function:
def save_instance(instance, file_path):
"""
save(dump) an instance to a pickle file
Parameter
instance :
data to be dumped
file_path : string / pathlib.Path()
path of file to be dumped
"""
file_path = pathlib.Path(file_path)
with file_path.open("wb") as fr:
pickle.dump(instance, fr, C.dump_protocol_version) | save(dump) an instance to a pickle file Parameter instance : data to be dumped file_path : string / pathlib.Path() path of file to be dumped |
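The two helpers form a simple pickle round trip; a hedged sketch (the path is illustrative):
obj = {"positions": [1, 2, 3]}
save_instance(obj, "/tmp/user_instance.pkl")
restored = load_instance("/tmp/user_instance.pkl")
assert restored == obj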
19,574 | import pathlib
import pickle
import yaml
import pandas as pd
from ...data import D
from ...config import C
from ...log import get_module_logger
from ...utils import get_next_trading_date
from ...backtest.exchange import Exchange
def create_user_folder(path):
path = pathlib.Path(path)
if path.exists():
return
path.mkdir(parents=True)
head = pd.DataFrame(columns=("user_id", "add_date"))
head.to_csv(path / "users.csv", index=None) | null |
19,575 | import pathlib
import pickle
import yaml
import pandas as pd
from ...data import D
from ...config import C
from ...log import get_module_logger
from ...utils import get_next_trading_date
from ...backtest.exchange import Exchange
log = get_module_logger("utils")
def get_next_trading_date(trading_date, future=False):
"""get next trading date
    Parameters
    ----------
    trading_date : pandas.Timestamp
        the current trading date
"""
return get_date_by_shift(trading_date, 1, future=future)
class Exchange:
# `quote_df` is a pd.DataFrame class that contains basic information for backtesting
# After some processing, the data will later be maintained by `quote_cls` object for faster data retrieving.
# Some conventions for `quote_df`
# - $close is for calculating the total value at end of each day.
# - if $close is None, the stock on that day is regarded as suspended.
# - $factor is for rounding to the trading unit;
# - if any $factor is missing when $close exists, trading unit rounding will be disabled
quote_df: pd.DataFrame
def __init__(
self,
freq: str = "day",
start_time: Union[pd.Timestamp, str] = None,
end_time: Union[pd.Timestamp, str] = None,
codes: Union[list, str] = "all",
deal_price: Union[str, Tuple[str, str], List[str], None] = None,
subscribe_fields: list = [],
limit_threshold: Union[Tuple[str, str], float, None] = None,
volume_threshold: Union[tuple, dict, None] = None,
open_cost: float = 0.0015,
close_cost: float = 0.0025,
min_cost: float = 5.0,
impact_cost: float = 0.0,
extra_quote: pd.DataFrame = None,
quote_cls: Type[BaseQuote] = NumpyQuote,
**kwargs: Any,
) -> None:
"""__init__
:param freq: frequency of data
:param start_time: closed start time for backtest
:param end_time: closed end time for backtest
:param codes: list stock_id list or a string of instruments(i.e. all, csi500, sse50)
:param deal_price: Union[str, Tuple[str, str], List[str]]
The `deal_price` supports following two types of input
- <deal_price> : str
- (<buy_price>, <sell_price>): Tuple[str] or List[str]
<deal_price>, <buy_price> or <sell_price> := <price>
<price> := str
- for example '$close', '$open', '$vwap' ("close" is OK. `Exchange` will help to prepend
"$" to the expression)
:param subscribe_fields: list, subscribe fields. This expressions will be added to the query and `self.quote`.
It is useful when users want more fields to be queried
:param limit_threshold: Union[Tuple[str, str], float, None]
1) `None`: no limitation
2) float, 0.1 for example, default None
3) Tuple[str, str]: (<the expression for buying stock limitation>,
<the expression for sell stock limitation>)
`False` value indicates the stock is tradable
`True` value indicates the stock is limited and not tradable
:param volume_threshold: Union[
Dict[
"all": ("cum" or "current", limit_str),
"buy": ("cum" or "current", limit_str),
"sell":("cum" or "current", limit_str),
],
("cum" or "current", limit_str),
]
1) ("cum" or "current", limit_str) denotes a single volume limit.
                - limit_str is a qlib data expression, which allows you to define your own operators.
                    Please refer to qlib/contrib/ops/high_freq.py for custom high-frequency operators
                    such as DayCumsum. !!!NOTE: if you want to use a custom operator, you need to
                    register it in qlib_init.
- "cum" means that this is a cumulative value over time, such as cumulative market
volume. So when it is used as a volume limit, it is necessary to subtract the dealt
amount.
- "current" means that this is a real-time value and will not accumulate over time,
so it can be directly used as a capacity limit.
e.g. ("cum", "0.2 * DayCumsum($volume, '9:45', '14:45')"), ("current", "$bidV1")
2) "all" means the volume limits are both buying and selling.
"buy" means the volume limits of buying. "sell" means the volume limits of selling.
Different volume limits will be aggregated with min(). If volume_threshold is only
("cum" or "current", limit_str) instead of a dict, the volume limits are for
                both by default. In other words, it is the same as {"all": ("cum" or "current", limit_str)}.
3) e.g. "volume_threshold": {
"all": ("cum", "0.2 * DayCumsum($volume, '9:45', '14:45')"),
"buy": ("current", "$askV1"),
"sell": ("current", "$bidV1"),
}
:param open_cost: cost rate for open, default 0.0015
:param close_cost: cost rate for close, default 0.0025
:param trade_unit: trade unit, 100 for China A market.
None for disable trade unit.
**NOTE**: `trade_unit` is included in the `kwargs`. It is necessary because we must
distinguish `not set` and `disable trade_unit`
:param min_cost: min cost, default 5
:param impact_cost: market impact cost rate (a.k.a. slippage). A recommended value is 0.1.
:param extra_quote: pandas, dataframe consists of
columns: like ['$vwap', '$close', '$volume', '$factor', 'limit_sell', 'limit_buy'].
The limit indicates that the etf is tradable on a specific day.
Necessary fields:
$close is for calculating the total value at end of each day.
Optional fields:
$volume is only necessary when we limit the trade amount or calculate
PA(vwap) indicator
$vwap is only necessary when we use the $vwap price as the deal price
$factor is for rounding to the trading unit
limit_sell will be set to False by default (False indicates we can sell
this target on this day).
limit_buy will be set to False by default (False indicates we can buy
this target on this day).
index: MultipleIndex(instrument, pd.Datetime)
"""
self.freq = freq
self.start_time = start_time
self.end_time = end_time
self.trade_unit = kwargs.pop("trade_unit", C.trade_unit)
if len(kwargs) > 0:
raise ValueError(f"Get Unexpected arguments {kwargs}")
if limit_threshold is None:
limit_threshold = C.limit_threshold
if deal_price is None:
deal_price = C.deal_price
# we have some verbose information here. So logging is enabled
self.logger = get_module_logger("online operator")
# TODO: the quote, trade_dates, codes are not necessary.
# It is just for performance consideration.
self.limit_type = self._get_limit_type(limit_threshold)
if limit_threshold is None:
if C.region in [REG_CN, REG_TW]:
self.logger.warning(f"limit_threshold not set. The stocks hit the limit may be bought/sold")
elif self.limit_type == self.LT_FLT and abs(cast(float, limit_threshold)) > 0.1:
if C.region in [REG_CN, REG_TW]:
self.logger.warning(f"limit_threshold may not be set to a reasonable value")
if isinstance(deal_price, str):
if deal_price[0] != "$":
deal_price = "$" + deal_price
self.buy_price = self.sell_price = deal_price
elif isinstance(deal_price, (tuple, list)):
self.buy_price, self.sell_price = cast(Tuple[str, str], deal_price)
else:
raise NotImplementedError(f"This type of input is not supported")
if isinstance(codes, str):
codes = D.instruments(codes)
self.codes = codes
# Necessary fields
# $close is for calculating the total value at end of each day.
# - if $close is None, the stock on that day is regarded as suspended.
# $factor is for rounding to the trading unit
# $change is for calculating the limit of the stock
# get volume limit from kwargs
self.buy_vol_limit, self.sell_vol_limit, vol_lt_fields = self._get_vol_limit(volume_threshold)
necessary_fields = {self.buy_price, self.sell_price, "$close", "$change", "$factor", "$volume"}
if self.limit_type == self.LT_TP_EXP:
assert isinstance(limit_threshold, tuple)
for exp in limit_threshold:
necessary_fields.add(exp)
all_fields = list(necessary_fields | set(vol_lt_fields) | set(subscribe_fields))
self.all_fields = all_fields
self.open_cost = open_cost
self.close_cost = close_cost
self.min_cost = min_cost
self.impact_cost = impact_cost
self.limit_threshold: Union[Tuple[str, str], float, None] = limit_threshold
self.volume_threshold = volume_threshold
self.extra_quote = extra_quote
self.get_quote_from_qlib()
# init quote by quote_df
self.quote_cls = quote_cls
self.quote: BaseQuote = self.quote_cls(self.quote_df, freq)
def get_quote_from_qlib(self) -> None:
# get stock data from qlib
if len(self.codes) == 0:
self.codes = D.instruments()
self.quote_df = D.features(
self.codes,
self.all_fields,
self.start_time,
self.end_time,
freq=self.freq,
disk_cache=True,
)
self.quote_df.columns = self.all_fields
# check buy_price data and sell_price data
for attr in ("buy_price", "sell_price"):
pstr = getattr(self, attr) # price string
if self.quote_df[pstr].isna().any():
self.logger.warning("{} field data contains nan.".format(pstr))
# update trade_w_adj_price
if (self.quote_df["$factor"].isna() & ~self.quote_df["$close"].isna()).any():
# The 'factor.day.bin' file not exists, and `factor` field contains `nan`
# Use adjusted price
self.trade_w_adj_price = True
self.logger.warning("factor.day.bin file not exists or factor contains `nan`. Order using adjusted_price.")
if self.trade_unit is not None:
self.logger.warning(f"trade unit {self.trade_unit} is not supported in adjusted_price mode.")
else:
# The `factor.day.bin` file exists and all data `close` and `factor` are not `nan`
# Use normal price
self.trade_w_adj_price = False
# update limit
self._update_limit(self.limit_threshold)
# concat extra_quote
if self.extra_quote is not None:
# process extra_quote
if "$close" not in self.extra_quote:
raise ValueError("$close is necessray in extra_quote")
for attr in "buy_price", "sell_price":
pstr = getattr(self, attr) # price string
if pstr not in self.extra_quote.columns:
self.extra_quote[pstr] = self.extra_quote["$close"]
self.logger.warning(f"No {pstr} set for extra_quote. Use $close as {pstr}.")
if "$factor" not in self.extra_quote.columns:
self.extra_quote["$factor"] = 1.0
self.logger.warning("No $factor set for extra_quote. Use 1.0 as $factor.")
if "limit_sell" not in self.extra_quote.columns:
self.extra_quote["limit_sell"] = False
self.logger.warning("No limit_sell set for extra_quote. All stock will be able to be sold.")
if "limit_buy" not in self.extra_quote.columns:
self.extra_quote["limit_buy"] = False
self.logger.warning("No limit_buy set for extra_quote. All stock will be able to be bought.")
assert set(self.extra_quote.columns) == set(self.quote_df.columns) - {"$change"}
self.quote_df = pd.concat([self.quote_df, self.extra_quote], sort=False, axis=0)
LT_TP_EXP = "(exp)" # Tuple[str, str]: the limitation is calculated by a Qlib expression.
LT_FLT = "float" # float: the trading limitation is based on `abs($change) < limit_threshold`
LT_NONE = "none" # none: there is no trading limitation
def _get_limit_type(self, limit_threshold: Union[tuple, float, None]) -> str:
"""get limit type"""
if isinstance(limit_threshold, tuple):
return self.LT_TP_EXP
elif isinstance(limit_threshold, float):
return self.LT_FLT
elif limit_threshold is None:
return self.LT_NONE
else:
raise NotImplementedError(f"This type of `limit_threshold` is not supported")
def _update_limit(self, limit_threshold: Union[Tuple, float, None]) -> None:
# $close may contain NaN, the nan indicates that the stock is not tradable at that timestamp
suspended = self.quote_df["$close"].isna()
# check limit_threshold
limit_type = self._get_limit_type(limit_threshold)
if limit_type == self.LT_NONE:
self.quote_df["limit_buy"] = suspended
self.quote_df["limit_sell"] = suspended
elif limit_type == self.LT_TP_EXP:
# set limit
limit_threshold = cast(tuple, limit_threshold)
# astype bool is necessary, because quote_df is an expression and could be float
self.quote_df["limit_buy"] = self.quote_df[limit_threshold[0]].astype("bool") | suspended
self.quote_df["limit_sell"] = self.quote_df[limit_threshold[1]].astype("bool") | suspended
elif limit_type == self.LT_FLT:
limit_threshold = cast(float, limit_threshold)
self.quote_df["limit_buy"] = self.quote_df["$change"].ge(limit_threshold) | suspended
self.quote_df["limit_sell"] = (
self.quote_df["$change"].le(-limit_threshold) | suspended
) # pylint: disable=E1130
    @staticmethod
    def _get_vol_limit(volume_threshold: Union[tuple, dict, None]) -> Tuple[Optional[list], Optional[list], set]:
"""
        preprocess the volume limit.
        get the fields that need to be fetched from qlib.
        get the volume limit lists for buying and selling, each composed of all applicable limits.
Parameters
----------
volume_threshold :
please refer to the doc of exchange.
Returns
-------
        buy_vol_limit: List[Tuple[str]]
            all volume limits for buying.
        sell_vol_limit: List[Tuple[str]]
            all volume limits for selling.
        fields: set
            the fields that need to be fetched from qlib.
Raises
------
ValueError
the format of volume_threshold is not supported.
"""
if volume_threshold is None:
return None, None, set()
fields = set()
buy_vol_limit = []
sell_vol_limit = []
if isinstance(volume_threshold, tuple):
volume_threshold = {"all": volume_threshold}
assert isinstance(volume_threshold, dict)
for key, vol_limit in volume_threshold.items():
assert isinstance(vol_limit, tuple)
fields.add(vol_limit[1])
if key in ("buy", "all"):
buy_vol_limit.append(vol_limit)
if key in ("sell", "all"):
sell_vol_limit.append(vol_limit)
return buy_vol_limit, sell_vol_limit, fields
def check_stock_limit(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
direction: int | None = None,
) -> bool:
"""
Parameters
----------
stock_id : str
start_time: pd.Timestamp
end_time: pd.Timestamp
direction : int, optional
trade direction, by default None
- if direction is None, check if tradable for buying and selling.
- if direction == Order.BUY, check the if tradable for buying
- if direction == Order.SELL, check the sell limit for selling.
Returns
-------
True: the trading of the stock is limited (maybe hit the highest/lowest price), hence the stock is not tradable
False: the trading of the stock is not limited, hence the stock may be tradable
"""
# NOTE:
# **all** is used when checking limitation.
        # For example, the trading of a stock is limited for the whole day only if every minute of that day is limited.
if direction is None:
# The trading limitation is related to the trading direction
# if the direction is not provided, then any limitation from buy or sell will result in trading limitation
buy_limit = self.quote.get_data(stock_id, start_time, end_time, field="limit_buy", method="all")
sell_limit = self.quote.get_data(stock_id, start_time, end_time, field="limit_sell", method="all")
return bool(buy_limit or sell_limit)
elif direction == Order.BUY:
return cast(bool, self.quote.get_data(stock_id, start_time, end_time, field="limit_buy", method="all"))
elif direction == Order.SELL:
return cast(bool, self.quote.get_data(stock_id, start_time, end_time, field="limit_sell", method="all"))
else:
raise ValueError(f"direction {direction} is not supported!")
def check_stock_suspended(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
) -> bool:
"""if stock is suspended(hence not tradable), True will be returned"""
# is suspended
if stock_id in self.quote.get_all_stock():
# suspended stocks are represented by None $close stock
# The $close may contain NaN,
close = self.quote.get_data(stock_id, start_time, end_time, "$close")
if close is None:
# if no close record exists
return True
elif isinstance(close, IndexData):
# **any** non-NaN $close represents trading opportunity may exist
# if all returned is nan, then the stock is suspended
return cast(bool, cast(IndexData, close).isna().all())
else:
# it is single value, make sure is not None
return np.isnan(close)
else:
# if the stock is not in the stock list, then it is not tradable and regarded as suspended
return True
def is_stock_tradable(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
direction: int | None = None,
) -> bool:
# check if stock can be traded
return not (
self.check_stock_suspended(stock_id, start_time, end_time)
or self.check_stock_limit(stock_id, start_time, end_time, direction)
)
def check_order(self, order: Order) -> bool:
# check limit and suspended
return self.is_stock_tradable(order.stock_id, order.start_time, order.end_time, order.direction)
def deal_order(
self,
order: Order,
trade_account: Account | None = None,
position: BasePosition | None = None,
dealt_order_amount: Dict[str, float] = defaultdict(float),
) -> Tuple[float, float, float]:
"""
Deal order when the actual transaction
the results section in `Order` will be changed.
:param order: Deal the order.
:param trade_account: Trade account to be updated after dealing the order.
:param position: position to be updated after dealing the order.
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_val, trade_cost, trade_price
"""
# check order first.
if not self.check_order(order):
order.deal_amount = 0.0
# using np.nan instead of None to make it more convenient to show the value in format string
self.logger.debug(f"Order failed due to trading limitation: {order}")
return 0.0, 0.0, np.nan
if trade_account is not None and position is not None:
raise ValueError("trade_account and position can only choose one")
# NOTE: order will be changed in this function
trade_price, trade_val, trade_cost = self._calc_trade_info_by_order(
order,
trade_account.current_position if trade_account else position,
dealt_order_amount,
)
if trade_val > 1e-5:
# If the order can only be deal 0 value. Nothing to be updated
# Otherwise, it will result in
# 1) some stock with 0 value in the position
# 2) `trade_unit` of trade_cost will be lost in user account
if trade_account:
trade_account.update_order(order=order, trade_val=trade_val, cost=trade_cost, trade_price=trade_price)
elif position:
position.update_order(order=order, trade_val=trade_val, cost=trade_cost, trade_price=trade_price)
return trade_val, trade_cost, trade_price
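# A hypothetical call sequence for deal_order (names and values are assumed;
# a fully initialized Exchange and Position are required in practice):
#
#   order = Order(stock_id="SH600000", amount=1000, direction=Order.BUY,
#                 start_time=start, end_time=end)
#   trade_val, trade_cost, trade_price = exchange.deal_order(order, position=position)
#   # order.deal_amount now holds the amount actually executed after clipping and rounding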
def get_quote_info(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
field: str,
method: str = "ts_data_last",
) -> Union[None, int, float, bool, IndexData]:
return self.quote.get_data(stock_id, start_time, end_time, field=field, method=method)
def get_close(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
method: str = "ts_data_last",
) -> Union[None, int, float, bool, IndexData]:
return self.quote.get_data(stock_id, start_time, end_time, field="$close", method=method)
def get_volume(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
method: Optional[str] = "sum",
) -> Union[None, int, float, bool, IndexData]:
"""get the total deal volume of stock with `stock_id` between the time interval [start_time, end_time)"""
return self.quote.get_data(stock_id, start_time, end_time, field="$volume", method=method)
def get_deal_price(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
direction: OrderDir,
method: Optional[str] = "ts_data_last",
) -> Union[None, int, float, bool, IndexData]:
if direction == OrderDir.SELL:
pstr = self.sell_price
elif direction == OrderDir.BUY:
pstr = self.buy_price
else:
raise NotImplementedError(f"direction {direction} is not supported")
deal_price = self.quote.get_data(stock_id, start_time, end_time, field=pstr, method=method)
if method is not None and (deal_price is None or np.isnan(deal_price) or deal_price <= 1e-08):
self.logger.warning(f"(stock_id:{stock_id}, trade_time:{(start_time, end_time)}, {pstr}): {deal_price}!!!")
self.logger.warning(f"setting deal_price to close price")
deal_price = self.get_close(stock_id, start_time, end_time, method)
return deal_price
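# A minimal sketch of the fallback rule above in isolation: a missing, NaN,
# or (near-)zero deal price is replaced by the close price (assumed values):
import numpy as np

_deal_price, _close_price = float("nan"), 10.5
if _deal_price is None or np.isnan(_deal_price) or _deal_price <= 1e-08:
    _deal_price = _close_price  # _deal_price is now 10.5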
def get_factor(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
) -> Optional[float]:
"""
Returns
-------
Optional[float]:
`None`: may be returned if the stock is suspended
`float`: return factor if the factor exists
"""
assert start_time is not None and end_time is not None, "the time range must be given"
if stock_id not in self.quote.get_all_stock():
return None
return self.quote.get_data(stock_id, start_time, end_time, field="$factor", method="ts_data_last")
def generate_amount_position_from_weight_position(
self,
weight_position: dict,
cash: float,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
direction: OrderDir = OrderDir.BUY,
) -> dict:
"""
Generates the target position according to the weight and the cash.
NOTE: All the cash will be assigned to the tradable stocks.
Parameter:
weight_position : dict {stock_id : weight}; allocate cash by weight_position
among them, each weight must lie in the range [0, 1]
cash : cash
start_time : the start time point of the step
end_time : the end time point of the step
direction : the direction of the deal price for estimating the amount
# NOTE: this function is used for calculating target position. So the default direction is buy
"""
# calculate the total weight of tradable value
tradable_weight = 0.0
for stock_id, wp in weight_position.items():
if self.is_stock_tradable(stock_id=stock_id, start_time=start_time, end_time=end_time):
# each weight must lie in the range [0, 1]
if wp < 0 or wp > 1:
raise ValueError(
"weight_position is {}, which is not in the range [0, 1].".format(wp),
)
tradable_weight += wp
if tradable_weight - 1.0 >= 1e-5:
raise ValueError("tradable_weight is {}, can not greater than 1.".format(tradable_weight))
amount_dict = {}
for stock_id in weight_position:
if weight_position[stock_id] > 0.0 and self.is_stock_tradable(
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
):
amount_dict[stock_id] = (
cash
* weight_position[stock_id]
/ tradable_weight
// self.get_deal_price(
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
direction=direction,
)
)
return amount_dict
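# A worked example of the allocation above (weights and prices are assumed):
# cash is split by normalized weight, then floor-divided by the deal price so
# each amount is a whole number of shares.
_cash = 100_000.0
_weights = {"A": 0.6, "B": 0.4}
_prices = {"A": 12.3, "B": 45.6}
_tradable_weight = sum(_weights.values())  # may be < 1 if some stocks are untradable
_amounts = {s: _cash * w / _tradable_weight // _prices[s] for s, w in _weights.items()}
# _amounts == {"A": 4878.0, "B": 877.0}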
def get_real_deal_amount(self, current_amount: float, target_amount: float, factor: float | None = None) -> float:
"""
Calculate the real adjusted deal amount, taking the trading unit into account
:param current_amount:
:param target_amount:
:param factor:
:return real_deal_amount; Positive deal_amount indicates buying more stock.
"""
if current_amount == target_amount:
return 0
elif current_amount < target_amount:
deal_amount = target_amount - current_amount
deal_amount = self.round_amount_by_trade_unit(deal_amount, factor)
return deal_amount
else:
if target_amount == 0:
return -current_amount
else:
deal_amount = current_amount - target_amount
deal_amount = self.round_amount_by_trade_unit(deal_amount, factor)
return -deal_amount
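# A minimal self-contained model of the branch logic above, with rounding
# simplified to flooring by an assumed unit of 100 (the factor handling is
# omitted):
def _real_deal_sketch(current: float, target: float, unit: float = 100.0) -> float:
    if current == target:
        return 0.0
    if current < target:
        return (target - current) // unit * unit  # buy: floor the increment
    if target == 0:
        return -current  # sell everything: no rounding, odd lots included
    return -((current - target) // unit * unit)  # partial sell: floor the decrement

assert _real_deal_sketch(100, 321) == 200.0
assert _real_deal_sketch(321, 0) == -321.0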
def generate_order_for_target_amount_position(
self,
target_position: dict,
current_position: dict,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
) -> List[Order]:
"""
Note: some future information is used in this function
Parameter:
target_position : dict { stock_id : amount }
current_position : dict { stock_id : amount }
trade_unit : taken from the exchange; amounts are rounded down to it
(e.g. for amount 321 and trade_unit 100, deal_amount is 300)
orders are dealt on the trade date
"""
# split buy and sell for further use
buy_order_list = []
sell_order_list = []
# three parts: kept stock_id, dropped stock_id, new stock_id
# handle kept stock_ids
# because the iteration order of a set is not fixed, the stocks would be traded in a different order
# on each run, so backtests with the same parameters could produce different results;
# therefore we sort the stock_ids first and then randomly shuffle them;
# because a fixed random seed is used, the final stock_id order is deterministic
sorted_ids = sorted(set(list(current_position.keys()) + list(target_position.keys())))
random.seed(0)
random.shuffle(sorted_ids)
for stock_id in sorted_ids:
# Do not generate order for the non-tradable stocks
if not self.is_stock_tradable(stock_id=stock_id, start_time=start_time, end_time=end_time):
continue
target_amount = target_position.get(stock_id, 0)
current_amount = current_position.get(stock_id, 0)
factor = self.get_factor(stock_id, start_time=start_time, end_time=end_time)
deal_amount = self.get_real_deal_amount(current_amount, target_amount, factor)
if deal_amount == 0:
continue
if deal_amount > 0:
# buy stock
buy_order_list.append(
Order(
stock_id=stock_id,
amount=deal_amount,
direction=Order.BUY,
start_time=start_time,
end_time=end_time,
factor=factor,
),
)
else:
# sell stock
sell_order_list.append(
Order(
stock_id=stock_id,
amount=abs(deal_amount),
direction=Order.SELL,
start_time=start_time,
end_time=end_time,
factor=factor,
),
)
# return order_list : sell orders first, then buy orders
return sell_order_list + buy_order_list
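# The deterministic-shuffle trick above in isolation: sorting removes the
# set's nondeterminism and the fixed seed makes the shuffle reproducible
# (the sample ids are assumptions):
import random

_ids = sorted({"SH600000", "SZ000001", "SH600519"})
random.seed(0)
random.shuffle(_ids)  # identical result on every run, regardless of input set order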
def calculate_amount_position_value(
self,
amount_dict: dict,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
only_tradable: bool = False,
direction: OrderDir = OrderDir.SELL,
) -> float:
"""Parameter
position : Position()
amount_dict : {stock_id : amount}
direction : the direction of the deal price for estimating the amount
# NOTE:
This function is used for calculating current position value.
So the default direction is sell.
"""
value = 0
for stock_id in amount_dict:
if not only_tradable or (
not self.check_stock_suspended(stock_id=stock_id, start_time=start_time, end_time=end_time)
and not self.check_stock_limit(stock_id=stock_id, start_time=start_time, end_time=end_time)
):
value += (
self.get_deal_price(
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
direction=direction,
)
* amount_dict[stock_id]
)
return value
def _get_factor_or_raise_error(
self,
factor: float | None = None,
stock_id: str | None = None,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> float:
"""Please refer to the docs of get_amount_of_trade_unit"""
if factor is None:
if stock_id is not None and start_time is not None and end_time is not None:
factor = self.get_factor(stock_id=stock_id, start_time=start_time, end_time=end_time)
else:
raise ValueError(f"`factor` and (`stock_id`, `start_time`, `end_time`) can't both be None")
assert factor is not None
return factor
def get_amount_of_trade_unit(
self,
factor: float | None = None,
stock_id: str | None = None,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> Optional[float]:
"""
get the amount of one trade unit in adjusted terms, based on **factor**
the factor can be given directly or looked up for the given stock id and time range.
`factor` has higher priority than `stock_id`, `start_time` and `end_time`
Parameters
----------
factor : float
the adjusted factor
stock_id : str
the id of the stock
start_time :
the start time of trading range
end_time :
the end time of trading range
"""
if not self.trade_w_adj_price and self.trade_unit is not None:
factor = self._get_factor_or_raise_error(
factor=factor,
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
)
return self.trade_unit / factor
else:
return None
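# A worked example of the conversion above (assumed values): with a trade
# unit of 100 real shares and an adjustment factor of 0.5, one trade unit
# corresponds to 200 units of adjusted amount.
_trade_unit, _factor = 100.0, 0.5
_unit_in_adjusted_amount = _trade_unit / _factor  # 200.0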
def round_amount_by_trade_unit(
self,
deal_amount: float,
factor: float | None = None,
stock_id: str | None = None,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> float:
"""Parameter
Please refer to the docs of get_amount_of_trade_unit
deal_amount : float, adjusted amount
factor : float, adjusted factor
return : float, the adjusted amount rounded down to a whole number of trade units
"""
if not self.trade_w_adj_price and self.trade_unit is not None:
# round the real amount down to a whole number of trade units; 0.1 is added to avoid floating-point precision issues before flooring
factor = self._get_factor_or_raise_error(
factor=factor,
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
)
return (deal_amount * factor + 0.1) // self.trade_unit * self.trade_unit / factor
return deal_amount
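# A worked example of the rounding formula above (trade_unit = 100 and
# factor = 0.5 are assumed values):
_trade_unit, _factor = 100.0, 0.5
_deal_amount = 642.0                                            # adjusted amount
_real = _deal_amount * _factor                                  # 321.0 real shares
_rounded = (_real + 0.1) // _trade_unit * _trade_unit / _factor
assert _rounded == 600.0                                        # 300 real shares, back in adjusted terms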
def _clip_amount_by_volume(self, order: Order, dealt_order_amount: dict) -> Optional[float]:
"""parse the capacity limit string and return the actual amount of orders that can be executed.
NOTE:
this function will change the order.deal_amount **inplace**
- This will make the order info more accurate
Parameters
----------
order : Order
the order to be executed.
dealt_order_amount : dict
the dealt order amount dict with the format of {stock_id: float}
"""
vol_limit = self.buy_vol_limit if order.direction == Order.BUY else self.sell_vol_limit
if vol_limit is None:
return order.deal_amount
vol_limit_num: List[float] = []
for limit in vol_limit:
assert isinstance(limit, tuple)
if limit[0] == "current":
limit_value = self.quote.get_data(
order.stock_id,
order.start_time,
order.end_time,
field=limit[1],
method="sum",
)
vol_limit_num.append(cast(float, limit_value))
elif limit[0] == "cum":
limit_value = self.quote.get_data(
order.stock_id,
order.start_time,
order.end_time,
field=limit[1],
method="ts_data_last",
)
vol_limit_num.append(limit_value - dealt_order_amount[order.stock_id])
else:
raise ValueError(f"{limit[0]} is not supported")
vol_limit_min = min(vol_limit_num)
orig_deal_amount = order.deal_amount
order.deal_amount = max(min(vol_limit_min, orig_deal_amount), 0)
if vol_limit_min < orig_deal_amount:
self.logger.debug(f"Order clipped due to volume limitation: {order}, {list(zip(vol_limit_num, vol_limit))}")
return None
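# A numeric sketch of the clipping above (all limit values are assumptions):
# "current" limits cap the order by the step's own volume, while "cum" limits
# subtract what has already been dealt.
_current_limit = 5_000.0                       # e.g. a fraction of the step's volume
_cum_limit, _already_dealt = 8_000.0, 4_500.0
_deal_amount = 4_000.0
_clipped = max(min(_current_limit, _cum_limit - _already_dealt, _deal_amount), 0)  # 3_500.0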
def _get_buy_amount_by_cash_limit(self, trade_price: float, cash: float, cost_ratio: float) -> float:
"""return the real order amount after cash limit for buying.
Parameters
----------
trade_price : float
cash : float
cost_ratio : float
Return
----------
float
the real order amount after cash limit for buying.
"""
max_trade_amount = 0.0
if cash >= self.min_cost:
# critical_price is the cash threshold at which the proportional service fee equals min_cost.
critical_price = self.min_cost / cost_ratio + self.min_cost
if cash >= critical_price:
# the service fee is proportional: cost_ratio * trade value
max_trade_amount = cash / (1 + cost_ratio) / trade_price
else:
# the service fee is equal to min_cost
max_trade_amount = (cash - self.min_cost) / trade_price
return max_trade_amount
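# A worked example of the formula above (min_cost = 5, cost_ratio = 0.001 and
# price = 10 are assumed values): above the critical cash level the fee is
# proportional, so solving cash = V + V * cost_ratio for the trade value V
# gives V = cash / (1 + cost_ratio).
_min_cost, _cost_ratio, _price, _cash = 5.0, 0.001, 10.0, 10_000.0
_critical = _min_cost / _cost_ratio + _min_cost        # 5005.0
assert _cash >= _critical
_max_amount = _cash / (1 + _cost_ratio) / _price       # ~999.0 shares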
def _calc_trade_info_by_order(
self,
order: Order,
position: Optional[BasePosition],
dealt_order_amount: dict,
) -> Tuple[float, float, float]:
"""
Calculation of trade info
**NOTE**: Order will be changed in this function
:param order:
:param position: Position
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_price, trade_val, trade_cost
"""
trade_price = cast(
float,
self.get_deal_price(order.stock_id, order.start_time, order.end_time, direction=order.direction),
)
total_trade_val = cast(float, self.get_volume(order.stock_id, order.start_time, order.end_time)) * trade_price
order.factor = self.get_factor(order.stock_id, order.start_time, order.end_time)
order.deal_amount = order.amount # set to full amount and clip it step by step
# Clipping amount first
# - It simulates an order being rejected directly by the exchange because of its large size
# Another choice is to place this step after rounding the order
# - That would simulate a large order being submitted but only partially dealt, regardless of rounding by the trade unit.
self._clip_amount_by_volume(order, dealt_order_amount)
# TODO: the adjusted cost ratio can be overestimated as deal_amount will be clipped in the next steps
trade_val = order.deal_amount * trade_price
if not total_trade_val or np.isnan(total_trade_val):
# TODO: assert trade_val == 0, f"trade_val != 0, total_trade_val: {total_trade_val}; order info: {order}"
adj_cost_ratio = self.impact_cost
else:
adj_cost_ratio = self.impact_cost * (trade_val / total_trade_val) ** 2
if order.direction == Order.SELL:
cost_ratio = self.close_cost + adj_cost_ratio
# sell
# if we don't know the current position, we choose to sell everything
# otherwise, we clip the amount based on the current position
if position is not None:
current_amount = (
position.get_stock_amount(order.stock_id) if position.check_stock(order.stock_id) else 0
)
if not np.isclose(order.deal_amount, current_amount):
# when not selling the entire holding, rounding is necessary
order.deal_amount = self.round_amount_by_trade_unit(
min(current_amount, order.deal_amount),
order.factor,
)
# guard against the cash balance going negative
if position.get_cash() + order.deal_amount * trade_price < max(
order.deal_amount * trade_price * cost_ratio,
self.min_cost,
):
order.deal_amount = 0
self.logger.debug(f"Order clipped due to cash limitation: {order}")
elif order.direction == Order.BUY:
cost_ratio = self.open_cost + adj_cost_ratio
# buy
if position is not None:
cash = position.get_cash()
trade_val = order.deal_amount * trade_price
if cash < max(trade_val * cost_ratio, self.min_cost):
# cash cannot cover cost
order.deal_amount = 0
self.logger.debug(f"Order clipped due to cost higher than cash: {order}")
elif cash < trade_val + max(trade_val * cost_ratio, self.min_cost):
# The money is not enough
max_buy_amount = self._get_buy_amount_by_cash_limit(trade_price, cash, cost_ratio)
order.deal_amount = self.round_amount_by_trade_unit(
min(max_buy_amount, order.deal_amount),
order.factor,
)
self.logger.debug(f"Order clipped due to cash limitation: {order}")
else:
# The money is enough
order.deal_amount = self.round_amount_by_trade_unit(order.deal_amount, order.factor)
else:
# Unknown amount of money. Just round the amount
order.deal_amount = self.round_amount_by_trade_unit(order.deal_amount, order.factor)
else:
raise NotImplementedError("order direction {} error".format(order.direction))
trade_val = order.deal_amount * trade_price
trade_cost = max(trade_val * cost_ratio, self.min_cost)
if trade_val <= 1e-5:
# if dealing is not successful, the trade_cost should be zero.
trade_cost = 0
return trade_price, trade_val, trade_cost
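# A worked example of the squared market-impact adjustment used above
# (impact_cost = 0.01 is an assumed value): dealing 20% of the interval's
# total traded value scales the impact cost by 0.2 ** 2 = 0.04.
_impact_cost = 0.01
_trade_val, _total_trade_val = 2_000.0, 10_000.0
_adj_cost_ratio = _impact_cost * (_trade_val / _total_trade_val) ** 2  # 0.0004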
def get_order_helper(self) -> OrderHelper:
if not hasattr(self, "_order_helper"):
# cache to avoid recreate the same instance
self._order_helper = OrderHelper(self)
return self._order_helper
The provided code snippet includes necessary dependencies for implementing the `prepare` function. Write a Python function `def prepare(um, today, user_id, exchange_config=None)` to solve the following problem:
1. Get the dates on which trading still needs to be done up to today for user {user_id} dates[0] indicates the latest trading date of User {user_id}; if User {user_id} has not traded before, then dates[0] represents the init date of User {user_id}. 2. Set up the exchange with the exchange_config file Parameter um : UserManager() today : pd.Timestamp() user_id : str :return dates : list of pd.Timestamp trade_exchange : Exchange()
Here is the function:
def prepare(um, today, user_id, exchange_config=None):
"""
1. Get the dates on which trading still needs to be done up to today for user {user_id}
dates[0] indicates the latest trading date of User {user_id};
if User {user_id} has not traded before, then dates[0] represents the init date of User {user_id}.
2. Set up the exchange with the exchange_config file
Parameter
um : UserManager()
today : pd.Timestamp()
user_id : str
:return
dates : list of pd.Timestamp
trade_exchange : Exchange()
"""
# get the latest trading date for {user_id}
# if it is None, the user has not traded yet, so the last trading date is the init date of {user_id}
latest_trading_date = um.users[user_id].get_latest_trading_date()
if not latest_trading_date:
latest_trading_date = um.user_record.loc[user_id][0]
if str(today.date()) < latest_trading_date:
log.warning("user_id:{}, last trading date {} after today {}".format(user_id, latest_trading_date, today))
return [pd.Timestamp(latest_trading_date)], None
dates = D.calendar(
start_time=pd.Timestamp(latest_trading_date),
end_time=pd.Timestamp(today),
future=True,
)
dates = list(dates)
dates.append(get_next_trading_date(dates[-1], future=True))
if exchange_config:
with pathlib.Path(exchange_config).open("r") as fp:
exchange_paras = yaml.safe_load(fp)
else:
exchange_paras = {}
trade_exchange = Exchange(trade_dates=dates, **exchange_paras)
return dates, trade_exchange | 1. Get the dates that need to do trading till today for user {user_id} dates[0] indicate the latest trading date of User{user_id}, if User{user_id} haven't do trading before, than dates[0] presents the init date of User{user_id}. 2. Set the exchange with exchange_config file Parameter um : UserManager() today : pd.Timestamp() user_id : str :return dates : list of pd.Timestamp trade_exchange : Exchange() |
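# A hypothetical invocation of prepare (names are assumed; qlib must be
# initialized and the UserManager populated with user records):
#
#   dates, trade_exchange = prepare(um, pd.Timestamp("2020-01-10"), "user_1")
#   # dates[0] is the user's last (or init) trading date; trade_exchange is
#   # None when that date is already after `today`.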