code stringlengths 281 23.7M |
|---|
class VGG(nn.Module):
    """VGG-style network (depths 13/16/19) for small images such as CIFAR.

    The stem is two 3x3 conv-BN-ReLU layers at 64 channels; four stages of
    conv-BN-ReLU blocks follow, each ending in 2x2 max pooling, so a 32x32
    input is reduced to 1x1 spatially before the 512 -> num_classes linear
    classifier.

    Args:
        num_classes: size of the classifier output.
        depth: total number of weight layers; one of 13, 16 or 19.
        dropout: accepted for interface compatibility; currently unused.
        multi_fc: accepted for interface compatibility; currently unused.

    Raises:
        ValueError: if ``depth`` is not 13, 16 or 19.
    """

    def __init__(self, num_classes=10, depth=16, dropout=0.0, multi_fc=False):
        super(VGG, self).__init__()
        self.inplances = 64
        # Stem: two conv-BN-ReLU blocks at 64 channels.
        self.conv1 = nn.Conv2d(3, self.inplances, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(self.inplances)
        self.conv2 = nn.Conv2d(self.inplances, self.inplances, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(self.inplances)
        self.relu = nn.ReLU(True)
        self.layer1 = self._make_layers(128, 2)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Map total depth to the number of conv layers per remaining stage.
        if depth == 16:
            num_layer = 3
        elif depth == 13:
            num_layer = 2
        elif depth == 19:
            num_layer = 4
        else:
            # BUG FIX: the original fell through for other depths and crashed
            # later with UnboundLocalError on num_layer; fail fast instead.
            raise ValueError('depth must be one of 13, 16 or 19, got %r' % (depth,))
        self.layer2 = self._make_layers(256, num_layer)
        self.layer3 = self._make_layers(512, num_layer)
        self.layer4 = self._make_layers(512, num_layer)
        self.classifier = nn.Sequential(nn.Linear(512, num_classes))
        # He initialisation for convs, unit-gain for BN, small Gaussian for
        # the linear head.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def _make_layers(self, planes, num_layer):
        """Build one stage: ``num_layer`` conv-BN-ReLU blocks plus a 2x2 max pool.

        Side effect: advances ``self.inplances`` so the next stage starts
        from ``planes`` input channels.
        """
        layers = []
        for _ in range(num_layer):
            layers += [
                nn.Conv2d(self.inplances, planes, kernel_size=3, padding=1),
                nn.BatchNorm2d(planes),
                nn.ReLU(inplace=True),
            ]
            self.inplances = planes
        layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass; assumes ``x`` is (N, 3, 32, 32) so the final feature
        map is 512x1x1 — TODO confirm intended input size."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
def generate_model_output_test1() -> Dict[(str, torch._tensor.Tensor)]:
    """Build a fixed, hand-written batch of model outputs for testing.

    Returns a dict holding one 12-element row each of ``predictions``,
    ``session`` markers, ``labels`` and ``weights``, plus the
    ``expected_recall`` value the consuming test should observe.
    """
    prediction_row = [1.0, 0.0, 0.51, 0.8] * 3
    session_row = [1] * 7 + [-1] * 5
    label_row = [0.9, 0.1, 0.2, 0.3, 0.9, 0.9, 0.0, 0.9, 0.1, 0.4, 0.9, 0.1]
    weight_row = [1.0] * 12
    return {
        'predictions': torch.tensor([prediction_row]),
        'session': torch.tensor([session_row]),
        'labels': torch.tensor([label_row]),
        'weights': torch.tensor([weight_row]),
        'expected_recall': torch.tensor([0.5]),
    }
class KBKDFHMAC(KeyDerivationFunction):
    """Key-based key derivation (KBKDF) using HMAC as the pseudo-random function."""

    def __init__(self, algorithm: hashes.HashAlgorithm, mode: Mode, length: int, rlen: int, llen: (int | None), location: CounterLocation, label: (bytes | None), context: (bytes | None), fixed: (bytes | None), backend: typing.Any=None, *, break_location: (int | None)=None):
        # The PRF must be a known hash algorithm, and one the OpenSSL
        # backend can actually use for HMAC.
        if not isinstance(algorithm, hashes.HashAlgorithm):
            raise UnsupportedAlgorithm('Algorithm supplied is not a supported hash algorithm.', _Reasons.UNSUPPORTED_HASH)
        from cryptography.hazmat.backends.openssl.backend import backend as openssl_backend
        if not openssl_backend.hmac_supported(algorithm):
            raise UnsupportedAlgorithm('Algorithm supplied is not a supported hmac algorithm.', _Reasons.UNSUPPORTED_HASH)
        self._algorithm = algorithm
        self._deriver = _KBKDFDeriver(self._prf, mode, length, rlen, llen, location, break_location, label, context, fixed)

    def _prf(self, key_material: bytes) -> hmac.HMAC:
        # Fresh HMAC context keyed with the supplied material.
        return hmac.HMAC(key_material, self._algorithm)

    def derive(self, key_material: bytes) -> bytes:
        """Derive a key of the configured length from ``key_material``."""
        return self._deriver.derive(key_material, self._algorithm.digest_size)

    def verify(self, key_material: bytes, expected_key: bytes) -> None:
        """Raise InvalidKey unless deriving from ``key_material`` yields ``expected_key``."""
        derived = self.derive(key_material)
        if not constant_time.bytes_eq(derived, expected_key):
            raise InvalidKey
class ListDataset(BaseWrapperDataset):
    """Wrapper dataset whose collater simply returns the raw list of samples.

    Args:
        dataset: the dataset to wrap.
        sizes: optional per-item sizes, indexable by item index.
    """

    def __init__(self, dataset, sizes=None):
        super().__init__(dataset)
        self._sizes = sizes

    def collater(self, samples):
        # No batching/padding: hand the list of samples straight through.
        return samples

    @property
    def sizes(self):
        # BUG FIX: this was a plain method, but num_tokens()/size() index
        # ``self.sizes`` directly, which would subscript the bound method
        # and raise TypeError. Exposing it as a property matches that usage.
        return self._sizes

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def set_epoch(self, epoch):
        # Epoch changes do not affect this wrapper.
        pass
class JTNNDecoder(nn.Module):
    """Tree decoder for the Junction Tree VAE.

    At each DFS step over a junction tree it makes two predictions against
    the latent tree vector: a *stop* score (expand further vs. backtrack)
    and a *word* score (which vocabulary cluster to attach). Messages along
    tree edges are propagated with a GRU cell (the external ``GRU`` helper).
    """

    def __init__(self, vocab, hidden_size, latent_size, embedding):
        super(JTNNDecoder, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab.size()
        self.vocab = vocab
        self.embedding = embedding
        # GRU message-passing weights.
        self.W_z = nn.Linear(2 * hidden_size, hidden_size)
        self.U_r = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_r = nn.Linear(hidden_size, hidden_size)
        self.W_h = nn.Linear(2 * hidden_size, hidden_size)
        # Word (label) prediction head.
        self.W = nn.Linear(hidden_size + latent_size, hidden_size)
        # Stop prediction head.
        self.U = nn.Linear(hidden_size + latent_size, hidden_size)
        self.U_i = nn.Linear(2 * hidden_size, hidden_size)
        # Output projections for the two heads.
        self.W_o = nn.Linear(hidden_size, self.vocab_size)
        self.U_o = nn.Linear(hidden_size, 1)
        # MODERNISATION: ``size_average=False`` is deprecated in PyTorch;
        # ``reduction='sum'`` is the equivalent modern spelling (losses are
        # normalised by batch size manually in forward()).
        self.pred_loss = nn.CrossEntropyLoss(reduction='sum')
        self.stop_loss = nn.BCEWithLogitsLoss(reduction='sum')

    def aggregate(self, hiddens, contexts, x_tree_vecs, mode):
        """Score hidden states against their tree latent vectors.

        ``contexts`` selects, per hidden row, the row of ``x_tree_vecs`` it
        belongs to; ``mode`` ('word' or 'stop') picks the prediction head.
        """
        if mode == 'word':
            V, V_o = self.W, self.W_o
        elif mode == 'stop':
            V, V_o = self.U, self.U_o
        else:
            raise ValueError('aggregate mode is wrong')
        tree_contexts = x_tree_vecs.index_select(0, contexts)
        input_vec = torch.cat([hiddens, tree_contexts], dim=-1)
        output_vec = F.relu(V(input_vec))
        return V_o(output_vec)

    def forward(self, mol_batch, x_tree_vecs):
        """Teacher-forced decoding over a batch of ground-truth trees.

        Returns ``(pred_loss, stop_loss, pred_acc, stop_acc)`` where the
        summed losses are divided by the batch size.
        """
        pred_hiddens, pred_contexts, pred_targets = [], [], []
        stop_hiddens, stop_contexts, stop_targets = [], [], []
        traces = []
        for mol_tree in mol_batch:
            s = []
            dfs(s, mol_tree.nodes[0], -1)
            traces.append(s)
            # Neighbors are rebuilt incrementally as edges are traversed.
            for node in mol_tree.nodes:
                node.neighbors = []
        batch_size = len(mol_batch)
        # Predict the root word from a zero hidden state.
        pred_hiddens.append(create_var(torch.zeros(len(mol_batch), self.hidden_size)))
        pred_targets.extend([mol_tree.nodes[0].wid for mol_tree in mol_batch])
        pred_contexts.append(create_var(torch.LongTensor(range(batch_size))))
        max_iter = max([len(tr) for tr in traces])
        padding = create_var(torch.zeros(self.hidden_size), False)
        h = {}
        for t in range(max_iter):
            # Gather the t-th DFS step of every trace that is still active.
            prop_list = []
            batch_list = []
            for i, plist in enumerate(traces):
                if t < len(plist):
                    prop_list.append(plist[t])
                    batch_list.append(i)
            cur_x = []
            cur_h_nei, cur_o_nei = [], []
            for node_x, real_y, _ in prop_list:
                # Incoming messages, excluding the edge being traversed
                # (used for message propagation).
                cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != real_y.idx]
                pad_len = MAX_NB - len(cur_nei)
                cur_h_nei.extend(cur_nei)
                cur_h_nei.extend([padding] * pad_len)
                # All incoming messages (used for the stop prediction).
                cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
                pad_len = MAX_NB - len(cur_nei)
                cur_o_nei.extend(cur_nei)
                cur_o_nei.extend([padding] * pad_len)
                cur_x.append(node_x.wid)
            cur_x = create_var(torch.LongTensor(cur_x))
            cur_x = self.embedding(cur_x)
            cur_h_nei = torch.stack(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
            new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
            cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
            cur_o = cur_o_nei.sum(dim=1)
            pred_target, pred_list = [], []
            stop_target = []
            for i, m in enumerate(prop_list):
                node_x, node_y, direction = m
                x, y = node_x.idx, node_y.idx
                h[(x, y)] = new_h[i]
                node_y.neighbors.append(node_x)
                if direction == 1:
                    # Forward edge: the child word must also be predicted.
                    pred_target.append(node_y.wid)
                    pred_list.append(i)
                stop_target.append(direction)
            # Every step contributes a stop prediction...
            cur_batch = create_var(torch.LongTensor(batch_list))
            stop_hidden = torch.cat([cur_x, cur_o], dim=1)
            stop_hiddens.append(stop_hidden)
            stop_contexts.append(cur_batch)
            stop_targets.extend(stop_target)
            # ...but only forward edges contribute word predictions.
            if len(pred_list) > 0:
                batch_list = [batch_list[i] for i in pred_list]
                cur_batch = create_var(torch.LongTensor(batch_list))
                pred_contexts.append(cur_batch)
                cur_pred = create_var(torch.LongTensor(pred_list))
                pred_hiddens.append(new_h.index_select(0, cur_pred))
                pred_targets.extend(pred_target)
        # Final 'stop' (target 0) prediction at each root after traversal.
        cur_x, cur_o_nei = [], []
        for mol_tree in mol_batch:
            node_x = mol_tree.nodes[0]
            cur_x.append(node_x.wid)
            cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
            pad_len = MAX_NB - len(cur_nei)
            cur_o_nei.extend(cur_nei)
            cur_o_nei.extend([padding] * pad_len)
        cur_x = create_var(torch.LongTensor(cur_x))
        cur_x = self.embedding(cur_x)
        cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
        cur_o = cur_o_nei.sum(dim=1)
        stop_hidden = torch.cat([cur_x, cur_o], dim=1)
        stop_hiddens.append(stop_hidden)
        stop_contexts.append(create_var(torch.LongTensor(range(batch_size))))
        stop_targets.extend([0] * len(mol_batch))
        # Word-prediction loss and accuracy.
        pred_contexts = torch.cat(pred_contexts, dim=0)
        pred_hiddens = torch.cat(pred_hiddens, dim=0)
        pred_scores = self.aggregate(pred_hiddens, pred_contexts, x_tree_vecs, 'word')
        pred_targets = create_var(torch.LongTensor(pred_targets))
        pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_batch)
        _, preds = torch.max(pred_scores, dim=1)
        pred_acc = torch.eq(preds, pred_targets).float()
        pred_acc = torch.sum(pred_acc) / pred_targets.nelement()
        # Stop-prediction loss and accuracy (threshold at logit 0).
        stop_contexts = torch.cat(stop_contexts, dim=0)
        stop_hiddens = torch.cat(stop_hiddens, dim=0)
        stop_hiddens = F.relu(self.U_i(stop_hiddens))
        stop_scores = self.aggregate(stop_hiddens, stop_contexts, x_tree_vecs, 'stop')
        stop_scores = stop_scores.squeeze(-1)
        stop_targets = create_var(torch.Tensor(stop_targets))
        stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_batch)
        stops = torch.ge(stop_scores, 0).float()
        stop_acc = torch.eq(stops, stop_targets).float()
        stop_acc = torch.sum(stop_acc) / stop_targets.nelement()
        return pred_loss, stop_loss, pred_acc.item(), stop_acc.item()

    def decode(self, x_tree_vecs, prob_decode):
        """Decode a single tree greedily (or by sampling if ``prob_decode``).

        Returns ``(root, all_nodes)``; expansion stops after MAX_DECODE_LEN
        steps or when the decoder backtracks past the root.
        """
        assert x_tree_vecs.size(0) == 1
        stack = []
        init_hiddens = create_var(torch.zeros(1, self.hidden_size))
        zero_pad = create_var(torch.zeros(1, 1, self.hidden_size))
        contexts = create_var(torch.LongTensor(1).zero_())
        # Choose the root word first, from a zero hidden state.
        root_score = self.aggregate(init_hiddens, contexts, x_tree_vecs, 'word')
        _, root_wid = torch.max(root_score, dim=1)
        root_wid = root_wid.item()
        root = MolTreeNode(self.vocab.get_smiles(root_wid))
        root.wid = root_wid
        root.idx = 0
        stack.append((root, self.vocab.get_slots(root.wid)))
        all_nodes = [root]
        h = {}
        for step in range(MAX_DECODE_LEN):
            node_x, fa_slot = stack[-1]
            cur_h_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
            if len(cur_h_nei) > 0:
                cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1, -1, self.hidden_size)
            else:
                cur_h_nei = zero_pad
            cur_x = create_var(torch.LongTensor([node_x.wid]))
            cur_x = self.embedding(cur_x)
            # Decide whether to expand this node or backtrack.
            cur_h = cur_h_nei.sum(dim=1)
            stop_hiddens = torch.cat([cur_x, cur_h], dim=1)
            stop_hiddens = F.relu(self.U_i(stop_hiddens))
            stop_score = self.aggregate(stop_hiddens, contexts, x_tree_vecs, 'stop')
            if prob_decode:
                backtrack = (torch.bernoulli(torch.sigmoid(stop_score)).item() == 0)
            else:
                backtrack = (stop_score.item() < 0)
            if not backtrack:
                new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
                pred_score = self.aggregate(new_h, contexts, x_tree_vecs, 'word')
                if prob_decode:
                    sort_wid = torch.multinomial(F.softmax(pred_score, dim=1).squeeze(), 5)
                else:
                    _, sort_wid = torch.sort(pred_score, dim=1, descending=True)
                    sort_wid = sort_wid.data.squeeze()
                next_wid = None
                # Try the top-5 candidates until one is chemically feasible.
                for wid in sort_wid[:5]:
                    slots = self.vocab.get_slots(wid)
                    node_y = MolTreeNode(self.vocab.get_smiles(wid))
                    if have_slots(fa_slot, slots) and can_assemble(node_x, node_y):
                        next_wid = wid
                        next_slots = slots
                        break
                if next_wid is None:
                    # No feasible child: force a backtrack this step.
                    backtrack = True
                else:
                    node_y = MolTreeNode(self.vocab.get_smiles(next_wid))
                    node_y.wid = next_wid
                    node_y.idx = len(all_nodes)
                    node_y.neighbors.append(node_x)
                    h[(node_x.idx, node_y.idx)] = new_h[0]
                    stack.append((node_y, next_slots))
                    all_nodes.append(node_y)
            if backtrack:
                if len(stack) == 1:
                    # Backtracking past the root ends decoding.
                    break
                node_fa, _ = stack[-2]
                cur_h_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != node_fa.idx]
                if len(cur_h_nei) > 0:
                    cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1, -1, self.hidden_size)
                else:
                    cur_h_nei = zero_pad
                new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
                h[(node_x.idx, node_fa.idx)] = new_h[0]
                node_fa.neighbors.append(node_x)
                stack.pop()
        return root, all_nodes
class MultipleLMDBManager():
    """Route key lookups across several LMDB / zip / kv data files.

    Each entry in ``files`` is ``"path"`` or ``"path|prefix"``; a path of
    the form ``glob-<pattern>`` expands to every file matching ``<pattern>``.

    Args:
        files: list of file specs as above.
        data_type: decode type; must be a key of ``decode_funcs``.
        get_key: if True, enumerate every manager's keys eagerly and build
            the key -> manager-index mapping.
        sync: initialise now (True) or via a background ``preload`` (False).
    """

    def __init__(self, files: list, data_type, get_key=False, sync=True):
        self.files = files
        self._is_init = False
        self.data_type = data_type
        assert data_type in decode_funcs
        self.get_key = get_key
        if sync:
            print('sync', files)
            self.initialize()
        else:
            print('async', files)
            preload(self)

    @staticmethod
    def _split_spec(spec):
        """Split ``"path|prefix"`` into ``(path, prefix_or_None)``."""
        items = spec.split('|')
        if len(items) > 1:
            return items[0], items[1]
        return items[0], None

    def keep_subset(self, subset):
        """Shrink the key -> manager mapping to the given keys only."""
        mapping = {key: self.mapping[key] for key in subset}
        del self.mapping
        self.mapping = mapping

    def initialize(self):
        """Expand glob specs, open one backend manager per file, optionally index keys."""
        self.mapping = {}
        self.managers = {}
        # Expand 'glob-<pattern>' specs into concrete, sorted file specs.
        new_files = []
        for old_file in self.files:
            file, prefix = self._split_spec(old_file)
            if not file.startswith('glob-'):
                new_files.append(old_file)
            else:
                desc = remove_prefix(file, 'glob-')
                sub_files = sorted(glob.glob(desc))
                if prefix is not None:
                    sub_files = [f'{f}|{prefix}' for f in sub_files]
                new_files.extend(sub_files)
        self.files = new_files
        # Pick a backend per file based on its extension.
        for i, old_file in enumerate(self.files):
            file, prefix = self._split_spec(old_file)
            if file.endswith('.lmdb'):
                Manager = DBManager
            elif file.endswith('.zip'):
                Manager = ZipManager
            elif file.endswith('.kv'):
                Manager = KVReader
            else:
                # BUG FIX: this was a bare ``raise`` with no active exception,
                # which itself raises an opaque RuntimeError; raise something
                # meaningful instead.
                raise ValueError(f'unsupported data file type: {file}')
            self.managers[i] = Manager(file, self.data_type, prefix=prefix, load=False)
            print(file, ' done')
        ThreadPool(4).run(preload, self.managers.values())
        if self.get_key:
            self._keys = []
            for index, manager in self.managers.items():
                file = manager.db_path
                print(f'{file} loading')
                logging.info(f'{file} loading')
                keys = manager.keys
                self._keys.extend(keys)
                for key in keys:
                    self.mapping[key] = index
                logging.info(f'{file} loaded, size = {len(keys)}')
                print(f'{file} loaded, size = {len(keys)}')
        self._is_init = True

    def keys(self):
        """Block until initialisation finishes, then return all keys."""
        while not self._is_init:
            time.sleep(0.1)
        return self._keys

    def cleanup(self):
        # Drop the (potentially large) key list and mapping.
        del self._keys
        del self.mapping

    def get(self, name, source=None):
        """Fetch ``name`` from manager ``source`` (looked up via the mapping if None)."""
        if source is None:
            source = self.mapping[name]
        data = self.managers[source].get(name)
        return data
def setup_axes(axes_amplitude=None, axes_phase=None):
    """Prepare Bode-plot style axes: log-log amplitude ratio plus phase.

    Either axes object may be None, in which case it is skipped. When only
    the amplitude axes is given it also carries the frequency label;
    otherwise its x tick labels are hidden (shared with the phase panel).
    """
    have_phase = axes_phase is not None
    if axes_amplitude is not None:
        ax = axes_amplitude
        ax.set_ylabel('Amplitude ratio')
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.grid(True)
        # Reference line at unity gain.
        ax.axhline(1.0, lw=0.5, color='black')
        if have_phase:
            # Phase panel sits below; hide the shared tick labels here.
            ax.xaxis.set_ticklabels([])
        else:
            # Amplitude is the only panel: it gets the frequency label.
            ax.set_xlabel('Frequency [Hz]')
            ax.set_xscale('log')
    if have_phase:
        ax = axes_phase
        ax.set_ylabel('Phase [$\\pi$]')
        ax.set_xscale('log')
        ax.set_xlabel('Frequency [Hz]')
        ax.grid(True)
        # Reference line at zero phase.
        ax.axhline(0.0, lw=0.5, color='black')
class depthDataset_iBims1(Dataset):
    """iBims-1 benchmark dataset: loads per-image .mat files listed in ``imagelist``.

    Each sample carries the RGB image, ground-truth depth, edge and
    calibration maps, validity masks, and the plane-parameter arrays for
    wall/table/floor (the ``*_paras`` entries are passed through as raw
    arrays, everything else is wrapped as a PIL image).
    """

    # (sample key, .mat field, convert-to-PIL-image) in original dict order.
    _FIELDS = (
        ('image', 'rgb', True),
        ('depth', 'depth', True),
        ('edges', 'edges', True),
        ('calib', 'calib', True),
        ('mask_invalid', 'mask_invalid', True),
        ('mask_transp', 'mask_transp', True),
        ('mask_wall', 'mask_wall', True),
        ('mask_wall_paras', 'mask_wall_paras', False),
        ('mask_table', 'mask_table', True),
        ('mask_table_paras', 'mask_table_paras', False),
        ('mask_floor', 'mask_floor', True),
        ('mask_floor_paras', 'mask_floor_paras', False),
    )

    def __init__(self, imagelist, transform=None):
        with open(imagelist) as f:
            image_names = f.readlines()
        self.image_names = [x.strip() for x in image_names]
        self.transform = transform

    def __getitem__(self, idx):
        image_name = self.image_names[idx]
        image_data = io.loadmat('./data/iBims1/ibims1_core_mat/' + image_name)
        data = image_data['data']
        sample = {}
        for sample_key, mat_key, as_image in self._FIELDS:
            # Each struct field is a 1x1 object array wrapping the payload.
            value = data[mat_key][0][0]
            sample[sample_key] = Image.fromarray(value) if as_image else value
        if self.transform:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.image_names)
class Solution(object):
    """Best Time to Buy and Sell Stock (single transaction)."""

    def maxProfit(self, prices):
        """Return the maximum profit from one buy followed by one sell.

        Scans once, tracking the cheapest price seen so far and the best
        spread achievable selling at the current price. Returns 0 for an
        empty list or when no profitable trade exists.
        """
        if not prices:
            return 0
        cheapest = prices[0]
        best = 0
        for price in prices[1:]:
            cheapest = min(cheapest, price)
            best = max(best, price - cheapest)
        return best
class BaseOutputTest():
    """Collect mesh coordinates and common quantities used by output tests.

    Pulls node/edge coordinates for each electrode domain (and, for
    lithium-ion models, the particle domains) out of the discretisation,
    plus electrode thicknesses and the applied current density evaluated
    on the solution times.
    """

    def __init__(self, model, param, disc, solution, operating_condition):
        self.model = model
        self.param = param
        self.disc = disc
        self.solution = solution
        self.operating_condition = operating_condition
        # With a single particle phase there is no phase prefix in names.
        one_phase_n = self.model.options.negative['particle phases'] == '1'
        one_phase_p = self.model.options.positive['particle phases'] == '1'
        self.phase_name_n = '' if one_phase_n else 'primary '
        self.phase_name_p = '' if one_phase_p else 'primary '
        self.t = solution.t
        geometry = pybamm.geometric_parameters
        # Through-cell (x) coordinates: nodes and edges for each domain.
        self.x_n = disc.mesh['negative electrode'].nodes
        self.x_s = disc.mesh['separator'].nodes
        self.x_p = disc.mesh['positive electrode'].nodes
        whole_cell = ['negative electrode', 'separator', 'positive electrode']
        self.x = disc.mesh[whole_cell].nodes
        self.x_n_edge = disc.mesh['negative electrode'].edges
        self.x_s_edge = disc.mesh['separator'].edges
        self.x_p_edge = disc.mesh['positive electrode'].edges
        self.x_edge = disc.mesh[whole_cell].edges
        if isinstance(self.model, pybamm.lithium_ion.BaseModel):
            # Particle (r) coordinates only exist for lithium-ion models.
            self.r_n = disc.mesh['negative particle'].nodes
            self.r_p = disc.mesh['positive particle'].nodes
            self.r_n_edge = disc.mesh['negative particle'].edges
            self.r_p_edge = disc.mesh['positive particle'].edges
            if self.model.options['particle size'] == 'distribution':
                self.R_n = disc.mesh['negative particle size'].nodes
                self.R_p = disc.mesh['positive particle size'].nodes
        # Electrode thicknesses and applied current density over time.
        self.L_n = param.evaluate(geometry.n.L)
        self.L_p = param.evaluate(geometry.p.L)
        current_param = self.model.param.current_density_with_time
        self.i_cell = param.process_symbol(current_param).evaluate(solution.t)
def add_preprocess_args(parser):
    """Register the data-preprocessing CLI options on ``parser``.

    All options are attached to a single 'Preprocessing' argument group;
    the parser is returned for chaining.
    """
    group = parser.add_argument_group('Preprocessing')
    # Languages and input file prefixes.
    group.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
    group.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
    group.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix')
    group.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes')
    group.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes')
    group.add_argument('--align-suffix', metavar='FP', default=None, help='alignment file suffix')
    group.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
    # Vocabulary construction.
    group.add_argument('--thresholdtgt', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    group.add_argument('--thresholdsrc', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    group.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
    group.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
    group.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')
    group.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')
    group.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
    # CONSISTENCY FIX: this option was added via ``parser`` while every other
    # option uses ``group``; attaching it to the group keeps it under
    # 'Preprocessing' in --help. Parsing behaviour is unchanged (the group
    # belongs to the same parser).
    group.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation')
    group.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
    group.add_argument('--only-source', action='store_true', help='Only process the source language')
    group.add_argument('--padding-factor', metavar='N', default=8, type=int, help='Pad dictionary size to be multiple of N')
    group.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers')
    return parser
def _generate_default_transformer_epoch_optim_loop_asset(file, image_loader, transformer, criterion, criterion_update_fn, epochs, get_lr_scheduler, get_optimizer=None):
    """Run an epoch-based transformer training loop and store it as an asset.

    A deep copy of the untrained transformer is recorded as the asset input;
    the transformer after training is recorded as the expected output.
    """
    untrained_transformer = deepcopy(transformer)
    if get_optimizer is None:
        get_optimizer = default_model_optimizer
    optimizer = get_optimizer(transformer)
    lr_scheduler = get_lr_scheduler(optimizer)
    for _ in range(epochs):
        for target_image in image_loader:
            criterion_update_fn(target_image, criterion)
            output_image = transformer(target_image)

            def compute_loss():
                # Closure so optimizers that re-evaluate (e.g. LBFGS) work.
                optimizer.zero_grad()
                loss = criterion(output_image)
                loss.backward()
                return loss

            optimizer.step(compute_loss)
        # One scheduler step per epoch.
        lr_scheduler.step()
    asset_input = {'image_loader': image_loader, 'transformer': untrained_transformer, 'criterion': criterion, 'criterion_update_fn': criterion_update_fn, 'epochs': epochs}
    asset_params = {'get_optimizer': get_optimizer, 'get_lr_scheduler': get_lr_scheduler}
    asset_output = {'transformer': transformer}
    store_asset(asset_input, asset_params, asset_output, file)
class Munkres():
def __init__(self):
self.C = None
self.row_covered = []
self.col_covered = []
self.n = 0
self.Z0_r = 0
self.Z0_c = 0
self.marked = None
self.path = None
def make_cost_matrix(profit_matrix, inversion_function):
import munkres
return munkres.make_cost_matrix(profit_matrix, inversion_function)
make_cost_matrix = staticmethod(make_cost_matrix)
def pad_matrix(self, matrix, pad_value=0):
max_columns = 0
total_rows = len(matrix)
for row in matrix:
max_columns = max(max_columns, len(row))
total_rows = max(max_columns, total_rows)
new_matrix = []
for row in matrix:
row_len = len(row)
new_row = row[:]
if (total_rows > row_len):
new_row += ([0] * (total_rows - row_len))
new_matrix += [new_row]
while (len(new_matrix) < total_rows):
new_matrix += [([0] * total_rows)]
return new_matrix
def compute(self, cost_matrix):
self.C = self.pad_matrix(cost_matrix)
self.n = len(self.C)
self.original_length = len(cost_matrix)
self.original_width = len(cost_matrix[0])
self.row_covered = [False for i in range(self.n)]
self.col_covered = [False for i in range(self.n)]
self.Z0_r = 0
self.Z0_c = 0
self.path = self.__make_matrix((self.n * 2), 0)
self.marked = self.__make_matrix(self.n, 0)
done = False
step = 1
steps = {1: self.__step1, 2: self.__step2, 3: self.__step3, 4: self.__step4, 5: self.__step5, 6: self.__step6}
while (not done):
try:
func = steps[step]
step = func()
except KeyError:
done = True
results = []
for i in range(self.original_length):
for j in range(self.original_width):
if (self.marked[i][j] == 1):
results += [(i, j)]
return results
def __copy_matrix(self, matrix):
return copy.deepcopy(matrix)
def __make_matrix(self, n, val):
matrix = []
for i in range(n):
matrix += [[val for j in range(n)]]
return matrix
def __step1(self):
C = self.C
n = self.n
for i in range(n):
minval = min(self.C[i])
for j in range(n):
self.C[i][j] -= minval
return 2
def __step2(self):
n = self.n
for i in range(n):
for j in range(n):
if ((self.C[i][j] == 0) and (not self.col_covered[j]) and (not self.row_covered[i])):
self.marked[i][j] = 1
self.col_covered[j] = True
self.row_covered[i] = True
self.__clear_covers()
return 3
def __step3(self):
n = self.n
count = 0
for i in range(n):
for j in range(n):
if (self.marked[i][j] == 1):
self.col_covered[j] = True
count += 1
if (count >= n):
step = 7
else:
step = 4
return step
def __step4(self):
step = 0
done = False
row = (- 1)
col = (- 1)
star_col = (- 1)
while (not done):
(row, col) = self.__find_a_zero()
if (row < 0):
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if (star_col >= 0):
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
def __step5(self):
count = 0
path = self.path
path[count][0] = self.Z0_r
path[count][1] = self.Z0_c
done = False
while (not done):
row = self.__find_star_in_col(path[count][1])
if (row >= 0):
count += 1
path[count][0] = row
path[count][1] = path[(count - 1)][1]
else:
done = True
if (not done):
col = self.__find_prime_in_row(path[count][0])
count += 1
path[count][0] = path[(count - 1)][0]
path[count][1] = col
self.__convert_path(path, count)
self.__clear_covers()
self.__erase_primes()
return 3
def __step6(self):
minval = self.__find_smallest()
for i in range(self.n):
for j in range(self.n):
if self.row_covered[i]:
self.C[i][j] += minval
if (not self.col_covered[j]):
self.C[i][j] -= minval
return 4
def __find_smallest(self):
minval = .0
for i in range(self.n):
for j in range(self.n):
if ((not self.row_covered[i]) and (not self.col_covered[j])):
if (minval > self.C[i][j]):
minval = self.C[i][j]
return minval
def __find_a_zero(self):
row = (- 1)
col = (- 1)
i = 0
n = self.n
done = False
while (not done):
j = 0
while True:
if ((self.C[i][j] == 0) and (not self.row_covered[i]) and (not self.col_covered[j])):
row = i
col = j
done = True
j += 1
if (j >= n):
break
i += 1
if (i >= n):
done = True
return (row, col)
def __find_star_in_row(self, row):
col = (- 1)
for j in range(self.n):
if (self.marked[row][j] == 1):
col = j
break
return col
def __find_star_in_col(self, col):
row = (- 1)
for i in range(self.n):
if (self.marked[i][col] == 1):
row = i
break
return row
def __find_prime_in_row(self, row):
col = (- 1)
for j in range(self.n):
if (self.marked[row][j] == 2):
col = j
break
return col
def __convert_path(self, path, count):
for i in range((count + 1)):
if (self.marked[path[i][0]][path[i][1]] == 1):
self.marked[path[i][0]][path[i][1]] = 0
else:
self.marked[path[i][0]][path[i][1]] = 1
def __clear_covers(self):
for i in range(self.n):
self.row_covered[i] = False
self.col_covered[i] = False
def __erase_primes(self):
for i in range(self.n):
for j in range(self.n):
if (self.marked[i][j] == 2):
self.marked[i][j] = 0 |
class BTOOLS_OT_add_array(bpy.types.Operator):
    """Add an array to the scene via the Array builder."""

    bl_idname = 'btools.add_array'
    bl_label = 'Add Array'
    bl_options = {'REGISTER', 'UNDO', 'PRESET'}

    @classmethod
    def poll(cls, context):
        # BUG FIX: Blender invokes Operator.poll on the class, so it must be
        # declared as a classmethod; the decorator was missing.
        return context.mode == 'OBJECT'

    def execute(self, context):
        Array.build(context)
        return {'FINISHED'}
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
    """Build the widget tree for MainWindow: a 400x413 form with a grid of
    registration fields (name, gender, age, password, country, profession),
    an agree checkbox, a dialog button box, a menu bar and a status bar.

    NOTE(review): this looks like pyuic-generated code (object names,
    retranslateUi/connectSlotsByName pattern) — confirm before hand-editing,
    since regenerating from the .ui file would overwrite changes.
    """
    # Top-level window, central widget and its vertical layout.
    MainWindow.setObjectName('MainWindow')
    MainWindow.resize(400, 413)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName('centralwidget')
    self.vboxlayout = QtWidgets.QVBoxLayout(self.centralwidget)
    self.vboxlayout.setContentsMargins(9, 9, 9, 9)
    self.vboxlayout.setSpacing(6)
    self.vboxlayout.setObjectName('vboxlayout')
    # Main frame holding the form's grid layout.
    self.mainFrame = QtWidgets.QFrame(self.centralwidget)
    self.mainFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.mainFrame.setFrameShadow(QtWidgets.QFrame.Raised)
    self.mainFrame.setObjectName('mainFrame')
    self.gridlayout = QtWidgets.QGridLayout(self.mainFrame)
    self.gridlayout.setContentsMargins(9, 9, 9, 9)
    self.gridlayout.setSpacing(6)
    self.gridlayout.setObjectName('gridlayout')
    # Form widgets; grid coordinates are (row, column, rowSpan, columnSpan).
    self.agreeCheckBox = QtWidgets.QCheckBox(self.mainFrame)
    self.agreeCheckBox.setObjectName('agreeCheckBox')
    self.gridlayout.addWidget(self.agreeCheckBox, 6, 0, 1, 5)
    self.label = QtWidgets.QLabel(self.mainFrame)
    self.label.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTop) | QtCore.Qt.AlignTrailing))
    self.label.setObjectName('label')
    self.gridlayout.addWidget(self.label, 5, 0, 1, 1)
    self.nameLabel = QtWidgets.QLabel(self.mainFrame)
    self.nameLabel.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
    self.nameLabel.setObjectName('nameLabel')
    self.gridlayout.addWidget(self.nameLabel, 0, 0, 1, 1)
    self.maleRadioButton = QtWidgets.QRadioButton(self.mainFrame)
    self.maleRadioButton.setObjectName('maleRadioButton')
    self.gridlayout.addWidget(self.maleRadioButton, 1, 1, 1, 1)
    self.passwordLabel = QtWidgets.QLabel(self.mainFrame)
    self.passwordLabel.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
    self.passwordLabel.setObjectName('passwordLabel')
    self.gridlayout.addWidget(self.passwordLabel, 3, 0, 1, 1)
    # Country combo; the 7 empty items get their text in retranslateUi().
    self.countryCombo = QtWidgets.QComboBox(self.mainFrame)
    self.countryCombo.setObjectName('countryCombo')
    self.countryCombo.addItem('')
    self.countryCombo.addItem('')
    self.countryCombo.addItem('')
    self.countryCombo.addItem('')
    self.countryCombo.addItem('')
    self.countryCombo.addItem('')
    self.countryCombo.addItem('')
    self.gridlayout.addWidget(self.countryCombo, 4, 1, 1, 4)
    self.ageLabel = QtWidgets.QLabel(self.mainFrame)
    self.ageLabel.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
    self.ageLabel.setObjectName('ageLabel')
    self.gridlayout.addWidget(self.ageLabel, 2, 0, 1, 1)
    self.countryLabel = QtWidgets.QLabel(self.mainFrame)
    self.countryLabel.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
    self.countryLabel.setObjectName('countryLabel')
    self.gridlayout.addWidget(self.countryLabel, 4, 0, 1, 1)
    self.genderLabel = QtWidgets.QLabel(self.mainFrame)
    self.genderLabel.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
    self.genderLabel.setObjectName('genderLabel')
    self.gridlayout.addWidget(self.genderLabel, 1, 0, 1, 1)
    self.passwordEdit = QtWidgets.QLineEdit(self.mainFrame)
    self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)
    self.passwordEdit.setObjectName('passwordEdit')
    self.gridlayout.addWidget(self.passwordEdit, 3, 1, 1, 4)
    self.femaleRadioButton = QtWidgets.QRadioButton(self.mainFrame)
    self.femaleRadioButton.setObjectName('femaleRadioButton')
    self.gridlayout.addWidget(self.femaleRadioButton, 1, 2, 1, 2)
    self.ageSpinBox = QtWidgets.QSpinBox(self.mainFrame)
    self.ageSpinBox.setMinimum(12)
    self.ageSpinBox.setProperty('value', 22)
    self.ageSpinBox.setObjectName('ageSpinBox')
    self.gridlayout.addWidget(self.ageSpinBox, 2, 1, 1, 2)
    self.nameCombo = QtWidgets.QComboBox(self.mainFrame)
    self.nameCombo.setEditable(True)
    self.nameCombo.setObjectName('nameCombo')
    self.gridlayout.addWidget(self.nameCombo, 0, 1, 1, 4)
    # Horizontal spacers keeping the radio buttons / spin box compact.
    spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridlayout.addItem(spacerItem, 1, 4, 1, 1)
    spacerItem1 = QtWidgets.QSpacerItem(61, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridlayout.addItem(spacerItem1, 2, 3, 1, 2)
    self.buttonBox = QtWidgets.QDialogButtonBox(self.mainFrame)
    self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
    self.buttonBox.setStandardButtons(((QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.NoButton) | QtWidgets.QDialogButtonBox.Ok))
    self.buttonBox.setObjectName('buttonBox')
    self.gridlayout.addWidget(self.buttonBox, 7, 3, 1, 2)
    # Profession list; the 3 blank items get their text in retranslateUi().
    self.professionList = QtWidgets.QListWidget(self.mainFrame)
    self.professionList.setObjectName('professionList')
    item = QtWidgets.QListWidgetItem()
    self.professionList.addItem(item)
    item = QtWidgets.QListWidgetItem()
    self.professionList.addItem(item)
    item = QtWidgets.QListWidgetItem()
    self.professionList.addItem(item)
    self.gridlayout.addWidget(self.professionList, 5, 1, 1, 4)
    self.vboxlayout.addWidget(self.mainFrame)
    MainWindow.setCentralWidget(self.centralwidget)
    # Menu bar and status bar.
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 400, 29))
    self.menubar.setObjectName('menubar')
    self.menu_File = QtWidgets.QMenu(self.menubar)
    self.menu_File.setObjectName('menu_File')
    self.menu_Help = QtWidgets.QMenu(self.menubar)
    self.menu_Help.setObjectName('menu_Help')
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName('statusbar')
    MainWindow.setStatusBar(self.statusbar)
    # Actions and menu wiring.
    self.exitAction = QtWidgets.QAction(MainWindow)
    self.exitAction.setObjectName('exitAction')
    self.aboutQtAction = QtWidgets.QAction(MainWindow)
    self.aboutQtAction.setObjectName('aboutQtAction')
    self.editStyleAction = QtWidgets.QAction(MainWindow)
    self.editStyleAction.setObjectName('editStyleAction')
    self.aboutAction = QtWidgets.QAction(MainWindow)
    self.aboutAction.setObjectName('aboutAction')
    self.menu_File.addAction(self.editStyleAction)
    self.menu_File.addSeparator()
    self.menu_File.addAction(self.exitAction)
    self.menu_Help.addAction(self.aboutAction)
    self.menu_Help.addAction(self.aboutQtAction)
    self.menubar.addAction(self.menu_File.menuAction())
    self.menubar.addAction(self.menu_Help.menuAction())
    # Label buddies so mnemonics focus the matching input widget.
    self.label.setBuddy(self.professionList)
    self.nameLabel.setBuddy(self.nameCombo)
    self.passwordLabel.setBuddy(self.passwordEdit)
    self.ageLabel.setBuddy(self.ageSpinBox)
    self.countryLabel.setBuddy(self.countryCombo)
    # Apply translations, defaults, then auto-connect slots by object name.
    self.retranslateUi(MainWindow)
    self.countryCombo.setCurrentIndex(6)
    self.professionList.setCurrentRow(0)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Install all user-visible strings on the widgets.

        pyuic-generated translation hook: every literal is routed through
        QCoreApplication.translate so Qt's linguist tooling can localize it.
        Called once from setupUi; call again after a QLocale change.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'Style Sheet'))
        self.agreeCheckBox.setToolTip(_translate('MainWindow', 'Please read the LICENSE file before checking'))
        self.agreeCheckBox.setText(_translate('MainWindow', 'I accept the terms and &conditions'))
        self.label.setText(_translate('MainWindow', 'Profession:'))
        self.nameLabel.setText(_translate('MainWindow', '&Name:'))
        self.maleRadioButton.setToolTip(_translate('MainWindow', 'Check this if you are male'))
        self.maleRadioButton.setText(_translate('MainWindow', '&Male'))
        self.passwordLabel.setText(_translate('MainWindow', '&Password:'))
        self.countryCombo.setToolTip(_translate('MainWindow', 'Specify country of origin'))
        self.countryCombo.setStatusTip(_translate('MainWindow', 'Specify country of origin'))
        # Country combo entries; index order matters (setupUi selects index 6 at startup).
        self.countryCombo.setItemText(0, _translate('MainWindow', 'Egypt'))
        self.countryCombo.setItemText(1, _translate('MainWindow', 'France'))
        self.countryCombo.setItemText(2, _translate('MainWindow', 'Germany'))
        self.countryCombo.setItemText(3, _translate('MainWindow', 'India'))
        self.countryCombo.setItemText(4, _translate('MainWindow', 'Italy'))
        self.countryCombo.setItemText(5, _translate('MainWindow', 'Norway'))
        self.countryCombo.setItemText(6, _translate('MainWindow', 'Pakistan'))
        self.ageLabel.setText(_translate('MainWindow', '&Age:'))
        self.countryLabel.setText(_translate('MainWindow', 'Country:'))
        self.genderLabel.setText(_translate('MainWindow', 'Gender:'))
        self.passwordEdit.setToolTip(_translate('MainWindow', 'Specify your password'))
        self.passwordEdit.setStatusTip(_translate('MainWindow', 'Specify your password'))
        self.passwordEdit.setText(_translate('MainWindow', 'Password'))
        self.femaleRadioButton.setToolTip(_translate('MainWindow', 'Check this if you are female'))
        self.femaleRadioButton.setText(_translate('MainWindow', '&Female'))
        self.ageSpinBox.setToolTip(_translate('MainWindow', 'Specify your age'))
        self.ageSpinBox.setStatusTip(_translate('MainWindow', 'Specify your age'))
        self.nameCombo.setToolTip(_translate('MainWindow', 'Specify your name'))
        self.professionList.setToolTip(_translate('MainWindow', 'Select your profession'))
        self.professionList.setStatusTip(_translate('MainWindow', 'Specify your name here'))
        self.professionList.setWhatsThis(_translate('MainWindow', 'Specify your name here'))
        # Temporarily disable sorting so items land at their fixed indices,
        # then restore whatever sorting state the list had.
        __sortingEnabled = self.professionList.isSortingEnabled()
        self.professionList.setSortingEnabled(False)
        item = self.professionList.item(0)
        item.setText(_translate('MainWindow', 'Developer'))
        item = self.professionList.item(1)
        item.setText(_translate('MainWindow', 'Student'))
        item = self.professionList.item(2)
        item.setText(_translate('MainWindow', 'Fisherman'))
        self.professionList.setSortingEnabled(__sortingEnabled)
        self.menu_File.setTitle(_translate('MainWindow', '&File'))
        self.menu_Help.setTitle(_translate('MainWindow', '&Help'))
        self.exitAction.setText(_translate('MainWindow', '&Exit'))
        self.aboutQtAction.setText(_translate('MainWindow', 'About Qt'))
        self.editStyleAction.setText(_translate('MainWindow', 'Edit &Style...'))
        self.aboutAction.setText(_translate('MainWindow', 'About'))
def backpressure(func, *argslist, inflight_limit=1000, **kwargs):
    """Submit ``func.remote`` over zipped argument lists with bounded in-flight tasks.

    Whenever more than ``inflight_limit`` object refs are outstanding,
    block via ``ray.wait`` until the oldest excess refs are ready before
    submitting the next task.  Returns every object ref in submission order.
    """
    refs = []
    for task_args in zip(*argslist):
        outstanding = len(refs)
        if outstanding > inflight_limit:
            # Wait for the surplus tasks to complete before queuing another.
            ray.wait(refs, num_returns=outstanding - inflight_limit)
        refs.append(func.remote(*task_args, **kwargs))
    return refs
class TestDwf(TestCase):
    """Dynamic walker function (DWF) tests for custom pySMT node types.

    NOTE(review): methods are numbered because node-type creation mutates
    global registries; unittest's alphabetical ordering is load-bearing here.
    """

    def test_00_new_node_type(self):
        """Creating node types: explicit id, duplicate rejection, custom offset."""
        self.assertNotIn(199, CUSTOM_NODE_TYPES, 'Initially there should be no custom node with id 199')
        idx = new_node_type(node_id=199)
        self.assertIsNotNone(idx)
        # Re-registering an already-used id must be rejected.
        with self.assertRaises(AssertionError):
            new_node_type(idx)
        n = new_node_type((idx + 100))
        self.assertEqual(n, (idx + 100))

    def test_01_dwf(self):
        """Extend type-checker and printer walkers for a brand new XOR node."""
        def hrprinter_walk_XOR(self, formula):
            # Printer callback: render the new node as "(a *+* b)".
            self.stream.write('(')
            (yield formula.arg(0))
            self.stream.write(' *+* ')
            (yield formula.arg(1))
            self.stream.write(')')
        add_dwf = get_env().add_dynamic_walker_function
        create_node = get_env().formula_manager.create_node
        XOR = new_node_type()
        add_dwf(XOR, SimpleTypeChecker, SimpleTypeChecker.walk_bool_to_bool)
        add_dwf(XOR, HRPrinter, hrprinter_walk_XOR)
        x = Symbol('x')
        f1 = create_node(node_type=XOR, args=(x, x))
        self.assertIsNotNone(f1)
        s_f1 = str(f1)
        self.assertEqual(s_f1, '(x *+* x)')
        # Walkers that were NOT extended (e.g. the simplifier) must fail loudly.
        with self.assertRaises(UnsupportedOperatorError):
            f1.simplify()

    def test_02_all_types(self):
        """all_types() must immediately reflect a newly created node type."""
        old_types_set = set(all_types())
        new_t = new_node_type()
        new_types_set = set(all_types())
        self.assertEqual((new_types_set - old_types_set), set([new_t]))
def get_parsed_context(args):
    """Parse ``key=value`` CLI arguments into ``{'argDict': {...}}``.

    A value containing further '=' keeps everything after the first one;
    an argument with no '=' maps to an empty-string value.  With no args
    at all, an empty argDict is returned and a usage hint is logged.
    """
    logger.debug('starting')
    if not args:
        logger.debug('pipeline invoked without context arg set. For this dict parser you can use something like:\npypyr pipelinename key1=value1 key2=value2')
        return {'argDict': {}}
    parsed = {}
    for element in args:
        key, _, value = element.partition('=')
        parsed[key] = value
    return {'argDict': parsed}
class EmojiParserOptions(ParserOptions):
    """Commit-parser options mapping gitmoji tags to version bump levels."""
    # Emojis that signal a breaking change (major bump).
    major_tags: Tuple[(str, ...)] = (':boom:',)
    # Emojis that signal a feature-level change (minor bump).
    minor_tags: Tuple[(str, ...)] = (':sparkles:', ':children_crossing:', ':lipstick:', ':iphone:', ':egg:', ':chart_with_upwards_trend:')
    # Emojis that signal a fix-level change (patch bump).
    patch_tags: Tuple[(str, ...)] = (':ambulance:', ':lock:', ':bug:', ':zap:', ':goal_net:', ':alien:', ':wheelchair:', ':speech_balloon:', ':mag:', ':apple:', ':penguin:', ':checkered_flag:', ':robot:', ':green_apple:')
    # Commits carrying none of the tags above trigger no release.
    default_bump_level: LevelBump = LevelBump.NO_RELEASE
def _cumulative_starts(num_each):
    """Return the start offset of each video given per-video frame counts.

    E.g. [2, 1, 3] -> [0, 2, 3]: video i starts at the running sum of the
    frame counts of all preceding videos.
    """
    starts = []
    total = 0
    for n in num_each:
        starts.append(total)
        total += n
    return starts

def get_data(data_path):
    """Load a pickled split file and return labels, per-video frame counts
    and per-video start offsets for the train/val/test splits.

    The pickle is expected to hold a 9-element sequence laid out as:
    [train_paths, val_paths, train_labels, val_labels, train_num_each,
     val_num_each, test_paths, test_labels, test_num_each].
    Labels are converted to int64 numpy arrays; paths are only reported.
    """
    with open(data_path, 'rb') as f:
        train_test_paths_labels = pickle.load(f)
    # Fixed positional layout of the pickle (see docstring).
    train_paths_80 = train_test_paths_labels[0]
    val_paths_80 = train_test_paths_labels[1]
    train_labels_80 = train_test_paths_labels[2]
    val_labels_80 = train_test_paths_labels[3]
    train_num_each_80 = train_test_paths_labels[4]
    val_num_each_80 = train_test_paths_labels[5]
    test_paths_80 = train_test_paths_labels[6]
    test_labels_80 = train_test_paths_labels[7]
    test_num_each_80 = train_test_paths_labels[8]
    print('train_paths_80 : {:6d}'.format(len(train_paths_80)))
    print('train_labels_80 : {:6d}'.format(len(train_labels_80)))
    print('valid_paths_80 : {:6d}'.format(len(val_paths_80)))
    print('valid_labels_80 : {:6d}'.format(len(val_labels_80)))
    train_labels_80 = np.asarray(train_labels_80, dtype=np.int64)
    val_labels_80 = np.asarray(val_labels_80, dtype=np.int64)
    test_labels_80 = np.asarray(test_labels_80, dtype=np.int64)
    # De-duplicated: the three copy-pasted cumulative-offset loops now share
    # one helper.
    train_start_vidx = _cumulative_starts(train_num_each_80)
    val_start_vidx = _cumulative_starts(val_num_each_80)
    test_start_vidx = _cumulative_starts(test_num_each_80)
    return (train_labels_80, train_num_each_80, train_start_vidx, val_labels_80, val_num_each_80, val_start_vidx, test_labels_80, test_num_each_80, test_start_vidx)
def get_files(**kwargs):
    """Build the scaffold files for a new Hatch-style Python project.

    Expects kwargs: year, author, email, package_name, project_name,
    project_name_normalized.  Returns File objects for LICENSE.txt, the
    package __init__.py / __about__.py, README.md and pyproject.toml.
    The embedded templates below are rendered verbatim -- do not reflow them.
    """
    return [File(Path('LICENSE.txt'), MIT.replace('<year>', f"{kwargs['year']}-present", 1).replace('<copyright holders>', f"{kwargs['author']} <{kwargs['email']}>", 1)), File(Path('src', kwargs['package_name'], '__init__.py'), f'''# SPDX-FileCopyrightText: {kwargs['year']}-present {kwargs['author']} <{kwargs['email']}>
#
# SPDX-License-Identifier: MIT
'''), File(Path('src', kwargs['package_name'], '__about__.py'), f'''# SPDX-FileCopyrightText: {kwargs['year']}-present {kwargs['author']} <{kwargs['email']}>
#
# SPDX-License-Identifier: MIT
__version__ = "0.0.1"
'''), File(Path('README.md'), f'''# {kwargs['project_name']}
[
- [License](#license)
## Installation
```console
pip install {kwargs['project_name_normalized']}
```
## License
`{kwargs['project_name_normalized']}` is distributed under the terms of the [MIT]( license.
'''), File(Path('pyproject.toml'), f'''[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "{kwargs['project_name_normalized']}"
dynamic = ["version"]
description = ''
readme = "README.md"
requires-python = ">=3.8"
license = "MIT"
keywords = []
authors = [
  {{ name = "{kwargs['author']}", email = "{kwargs['email']}" }},
]
classifiers = [
  "Development Status :: 4 - Beta",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3.8",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = []
[project.urls]
Documentation = "
Issues = "
Source = "
[tool.hatch.version]
path = "src/{kwargs['package_name']}/__about__.py"
''')]
class Effect804(BaseEffect):
    """Passive effect: scales a module's capacitor need by its loaded
    charge's capNeedBonus attribute.

    Only applies when the module actually consumes capacitor (attribute
    present and non-negative).
    """
    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        cap_need = module.item.getAttribute('capacitorNeed')
        # Guard: nothing to boost if the attribute is absent or negative.
        if cap_need is None or cap_need < 0:
            return
        bonus = module.getModifiedChargeAttr('capNeedBonus') or 0
        module.boostItemAttr('capacitorNeed', bonus, **kwargs)
def _calcparams_correct_Python_type_check(out_value, numeric_args):
if any((isinstance(a, pd.Series) for a in numeric_args)):
return isinstance(out_value, pd.Series)
elif any((isinstance(a, np.ndarray) for a in numeric_args)):
return isinstance(out_value, np.ndarray)
return np.isscalar(out_value) |
@_model()
@_legacy_interface(weights=('pretrained', ResNet101_Weights.IMAGENET1K_V1))
def resnet101(*, weights: Optional[ResNet101_Weights]=None, progress: bool=True, **kwargs: Any) -> ResNet:
    """Build a ResNet-101 (torchvision-style), optionally with pretrained weights.

    Fix: `_model()` and `_legacy_interface(...)` were written as bare calls
    whose returned wrappers were discarded, so neither model registration
    nor the legacy ``pretrained=`` keyword remapping ever took effect; they
    are plainly decorator factories and are restored as decorators here.
    """
    weights = ResNet101_Weights.verify(weights)
    # Bottleneck blocks with the canonical [3, 4, 23, 3] stage depths.
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
def when_program_starts_1(self):
    """Main sprite loop (Scratch/pytch-style 'when program starts' handler).

    Forever: glide to a random position over one second, face the mouse
    pointer, bounce off edges, wait a second, then reset list 'l' whenever
    it contains the sentinel item 't'.
    """
    while True:
        self.glide_to_random_position(1.0)
        self.point_towards_mouse_pointer()
        self.if_on_edge_bounce()
        self.wait(1.0)
        # Sentinel 't' in list 'l' triggers a reset to ['list', 'owl'].
        if self.list_contains_item('l', 't'):
            self.delete_all_from_list('l')
            self.add_value_to_list('l', 'list')
            self.add_value_to_list('l', 'owl')
class RepositoryNotification(BaseModel):
    """ORM model: a configured notification hook attached to a repository."""
    # Externally exposed identifier; indexed for lookup by uuid.
    uuid = CharField(default=uuid_generator, index=True)
    repository = ForeignKeyField(Repository)
    # What fires the notification, and the delivery mechanism used.
    event = EnumField(ExternalNotificationEvent)
    method = EnumField(ExternalNotificationMethod)
    title = CharField(null=True)
    # JSON-encoded configuration blobs stored as text (method config and
    # per-event config respectively).
    config_json = TextField()
    event_config_json = TextField(default='{}')
    # Consecutive delivery failures -- presumably used to throttle/disable
    # broken hooks; TODO confirm against callers.
    number_of_failures = IntegerField(default=0)
class TestAbstractmethod(TestCase):
    """Tests for the custom ``abstractmethod`` decorator."""

    def testInit(self):
        """Decoration preserves the function name; non-callables are rejected."""
        class ABCTest(metaclass=ABCMeta):
            def meth(self):
                pass
        assert (ABCTest.meth.__name__ == 'meth')
        with self.assertRaises(ABCException) as exc:
            abstractmethod(1)
        # Fix: unittest's assertRaises context exposes the raised exception
        # as `.exception` (not pytest's `.value`), and it must be stringified
        # before comparing to the message -- the old
        # `exc.value == 'Function is not callable: 1'` raised AttributeError.
        assert (str(exc.exception) == 'Function is not callable: 1')
class TypedListType(CType):
    """PyTensor C-backed type for homogeneous (possibly nested) Python lists.

    ``ttype`` is the element type; ``depth`` adds that many extra levels of
    list nesting (depth=1 means list-of-list of ttype, etc.).
    """

    def __init__(self, ttype, depth=0):
        # ttype: element Type; depth: extra nesting levels (>= 0).
        if (depth < 0):
            raise ValueError('Please specify a depth superior or equal to 0')
        if (not isinstance(ttype, Type)):
            raise TypeError('Expected an PyTensor Type')
        if (depth == 0):
            self.ttype = ttype
        else:
            # Nesting is represented recursively: a depth-d list of ttype is
            # a list of depth-(d-1) TypedListType.
            self.ttype = TypedListType(ttype, (depth - 1))

    def filter(self, x, strict=False, allow_downcast=None):
        """Validate (strict) or element-wise convert (non-strict) a list value."""
        if strict:
            if (not isinstance(x, list)):
                raise TypeError('Expected a python list')
        else:
            # Non-strict: let the element type coerce each entry first.
            x = [self.ttype.filter(y) for y in x]
        if all((self.ttype.is_valid_value(y) for y in x)):
            return x
        else:
            raise TypeError(f'Expected all elements to be {self.ttype}')

    def __eq__(self, other):
        """Two list types are equal iff they wrap equal element types."""
        return ((type(self) == type(other)) and (self.ttype == other.ttype))

    def __hash__(self):
        return hash((type(self), self.ttype))

    def __str__(self):
        return (('TypedList <' + str(self.ttype)) + '>')

    def get_depth(self):
        """Number of extra nesting levels below this list type."""
        if isinstance(self.ttype, TypedListType):
            return (self.ttype.get_depth() + 1)
        else:
            return 0

    def values_eq(self, a, b):
        """Element-wise equality via the element type's values_eq."""
        if (len(a) != len(b)):
            return False
        for x in range(len(a)):
            if (not self.ttype.values_eq(a[x], b[x])):
                return False
        return True

    def may_share_memory(self, a, b):
        """True if any element of a may share memory with any element of b.

        NOTE(review): implicitly returns None (falsy) when no pair shares --
        callers treating the result as a bool are unaffected, but it is not
        a literal False.
        """
        if (a is b):
            return True
        if (not isinstance(a, list)):
            a = [a]
        if (not isinstance(b, list)):
            b = [b]
        for idx1 in range(len(a)):
            for idx2 in range(len(b)):
                if self.ttype.may_share_memory(a[idx1], b[idx2]):
                    return True

    def c_declare(self, name, sub, check_input=True):
        # C storage: the value is held as a PyListObject*.
        return f'''
        PyListObject* {name};
        '''

    def c_init(self, name, sub):
        return f'''
        {name} = NULL;
        '''

    def c_extract(self, name, sub, check_input=True, **kwargs):
        """C snippet pulling the Python list out of py_<name> (optionally type-checked)."""
        if check_input:
            pre = ('\n if (!PyList_Check(py_%(name)s)) {\n PyErr_SetString(PyExc_TypeError, "expected a list");\n %(fail)s\n }' % dict(name=name, fail=sub['fail']))
        else:
            pre = ''
        return (pre + ('\n %(name)s = (PyListObject*) (py_%(name)s);\n ' % dict(name=name, fail=sub['fail'])))

    def c_sync(self, name, sub):
        # Write the C-side pointer back to the Python-side variable.
        return ('\n Py_XDECREF(py_%(name)s);\n py_%(name)s = (PyObject*)(%(name)s);\n Py_INCREF(py_%(name)s);\n ' % dict(name=name))

    def c_cleanup(self, name, sub):
        return ''

    def c_code_cache_version(self):
        return (2,)

    # Convenience views mirroring ndarray-style attributes:
    # dtype is the element type; ndim is one more than the element's ndim.
    dtype = property((lambda self: self.ttype))
    ndim = property((lambda self: (self.ttype.ndim + 1)))
class Net(torch.nn.Module):
    """Two-layer fully connected classifier: Linear -> ReLU -> Linear."""

    def __init__(self, input_size=784, hidden_size=500, num_classes=10):
        super(Net, self).__init__()
        # Attribute names kept stable so state_dict keys do not change.
        self.fc1 = torch.nn.Linear(input_size, hidden_size)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(hidden_size, num_classes)

    def forward(self, input1):
        """Map a (batch, input_size) tensor to (batch, num_classes) logits."""
        hidden = self.relu(self.fc1(input1))
        return self.fc2(hidden)
def get_bucket_files(glob_pattern, base_dir, force=False, pattern_slice=None):
    """Download files matching GCS glob pattern(s) into ``base_dir``.

    ``glob_pattern`` is one pattern string or a list whose entries are either
    a pattern string or a nested sequence of alternative patterns.
    ``pattern_slice`` restricts which of each entry's matches are fetched.
    Raises RuntimeError without gcsfs, OSError for a missing directory or
    when nothing could be downloaded.
    """
    if pattern_slice is None:
        pattern_slice = slice(None)
    if gcsfs is None:
        raise RuntimeError("Missing 'gcsfs' dependency for GCS download.")
    if not os.path.isdir(base_dir):
        raise OSError('Directory does not exist: {}'.format(base_dir))
    if isinstance(glob_pattern, str):
        glob_pattern = [glob_pattern]
    fs = gcsfs.GCSFileSystem(token='anon')
    downloaded = []
    for pattern in glob_pattern:
        if isinstance(pattern, str):
            matches = list(fs.glob(pattern))
        else:
            # A nested sequence means "any of these patterns".
            matches = [fn for sub_pattern in pattern for fn in fs.glob(sub_pattern)]
        downloaded.extend(_download_gcs_files(matches[pattern_slice], fs, base_dir, force))
    if not downloaded:
        raise OSError('No files could be found or downloaded.')
    return downloaded
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    """PointNet Set Abstraction (SA) module, TF1 graph-mode.

    Samples and groups input points, applies a shared MLP (stack of 1x1
    conv2d), pools inside each local region, then optionally applies a
    second MLP.  Returns (new_xyz, new_points, idx), or
    (new_xyz, new_points, (idx, sample_idx)) when sampling indices exist.
    NOTE(review): variable-scope and op names are part of checkpoint
    compatibility -- do not rename scopes.
    """
    data_format = ('NCHW' if use_nchw else 'NHWC')
    sample_idx = None
    with tf.variable_scope(scope) as sc:
        if group_all:
            # Single group covering every input point.
            nsample = xyz.get_shape()[1].value
            (new_xyz, new_points, idx, grouped_xyz) = sample_and_group_all(xyz, points, use_xyz)
        else:
            (new_xyz, new_points, idx, sample_idx, grouped_xyz) = sample_and_group(npoint, radius, nsample, xyz, points, knn, use_xyz)
        # Shared per-point MLP over grouped features.
        if use_nchw:
            new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for (i, num_out_channel) in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv%d' % i), bn_decay=bn_decay, data_format=data_format)
        if use_nchw:
            new_points = tf.transpose(new_points, [0, 2, 3, 1])
        # Pool within each local region (axis 2 = neighbors).
        if (pooling == 'max'):
            new_points = tf.reduce_max(new_points, axis=[2], keepdims=True, name='maxpool')
        elif (pooling == 'avg'):
            new_points = tf.reduce_mean(new_points, axis=[2], keepdims=True, name='avgpool')
        elif (pooling == 'weighted_avg'):
            with tf.variable_scope('weighted_avg'):
                # Weight each neighbor by exp(-5 * distance to region center).
                dists = tf.norm(grouped_xyz, axis=(- 1), ord=2, keepdims=True)
                exp_dists = tf.exp(((- dists) * 5))
                weights = (exp_dists / tf.reduce_sum(exp_dists, axis=2, keepdims=True))
                new_points *= weights
                new_points = tf.reduce_sum(new_points, axis=2, keepdims=True)
        elif (pooling == 'max_and_avg'):
            max_points = tf.reduce_max(new_points, axis=[2], keepdims=True, name='maxpool')
            avg_points = tf.reduce_mean(new_points, axis=[2], keepdims=True, name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=(- 1))
        # Optional post-pooling MLP.
        if (mlp2 is not None):
            if use_nchw:
                new_points = tf.transpose(new_points, [0, 3, 1, 2])
            for (i, num_out_channel) in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv_post_%d' % i), bn_decay=bn_decay, data_format=data_format)
            if use_nchw:
                new_points = tf.transpose(new_points, [0, 2, 3, 1])
        # Drop the pooled singleton neighbor dimension.
        new_points = tf.squeeze(new_points, [2])
        if (sample_idx is not None):
            return (new_xyz, new_points, (idx, sample_idx))
        else:
            return (new_xyz, new_points, idx)
class FixedPortfolioPercentagePositionSizer(PositionSizer):
    """Position sizer allocating a fixed fraction of the portfolio per signal.

    Each signal's suggested exposure (-1/0/1 style value) is scaled by
    ``fixed_percentage`` to form the target portfolio weight of its ticker.
    """

    def __init__(self, broker: Broker, data_provider: DataProvider, order_factory: OrderFactory, signals_register: SignalsRegister, fixed_percentage: float, tolerance_percentage: float=0.0):
        super().__init__(broker, data_provider, order_factory, signals_register)
        self.fixed_percentage = fixed_percentage
        # Rebalancing dead-band: skip orders within this distance of target.
        self.tolerance_percentage = tolerance_percentage

    def _generate_market_orders(self, signals: List[Signal], time_in_force: TimeInForce, frequency: Frequency=None) -> List[Optional[Order]]:
        """Translate signals into target-percentage market orders."""
        target_percentages = {}
        for signal in signals:
            ticker = self._get_specific_ticker(signal.ticker)
            target_percentages[ticker] = signal.suggested_exposure.value * self.fixed_percentage
        return self._order_factory.target_percent_orders(target_percentages, MarketOrder(), time_in_force, self.tolerance_percentage, frequency)
class DisconnectedType(Type):
    """Marker type for formally disconnected gradients.

    Instances are symbolic placeholders only: every value-level operation
    is deliberately disabled and raises.
    """

    def filter(self, data, strict=False, allow_downcast=None):
        raise AssertionError("If you're assigning to a DisconnectedType you're doing something wrong. It should only be used as a symbolic placeholder.")

    # NOTE(review): 'fiter_variable' looks like a typo of 'filter_variable'
    # (present upstream as well); left as-is to keep the interface identical.
    def fiter_variable(self, other):
        raise AssertionError("If you're assigning to a DisconnectedType you're doing something wrong. It should only be used as a symbolic placeholder.")

    # NOTE(review): defined without 'self' -- on an instance call the instance
    # binds to 'a'; matches upstream, confirm before relying on arity.
    def may_share_memory(a, b):
        return False

    def value_eq(a, b, force_same_dtype=True):
        raise AssertionError("If you're assigning to a DisconnectedType you're doing something wrong. It should only be used as a symbolic placeholder.")

    def __str__(self):
        return 'DisconnectedType'
def pre_trained_model_to_finetune(checkpoint, args):
    """Strip the classification-head weights from a pre-trained checkpoint.

    Removes ``class_embed.<l>.{weight,bias}`` for every decoder layer so the
    heads can be re-initialized for fine-tuning on a new label set.  Mutates
    and returns ``checkpoint['model']``.  Raises KeyError if a head is absent.
    """
    state_dict = checkpoint['model']
    # Two-stage models carry one extra class head.
    head_count = (args.dec_layers + 1) if args.two_stage else args.dec_layers
    for layer_idx in range(head_count):
        state_dict.pop('class_embed.{}.weight'.format(layer_idx))
        state_dict.pop('class_embed.{}.bias'.format(layer_idx))
    return state_dict
def extract_mid_stage_label_dataframe(dataset_filename):
    """Run grammar-based sentiment extraction over annotated data and build
    a mid-stage training DataFrame.

    ``dataset_filename`` is a single dataset path or an iterable of paths.
    For each sentence, every compiled grammar is applied, the best-matching
    extraction is chosen against the gold meta, and a classification report
    is printed.  Returns a DataFrame with columns
    [sentence, meta, max_match_extracted, y_true].
    """
    if (type(dataset_filename) == str):
        logging.info('Dataset: {}'.format(dataset_filename))
        annotated_dataset = get_dataset(dataset_filename)
    else:
        # Multiple datasets: concatenate their rows.
        annotated_dataset = []
        logging.info('Datasets : {}'.format(', '.join((f for f in dataset_filename))))
        for dset in dataset_filename:
            annotated_dataset.extend(get_dataset(dset))
    initialize_globals()
    sorted_grammar_list = get_grammar()
    mid_training_data = []
    Y_PRED = []
    Y_TRUE = []
    for row in tqdm(annotated_dataset):
        sentence = row['sentence']
        logging.debug(('sentence: ' + sentence))
        # Gold annotation, minus placeholder 'null' keys.
        meta = {key: value for (key, value) in row['meta'].items() if (key != 'null')}
        expected_meta_form = set(sorted(meta.items()))
        ste = SourceTargetExtractor(sentence)
        # One candidate extraction per grammar.
        list_of_extracted_meta = list()
        for (index, (_, compiled_grammar)) in enumerate(sorted_grammar_list):
            score_dict = ste.get_topic_sentiment_score_dict(compiled_grammar)
            extracted_meta = get_polarity_form_result(score_dict)
            extracted_ote = set(extracted_meta.items())
            list_of_extracted_meta.append(extracted_ote)
        # Keep the grammar output that best matches the gold meta.
        (mid_training_label, max_match_extracted) = get_max_combination(list_of_extracted_meta, expected_meta_form)
        (y_pred_index, y_true_index) = get_y_pred_and_y_true_label(expected_meta_form, max_match_extracted)
        Y_TRUE.extend(y_true_index)
        Y_PRED.extend(y_pred_index)
        mid_training_data.append([sentence, meta, max_match_extracted, mid_training_label])
    print('For Data-set: ', dataset_filename, '\n', classification_report(Y_TRUE, Y_PRED))
    df = pd.DataFrame(mid_training_data, columns=['sentence', 'meta', 'max_match_extracted', 'y_true'])
    return df
class PullRequestEQTest(TestCase):
    """Pull-request equality should be determined by the PR number alone."""

    def test_is_eq(self):
        pr1 = pullrequest_factory('yay', number=1)
        pr2 = pullrequest_factory('yay', number=2)
        self.assertNotEqual(pr1, pr2)
        # Aligning the numbers makes them equal...
        pr1.number = pr2.number
        self.assertEqual(pr1, pr2)
        # ...diverging breaks equality, and re-aligning restores it.
        pr1.number = 3
        self.assertNotEqual(pr1, pr2)
        pr2.number = 3
        self.assertEqual(pr1, pr2)
def eval(config, index_arg, verbose=0):
    """Train a CfC-cell RNN text classifier on IMDB and return test accuracy.

    NOTE(review): shadows the builtin ``eval`` and depends on module-level
    ``load_imdb``, ``maxlen`` and ``vocab_size`` -- presumably defined at
    file top; confirm. ``index_arg`` is unused here -- TODO confirm callers
    need it in the signature.
    """
    ((train_x, train_y), (test_x, test_y)) = load_imdb()
    # Choose the recurrent cell variant from the hyperparameter dict.
    if config['use_mixed']:
        cell = MixedCfcCell(units=config['size'], hparams=config)
    else:
        cell = CfcCell(units=config['size'], hparams=config)
    inputs = tf.keras.layers.Input(shape=(maxlen,))
    token_emb = tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=config['embed_dim'])
    cell_input = token_emb(inputs)
    cell_input = tf.keras.layers.Dropout(config['embed_dr'])(cell_input)
    # Only the final RNN state feeds the classification head.
    rnn = tf.keras.layers.RNN(cell, time_major=False, return_sequences=False)
    dense_layer = tf.keras.layers.Dense(10)
    output_states = rnn(cell_input)
    y = dense_layer(output_states)
    model = tf.keras.Model(inputs, y)
    # Exponentially decayed learning rate, one decay step per epoch's batches.
    base_lr = config['base_lr']
    decay_lr = config['decay_lr']
    train_steps = (train_x.shape[0] // config['batch_size'])
    learning_rate_fn = tf.keras.optimizers.schedules.ExponentialDecay(base_lr, train_steps, decay_lr)
    opt = (tf.keras.optimizers.Adam if (config['optimizer'] == 'adam') else tf.keras.optimizers.RMSprop)
    optimizer = opt(learning_rate_fn, clipnorm=config['clipnorm'])
    model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    hist = model.fit(x=train_x, y=train_y, batch_size=config['batch_size'], epochs=config['epochs'], validation_data=((test_x, test_y) if verbose else None), verbose=verbose)
    (_, test_accuracy) = model.evaluate(test_x, test_y, verbose=0)
    return test_accuracy
class TCRA(TestCase):
    """Upgrading a legacy CRA frame to AENC must preserve every field."""

    def test_upgrade(self):
        frame = CRA(owner='a', preview_start=1, preview_length=2, data=b'foo')
        new = AENC(frame)
        # All four CRA fields carry over unchanged.
        self.assertEqual(new.owner, 'a')
        self.assertEqual(new.preview_start, 1)
        self.assertEqual(new.preview_length, 2)
        self.assertEqual(new.data, b'foo')
class TokenizerTesterMixin():
    """Shared scaffolding mixed into per-model tokenizer TestCase classes."""
    # Concrete subclasses point these at the slow/fast tokenizer classes.
    tokenizer_class = None
    rust_tokenizer_class = None
    # Feature switches controlling which implementations/behaviors are tested.
    test_slow_tokenizer = True
    test_rust_tokenizer = True
    space_between_special_tokens = False
    # from_pretrained configuration used when enumerating checkpoints in setUp.
    from_pretrained_kwargs = None
    from_pretrained_filter = None
    from_pretrained_vocab_key = 'vocab_file'
    test_seq2seq = True
    # SentencePiece-specific switches.
    test_sentencepiece = False
    test_sentencepiece_ignore_case = False
    def setUp(self) -> None:
        """Collect fast-tokenizer checkpoints, load fixture text, make a temp dir."""
        if self.test_rust_tokenizer:
            # Enumerate pretrained checkpoints for the fast class, filtered by
            # from_pretrained_filter; only the first one is actually used.
            tokenizers_list = [(self.rust_tokenizer_class, pretrained_name, (self.from_pretrained_kwargs if (self.from_pretrained_kwargs is not None) else {})) for pretrained_name in self.rust_tokenizer_class.pretrained_vocab_files_map[self.from_pretrained_vocab_key].keys() if ((self.from_pretrained_filter is None) or ((self.from_pretrained_filter is not None) and self.from_pretrained_filter(pretrained_name)))]
            self.tokenizers_list = tokenizers_list[:1]
        else:
            self.tokenizers_list = []
        with open(f'{get_tests_dir()}/fixtures/sample_text.txt', encoding='utf-8') as f_data:
            self._data = f_data.read().replace('\n\n', '\n').strip()
        # Scratch dir for save/load round-trips; removed in tearDown.
        self.tmpdirname = tempfile.mkdtemp()
    def tearDown(self):
        # Remove the scratch directory created in setUp.
        shutil.rmtree(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected-output) text pair; identical by default."""
        input_txt = self.get_clean_sequence(tokenizer)[0]
        return (input_txt, input_txt)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[(str, list)]:
        """Build a 'clean' text/ids pair that round-trips through the tokenizer.

        Keeps only tokens whose decoded form is plain ASCII letters/spaces and
        that re-encode to exactly themselves, then clamps the sequence length
        into [min_length, max_length].
        """
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter((lambda t: re.match('^[ a-zA-Z]+$', t[1])), toks))
        # Keep only tokens that encode back to their own single id.
        toks = list(filter((lambda t: ([t[0]] == tokenizer.encode(t[1], add_special_tokens=False))), toks))
        if ((max_length is not None) and (len(toks) > max_length)):
            toks = toks[:max_length]
        if ((min_length is not None) and (len(toks) < min_length) and (len(toks) > 0)):
            # Too short: duplicate until long enough.
            while (len(toks) < min_length):
                toks = (toks + toks)
        toks_ids = [t[0] for t in toks]
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if ((' ' not in output_txt) and (len(toks_ids) > 1)):
            # Force at least one space so the text splits into several tokens.
            output_txt = ((tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + ' ') + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False))
        if with_prefix_space:
            output_txt = (' ' + output_txt)
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return (output_txt, output_ids)
def get_tokenizers(self, fast=True, **kwargs) -> List[PreTrainedTokenizerBase]:
if (fast and self.test_rust_tokenizer and self.test_slow_tokenizer):
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
elif (fast and self.test_rust_tokenizer):
return [self.get_rust_tokenizer(**kwargs)]
elif self.test_slow_tokenizer:
return [self.get_tokenizer(**kwargs)]
else:
raise ValueError('This tokenizer class has no tokenizer to be tested.')
    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        """Instantiate the slow tokenizer from the temp checkpoint dir."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        """Instantiate the fast (Rust-backed) tokenizer from the temp checkpoint dir."""
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def tokenizer_integration_test_util(self, expected_encoding: Dict, model_name: str, revision: str=None, sequences: List[str]=None, decode_kwargs: Dict[(str, Any)]=None, padding: bool=True):
        """Encode reference sequences with a hub checkpoint and compare to
        ``expected_encoding``; also check decode round-trips the input.

        Runs for the slow class and, when enabled, the fast class too.
        """
        decode_kwargs = ({} if (decode_kwargs is None) else decode_kwargs)
        if (sequences is None):
            sequences = ['Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.']
        if self.test_sentencepiece_ignore_case:
            sequences = [sequence.lower() for sequence in sequences]
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained(model_name, revision=revision)
            encoding = tokenizer(sequences, padding=padding)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding['input_ids']]
            encoding_data = encoding.data
            self.assertDictEqual(encoding_data, expected_encoding)
            # Decoding must reproduce the (possibly lowercased) inputs.
            for (expected, decoded) in zip(sequences, decoded_sequences):
                if self.test_sentencepiece_ignore_case:
                    expected = expected.lower()
                self.assertEqual(expected, decoded)
    def assert_padded_input_match(self, input_r: list, input_p: list, max_length: int, pad_token_id: int):
        """Check two padded sequences have the right length and identical padding tails."""
        self.assertEqual(len(input_r), max_length)
        self.assertEqual(len(input_p), max_length)
        # Compare the trailing runs of pad tokens.
        padded_tokens_r = list(takewhile((lambda i: (i == pad_token_id)), reversed(input_r)))
        padded_tokens_p = list(takewhile((lambda i: (i == pad_token_id)), reversed(input_p)))
        self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)
def assert_batch_padded_input_match(self, input_r: dict, input_p: dict, max_length: int, pad_token_id: int, model_main_input_name: str='input_ids'):
for i_r in input_r.values():
(self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(len(i_r[1]), max_length))
(self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(len(i_r[1]), max_length))
for (i_r, i_p) in zip(input_r[model_main_input_name], input_p[model_main_input_name]):
self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id)
for (i_r, i_p) in zip(input_r['attention_mask'], input_p['attention_mask']):
self.assertSequenceEqual(i_r, i_p)
def convert_batch_encode_plus_format_to_encode_plus(batch_encode_plus_sequences):
return [{value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences.keys()} for i in range(len(batch_encode_plus_sequences['input_ids']))]
    def test_tokenize_special_tokens(self):
        """Added special tokens must tokenize to themselves as single tokens."""
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                SPECIAL_TOKEN_1 = '[SPECIAL_TOKEN_1]'
                SPECIAL_TOKEN_2 = '[SPECIAL_TOKEN_2]'
                # Two registration paths: add_tokens(special_tokens=True) and
                # add_special_tokens(additional_special_tokens=...).
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        """convert_tokens_to_string must invert tokenize (SentencePiece only)."""
        if (not self.test_sentencepiece):
            return
        tokenizer = self.get_tokenizer()
        text = 'This is text to test the tokenizer.'
        if self.test_sentencepiece_ignore_case:
            text = text.lower()
        tokens = tokenizer.tokenize(text)
        self.assertTrue((len(tokens) > 0))
        # Round-trip: tokens back to the original text.
        reverse_text = tokenizer.convert_tokens_to_string(tokens)
        if self.test_sentencepiece_ignore_case:
            reverse_text = reverse_text.lower()
        self.assertEqual(reverse_text, text)
        # Special tokens must survive conversion to string, and the slow and
        # fast implementations must render them identically.
        special_tokens = tokenizer.all_special_tokens
        special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens)
        for special_token in special_tokens:
            self.assertIn(special_token, special_tokens_string)
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens)
            self.assertEqual(special_tokens_string, special_tokens_string_rust)
    def test_sentencepiece_tokenize_and_decode(self):
        """Slow and fast tokenizers must agree on encode and on decode."""
        if (not self.test_sentencepiece):
            return
        text = 'This is text to test the tokenizer.'
        if self.test_rust_tokenizer:
            tokenizer = self.get_tokenizer()
            rust_tokenizer = self.get_rust_tokenizer()
            slow_ids = tokenizer(text).input_ids
            fast_ids = rust_tokenizer(text).input_ids
            self.assertEqual(slow_ids, fast_ids)
            # Both decoders are deliberately fed the SAME ids (slow_ids) so
            # only the decoding step is compared.
            slow_decoded = tokenizer.decode(slow_ids)
            fast_decoded = rust_tokenizer.decode(slow_ids)
            self.assertEqual(slow_decoded, fast_decoded)
    def test_subword_regularization_tokenizer(self) -> None:
        """sp_model_kwargs (sampling-based subword regularization) must be
        accepted, stored, and actually affect sampling."""
        if (not self.test_sentencepiece):
            return
        # Enable SentencePiece sampling; nbest_size=-1 samples from all.
        sp_model_kwargs = {'enable_sampling': True, 'alpha': 0.1, 'nbest_size': (- 1)}
        tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs)
        self.assertTrue(hasattr(tokenizer, 'sp_model_kwargs'))
        self.assertIsNotNone(tokenizer.sp_model_kwargs)
        self.assertTrue(isinstance(tokenizer.sp_model_kwargs, dict))
        self.assertEqual(tokenizer.sp_model_kwargs, sp_model_kwargs)
        self.check_subword_sampling(tokenizer)
def test_pickle_subword_regularization_tokenizer(self) -> None:
if (not self.test_sentencepiece):
return
'Google pickle __getstate__ __setstate__ if you are struggling with this.'
sp_model_kwargs = {'enable_sampling': True, 'alpha': 0.1, 'nbest_size': (- 1)}
tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs)
tokenizer_bin = pickle.dumps(tokenizer)
del tokenizer
tokenizer_new = pickle.loads(tokenizer_bin)
self.assertTrue(hasattr(tokenizer_new, 'sp_model_kwargs'))
self.assertIsNotNone(tokenizer_new.sp_model_kwargs)
self.assertTrue(isinstance(tokenizer_new.sp_model_kwargs, dict))
self.assertEqual(tokenizer_new.sp_model_kwargs, sp_model_kwargs)
self.check_subword_sampling(tokenizer_new)
    def test_save_sentencepiece_tokenizer(self) -> None:
        """save_pretrained/from_pretrained round-trips must not change encodings,
        even after the first saved directory has been deleted (checks the
        tokenizer does not depend on the original files staying on disk)."""
        if ((not self.test_sentencepiece) or (not self.test_slow_tokenizer)):
            return
        text = 'This is text to test the tokenizer.'
        tokenizer_slow_1 = self.get_tokenizer()
        encoding_tokenizer_slow_1 = tokenizer_slow_1(text)
        tmpdirname_1 = tempfile.mkdtemp()
        tmpdirname_2 = tempfile.mkdtemp()
        tokenizer_slow_1.save_pretrained(tmpdirname_1)
        tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1)
        encoding_tokenizer_slow_2 = tokenizer_slow_2(text)
        # Delete dir 1 BEFORE re-saving from tokenizer 2.
        shutil.rmtree(tmpdirname_1)
        tokenizer_slow_2.save_pretrained(tmpdirname_2)
        tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2)
        encoding_tokenizer_slow_3 = tokenizer_slow_3(text)
        shutil.rmtree(tmpdirname_2)
        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2)
        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3)
def test_model_input_names_signature(self):
accepted_model_main_input_names = ['input_ids', 'input_values']
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
self.assertTrue((tokenizer.model_input_names[0] in accepted_model_main_input_names))
def test_rust_tokenizer_signature(self):
    """Fast tokenizers must accept a `tokenizer_file` argument defaulting to None."""
    if not self.test_rust_tokenizer:
        return
    params = inspect.signature(self.rust_tokenizer_class.__init__).parameters
    self.assertIn('tokenizer_file', params)
    self.assertIsNone(params['tokenizer_file'].default)
def test_tokenizer_slow_store_full_signature(self):
    """Slow tokenizers must record every defaulted __init__ argument in init_kwargs."""
    if not self.test_slow_tokenizer:
        return
    params = inspect.signature(self.tokenizer_class.__init__).parameters
    tokenizer = self.get_tokenizer()
    for name, param in params.items():
        # Only arguments that carry a default are expected in init_kwargs.
        if param.default == inspect.Parameter.empty:
            continue
        self.assertIn(name, tokenizer.init_kwargs)
def test_tokenizer_fast_store_full_signature(self):
    """Fast tokenizers must record defaulted __init__ arguments (except file paths)
    in init_kwargs."""
    if not self.test_rust_tokenizer:
        return
    params = inspect.signature(self.rust_tokenizer_class.__init__).parameters
    tokenizer = self.get_rust_tokenizer()
    file_arguments = ['vocab_file', 'merges_file', 'tokenizer_file']
    for name, param in params.items():
        # Skip arguments without a default and the serialization file paths.
        if param.default == inspect.Parameter.empty or name in file_arguments:
            continue
        self.assertIn(name, tokenizer.init_kwargs)
def test_rust_and_python_full_tokenizers(self):
    """Slow and fast tokenizers must produce identical ids for the same text."""
    if not self.test_rust_tokenizer:
        return
    if not self.test_slow_tokenizer:
        return
    slow_tokenizer = self.get_tokenizer()
    fast_tokenizer = self.get_rust_tokenizer()
    sequence, _ = self.get_input_output_texts(slow_tokenizer)
    # Compare both without and with special tokens.
    for with_special in (False, True):
        slow_ids = slow_tokenizer.encode(sequence, add_special_tokens=with_special)
        fast_ids = fast_tokenizer.encode(sequence, add_special_tokens=with_special)
        self.assertListEqual(slow_ids, fast_ids)
def test_tokenizers_common_properties(self):
    """All tokenizers must expose the standard special-token and config attributes."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            special_token_attrs = [
                'bos_token', 'eos_token', 'unk_token', 'sep_token',
                'pad_token', 'cls_token', 'mask_token',
            ]
            # Each special token has both a string attribute and an `_id` twin.
            for attr in special_token_attrs:
                self.assertTrue(hasattr(tokenizer, attr))
                self.assertTrue(hasattr(tokenizer, attr + '_id'))
            self.assertTrue(hasattr(tokenizer, 'additional_special_tokens'))
            self.assertTrue(hasattr(tokenizer, 'additional_special_tokens_ids'))
            misc_attrs = ['model_max_length', 'init_inputs', 'init_kwargs']
            if not isinstance(tokenizer, PreTrainedTokenizerFast):
                # Slow tokenizers additionally track their added-token maps.
                misc_attrs += ['added_tokens_encoder', 'added_tokens_decoder']
            for attr in misc_attrs:
                self.assertTrue(hasattr(tokenizer, attr))
def test_tokenizers_common_ids_setters(self):
    """Setting a special-token `_id` attribute must update both the id and the
    corresponding token string (and None must clear both)."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            special_token_attrs = [
                'bos_token', 'eos_token', 'unk_token', 'sep_token',
                'pad_token', 'cls_token', 'mask_token',
            ]
            vocab = tokenizer.get_vocab()
            # Any real id from the vocab works as a probe value.
            probe_id = next(iter(vocab.values()))
            probe_token = tokenizer.convert_ids_to_tokens(probe_id, skip_special_tokens=False)
            for attr in special_token_attrs:
                id_attr = attr + '_id'
                # Clearing the id clears the token too.
                setattr(tokenizer, id_attr, None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, id_attr), None)
                # Setting the id resolves the matching token string.
                setattr(tokenizer, id_attr, probe_id)
                self.assertEqual(getattr(tokenizer, attr), probe_token)
                self.assertEqual(getattr(tokenizer, id_attr), probe_id)
            # The list-valued additional_special_tokens behaves the same way.
            tokenizer.additional_special_tokens_ids = []
            self.assertListEqual(tokenizer.additional_special_tokens, [])
            self.assertListEqual(tokenizer.additional_special_tokens_ids, [])
            tokenizer.additional_special_tokens_ids = [probe_id]
            self.assertListEqual(tokenizer.additional_special_tokens, [probe_token])
            self.assertListEqual(tokenizer.additional_special_tokens_ids, [probe_id])
@parameterized.expand([(True,), (False,)])
def test_tokenizers_special_tokens_properties_unset(self, verbose):
    """Every special-token property must be clearable by assigning None.

    Runs once with verbose tokenizers and once without.
    NOTE(review): the decorator above was lost in this file's history, leaving a
    bare `([(True,), (False,)])` expression and a method unittest could not call
    (it takes `verbose`); restored to the standard `parameterized.expand` form —
    confirm `parameterized` is imported at the top of the file.
    """
    tokenizers = self.get_tokenizers(verbose=verbose)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            attributes_list = [
                'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token',
                'cls_token', 'mask_token', 'additional_special_tokens',
            ]
            for attr in attributes_list:
                setattr(tokenizer, attr, None)
                self.assertIsNone(getattr(tokenizer, attr))
def test_save_and_load_tokenizer(self):
    """Tokenizer state must survive a save_pretrained/from_pretrained round-trip.

    Covers: a plain round-trip, a round-trip after adding regular and additional
    special tokens (for all tokenizers, then again restricted to fast ones), and
    overriding model_max_length at load time.
    """
    # Safety check: the default model_max_length must differ from the 42 used below.
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    # Round-trip without any modification: encoding and vocab must be preserved.
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwanted,running'
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            before_vocab = tokenizer.get_vocab()
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            after_vocab = after_tokenizer.get_vocab()
            self.assertListEqual(before_tokens, after_tokens)
            self.assertDictEqual(before_vocab, after_vocab)
            shutil.rmtree(tmpdirname)
    # Round-trip after adding regular tokens and an additional special token.
    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwanted,running'
            tokenizer.add_tokens(['bim', 'bambam'])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append('new_additional_special_token')
            tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            before_vocab = tokenizer.get_vocab()
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            after_vocab = after_tokenizer.get_vocab()
            self.assertListEqual(before_tokens, after_tokens)
            self.assertDictEqual(before_vocab, after_vocab)
            # The added tokens must still be present after reload.
            self.assertIn('bim', after_vocab)
            self.assertIn('bambam', after_vocab)
            self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
            # model_max_length is saved with the tokenizer ...
            self.assertEqual(after_tokenizer.model_max_length, 42)
            # ... but can still be overridden at load time.
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)
            shutil.rmtree(tmpdirname)
    # Same added-token round-trip, restricted to fast tokenizers.
    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        if (not tokenizer.is_fast):
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwanted,running'
            tokenizer.add_tokens(['bim', 'bambam'])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append('new_additional_special_token')
            tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            before_vocab = tokenizer.get_vocab()
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            after_vocab = after_tokenizer.get_vocab()
            self.assertListEqual(before_tokens, after_tokens)
            self.assertDictEqual(before_vocab, after_vocab)
            self.assertIn('bim', after_vocab)
            self.assertIn('bambam', after_vocab)
            self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
            self.assertEqual(after_tokenizer.model_max_length, 42)
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)
            shutil.rmtree(tmpdirname)
def test_pickle_tokenizer(self):
    """Pickling a tokenizer to disk and loading it back must not change tokenization."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            self.assertIsNotNone(tokenizer)
            text = 'Munich and Berlin are nice cities'
            expected_subwords = tokenizer.tokenize(text)
            filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
            # Round-trip the tokenizer through a pickle file on disk.
            with open(filename, 'wb') as handle:
                pickle.dump(tokenizer, handle)
            with open(filename, 'rb') as handle:
                restored_tokenizer = pickle.load(handle)
            restored_subwords = restored_tokenizer.tokenize(text)
            self.assertListEqual(expected_subwords, restored_subwords)
@require_tokenizers
def test_pickle_added_tokens(self):
    """An AddedToken must keep its flags (lstrip/rstrip/normalized/single_word)
    through a pickle round-trip.

    NOTE(review): the line above this method was a bare `_tokenizers` expression —
    a decorator stripped down to a name fragment, which raises NameError at class
    definition time; restored to `@require_tokenizers`.
    """
    tok1 = AddedToken('<s>', rstrip=True, lstrip=True, normalized=False, single_word=True)
    tok2 = pickle.loads(pickle.dumps(tok1))
    # Compare the full serialized state, not just equality of the token strings.
    self.assertEqual(tok1.__getstate__(), tok2.__getstate__())
def test_added_tokens_do_lower_case(self):
    """Added tokens must follow the tokenizer's do_lower_case setting.

    First loop: lowercasing tokenizers should match added tokens case-insensitively.
    Second loop: non-lowercasing tokenizers should match them case-sensitively.
    """
    tokenizers = self.get_tokenizers(do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Only meaningful for tokenizers that actually lowercase.
            if ((not hasattr(tokenizer, 'do_lower_case')) or (not tokenizer.do_lower_case)):
                continue
            special_token = tokenizer.all_special_tokens[0]
            text = ((special_token + ' aaaaa bbbbbb low cccccccccdddddddd l ') + special_token)
            text2 = ((special_token + ' AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l ') + special_token)
            toks_before_adding = tokenizer.tokenize(text)
            new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd', 'AAAAA BBBBBB', 'CCCCCCCCCDDDDDDDD']
            added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks])
            toks_after_adding = tokenizer.tokenize(text)
            toks_after_adding2 = tokenizer.tokenize(text2)
            # With lowercasing, the cased variants collapse onto the lowercase
            # entries, so either 2 or 4 tokens end up being added.
            self.assertIn(added, [2, 4])
            # Both casings must now tokenize identically.
            self.assertListEqual(toks_after_adding, toks_after_adding2)
            self.assertTrue((len(toks_before_adding) > len(toks_after_adding)))
            # Special tokens themselves must never be lowercased away.
            sequence_with_special_tokens = (('A ' + ' yEs '.join(tokenizer.all_special_tokens)) + ' B')
            tokenized_sequence = ''.join(tokenizer.tokenize(sequence_with_special_tokens))
            for special_token in tokenizer.all_special_tokens:
                self.assertTrue((special_token in tokenized_sequence))
    tokenizers = self.get_tokenizers(do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Case-sensitive branch: skip tokenizers that do lowercase.
            if (hasattr(tokenizer, 'do_lower_case') and tokenizer.do_lower_case):
                continue
            special_token = tokenizer.all_special_tokens[0]
            text = ((special_token + ' aaaaa bbbbbb low cccccccccdddddddd l ') + special_token)
            text2 = ((special_token + ' AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l ') + special_token)
            toks_before_adding = tokenizer.tokenize(text)
            new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd', 'AAAAA BBBBBB', 'CCCCCCCCCDDDDDDDD']
            added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks])
            self.assertIn(added, [2, 4])
            toks_after_adding = tokenizer.tokenize(text)
            toks_after_adding2 = tokenizer.tokenize(text2)
            # Same token count, but the differently-cased inputs must map to
            # different added tokens.
            self.assertEqual(len(toks_after_adding), len(toks_after_adding2))
            self.assertNotEqual(toks_after_adding[1], toks_after_adding2[1])
            self.assertTrue((len(toks_before_adding) > len(toks_after_adding)))
def test_add_tokens_tokenizer(self):
    """add_tokens/add_special_tokens must grow len(tokenizer) but leave vocab_size
    untouched, and the new tokens must actually be used during encoding."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            vocab_size = tokenizer.vocab_size
            all_size = len(tokenizer)
            self.assertNotEqual(vocab_size, 0)
            # Adding regular tokens: base vocab unchanged, total size grows.
            new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
            added_toks = tokenizer.add_tokens(new_toks)
            vocab_size_2 = tokenizer.vocab_size
            all_size_2 = len(tokenizer)
            self.assertNotEqual(vocab_size_2, 0)
            self.assertEqual(vocab_size, vocab_size_2)
            self.assertEqual(added_toks, len(new_toks))
            self.assertEqual(all_size_2, (all_size + len(new_toks)))
            tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 4)
            # Added tokens receive ids beyond the base vocab range.
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            # Adding special tokens behaves the same and rebinds eos/pad ids.
            new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
            added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
            vocab_size_3 = tokenizer.vocab_size
            all_size_3 = len(tokenizer)
            self.assertNotEqual(vocab_size_3, 0)
            self.assertEqual(vocab_size, vocab_size_3)
            self.assertEqual(added_toks_2, len(new_toks_2))
            self.assertEqual(all_size_3, (all_size_2 + len(new_toks_2)))
            tokens = tokenizer.encode('>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 6)
            # First and second-to-last tokens are the new eos/pad; they sit past
            # the base vocab and above their cased neighbours.
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[0], tokens[1])
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], tokens[(- 3)])
            self.assertEqual(tokens[0], tokenizer.eos_token_id)
            self.assertEqual(tokens[(- 2)], tokenizer.pad_token_id)
def test_add_special_tokens(self):
    """A newly registered special token must encode to a single id, round-trip
    through decode/encode, and be removable via skip_special_tokens."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            input_text, ids = self.get_clean_sequence(tokenizer)
            special_token = '[SPECIAL_TOKEN]'
            tokenizer.add_special_tokens({'cls_token': special_token})
            # The new special token must map to exactly one id.
            encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
            self.assertEqual(len(encoded_special_token), 1)
            # Decoding ids + special token and re-encoding must be lossless.
            text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
            encoded = tokenizer.encode(text, add_special_tokens=False)
            input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
            special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
            self.assertEqual(encoded, input_encoded + special_token_id)
            # skip_special_tokens must drop the special token from the decoded text.
            decoded = tokenizer.decode(encoded, skip_special_tokens=True)
            self.assertNotIn(special_token, decoded)
def test_internal_consistency(self):
    """tokenize / convert_tokens_to_ids / encode / decode must agree with each other."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            input_text, output_text = self.get_input_output_texts(tokenizer)
            tokens = tokenizer.tokenize(input_text)
            ids = tokenizer.convert_tokens_to_ids(tokens)
            # encode() must equal tokenize() followed by convert_tokens_to_ids().
            direct_ids = tokenizer.encode(input_text, add_special_tokens=False)
            self.assertListEqual(ids, direct_ids)
            converted_back = tokenizer.convert_ids_to_tokens(ids)
            self.assertNotEqual(len(converted_back), 0)
            # decode() must reproduce the expected reference text.
            decoded_text = tokenizer.decode(ids)
            self.assertIsInstance(decoded_text, str)
            self.assertEqual(decoded_text, output_text)
@require_tokenizers
def test_encode_decode_with_spaces(self):
    """Decoding added special-ish tokens must honour spaces_between_special_tokens.

    NOTE(review): the line above this method was a bare `_tokenizers` expression —
    a decorator stripped down to a name fragment (NameError at class definition
    time); restored to `@require_tokenizers`.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            new_toks = [
                AddedToken('[ABC]', normalized=False),
                AddedToken('[DEF]', normalized=False),
                AddedToken('GHI IHG', normalized=False),
            ]
            tokenizer.add_tokens(new_toks)
            # Renamed from `input`, which shadowed the builtin.
            text = '[ABC][DEF][ABC]GHI IHG[DEF]'
            if self.space_between_special_tokens:
                expected = '[ABC] [DEF] [ABC] GHI IHG [DEF]'
            else:
                expected = text
            encoded = tokenizer.encode(text, add_special_tokens=False)
            decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
            # Some tokenizers lowercase on decode, so accept either casing.
            self.assertIn(decoded, [expected, expected.lower()])
def test_pretrained_model_lists(self):
    """pretrained_vocab_files_map and max_model_input_sizes must list the same
    checkpoint names, in the same order."""
    vocab_map = self.tokenizer_class.pretrained_vocab_files_map
    self.assertGreaterEqual(len(vocab_map), 1)
    first_file_map = list(vocab_map.values())[0]
    self.assertGreaterEqual(len(first_file_map), 1)
    self.assertEqual(len(first_file_map), len(self.tokenizer_class.max_model_input_sizes))
    weights_list = list(self.tokenizer_class.max_model_input_sizes.keys())
    # Every per-file checkpoint map must reference exactly the same names.
    for map_list in vocab_map.values():
        self.assertListEqual(weights_list, list(map_list.keys()))
def test_mask_output(self):
    """token_type_ids must have the same length as input_ids for pair inputs."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Only run for tokenizers overriding build_inputs_with_special_tokens
            # that actually produce token_type_ids.
            overrides_build = (
                tokenizer.build_inputs_with_special_tokens.__qualname__.split('.')[0]
                != 'PreTrainedTokenizer'
            )
            if overrides_build and 'token_type_ids' in tokenizer.model_input_names:
                seq_0 = 'Test this method.'
                seq_1 = 'With these inputs.'
                information = tokenizer.encode_plus(seq_0, seq_1, add_special_tokens=True)
                sequences = information['input_ids']
                mask = information['token_type_ids']
                self.assertEqual(len(sequences), len(mask))
def test_token_type_ids(self):
    """A single sequence must produce token_type_ids containing segment id 0."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            encoding = tokenizer('Test this method.', return_token_type_ids=True)
            self.assertIn(0, encoding['token_type_ids'])
def test_sequence_ids(self):
    """sequence_ids() must flag each input sequence (fast tokenizers only)."""
    for tokenizer in self.get_tokenizers():
        if not tokenizer.is_fast:
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            seq_1 = 'With these inputs.'
            single = tokenizer(seq_0)
            self.assertIn(0, single.sequence_ids())
            pair = tokenizer(seq_0, seq_1)
            self.assertIn(0, pair.sequence_ids())
            self.assertIn(1, pair.sequence_ids())
            # Special tokens, if any are added for pairs, are mapped to None.
            if tokenizer.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair.sequence_ids())
def test_number_of_added_tokens(self):
    """num_special_tokens_to_add(pair=True) must equal the length delta between
    an encoding with and without special tokens."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            seq_1 = 'With these inputs.'
            bare = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
            attached = tokenizer.encode(seq_0, seq_1, add_special_tokens=True)
            # Presumably skips tokenizers returning a 2-part structure here — TODO confirm.
            if len(attached) != 2:
                self.assertEqual(
                    tokenizer.num_special_tokens_to_add(pair=True),
                    len(attached) - len(bare),
                )
def test_maximum_encoding_length_single_input(self):
    """Single over-long inputs must be clipped to model_max_length when truncation
    is on, trigger exactly one warning when it is off, and report the correct
    overflowing tokens (format differs between fast and slow tokenizers)."""
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (seq_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            sequence = tokenizer.encode(seq_0, add_special_tokens=False)
            total_length = len(sequence)
            self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it, it's too short")
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # Build a sequence guaranteed to exceed model_max_length.
            seq_1 = (seq_0 * model_max_length)
            sequence1 = tokenizer(seq_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            self.assertGreater(total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short")
            # Only exercise padding when the tokenizer has a usable pad token.
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'Truncation: {truncation_state}'):
                            # With truncation on, output is clipped to model_max_length,
                            # for single inputs and batches alike.
                            output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    # With truncation off, exactly one too-long warning must be emitted.
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(seq_1, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([seq_1], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # Overflowing tokens: truncate 2 tokens with a stride of 2.
            stride = 2
            information = tokenizer(seq_0, max_length=(total_length - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                # Fast tokenizers return the overflow as a second row of input_ids.
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (total_length - 2))
                self.assertEqual(truncated_sequence, sequence[:(- 2)])
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, sequence[(- (2 + stride)):])
            else:
                # Slow tokenizers expose a dedicated 'overflowing_tokens' field.
                truncated_sequence = information['input_ids']
                overflowing_tokens = information['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (total_length - 2))
                self.assertEqual(truncated_sequence, sequence[:(- 2)])
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, sequence[(- (2 + stride)):])
def test_maximum_encoding_length_pair_input(self):
    """Pair inputs: every truncation strategy must clip to model_max_length, warn
    once when truncation is off, and report the correct truncated/overflowing
    tokens for 'longest_first'/True/'only_first'/'only_second' (fast and slow
    tokenizers differ in overflow format, and slow ones reject 'longest_first'
    with return_overflowing_tokens)."""
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            stride = 2
            (seq_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            # Make sure seq_0 is long enough to lose (2 + stride) tokens.
            if (len(ids) <= (2 + stride)):
                seq_0 = ((seq_0 + ' ') * (2 + stride))
                ids = None
            seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False)
            self.assertGreater(len(seq0_tokens), (2 + stride))
            seq_1 = 'This is another sentence to be encoded.'
            seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
            # Keep the two sequences clearly different in length so 'longest_first'
            # has an unambiguous longest member.
            if (abs((len(seq0_tokens) - len(seq1_tokens))) <= 2):
                seq1_tokens = (seq1_tokens + seq1_tokens)
                seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False)
                seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
            self.assertGreater(len(seq1_tokens), (2 + stride))
            smallest = (seq1_tokens if (len(seq0_tokens) > len(seq1_tokens)) else seq0_tokens)
            sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # Build a first member guaranteed to exceed model_max_length.
            seq_2 = (seq_0 * model_max_length)
            self.assertGreater(len(seq_2), model_max_length)
            sequence1 = tokenizer(seq_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False)
            total_length2 = len(sequence2['input_ids'])
            self.assertLess(total_length1, (model_max_length - 10), 'Issue with the testing sequence, please update it.')
            self.assertGreater(total_length2, model_max_length, 'Issue with the testing sequence, please update it.')
            # Only exercise padding when the tokenizer has a usable pad token.
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'{tokenizer.__class__.__name__} Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'{tokenizer.__class__.__name__} Truncation: {truncation_state}'):
                            output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            output = tokenizer([seq_2], [seq_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    # 'only_second' truncation (the long member is second here).
                    output = tokenizer(seq_1, seq_2, padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids']), model_max_length)
                    output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    # No truncation: exactly one too-long warning must be emitted.
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # Reference truncations/overflows computed by hand for the checks below.
            truncated_first_sequence = (tokenizer.encode(seq_0, add_special_tokens=False)[:(- 2)] + tokenizer.encode(seq_1, add_special_tokens=False))
            truncated_second_sequence = (tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[:(- 2)])
            truncated_longest_sequence = (truncated_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else truncated_second_sequence)
            overflow_first_sequence = (tokenizer.encode(seq_0, add_special_tokens=False)[(- (2 + stride)):] + tokenizer.encode(seq_1, add_special_tokens=False))
            overflow_second_sequence = (tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[(- (2 + stride)):])
            overflow_longest_sequence = (overflow_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else overflow_second_sequence)
            # 'longest_first' with overflow: supported by fast tokenizers ...
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                information = tokenizer(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
            else:
                # ... but slow tokenizers must raise a ValueError instead.
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
                self.assertTrue(context.exception.args[0].startswith('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.'))
            # truncation=True behaves exactly like 'longest_first'.
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                information = tokenizer(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
            else:
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                self.assertTrue(context.exception.args[0].startswith('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.'))
            # 'only_first' truncation with overflow.
            information_first_truncated = tokenizer(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='only_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                truncated_sequence = information_first_truncated['input_ids'][0]
                overflowing_tokens = information_first_truncated['input_ids'][1]
                self.assertEqual(len(information_first_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq1_tokens)))
                self.assertEqual(overflowing_tokens, overflow_first_sequence)
            else:
                truncated_sequence = information_first_truncated['input_ids']
                overflowing_tokens = information_first_truncated['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                # Slow tokenizers only return the overflow of the truncated member.
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq0_tokens[(- (2 + stride)):])
            # 'only_second' truncation with overflow.
            information_second_truncated = tokenizer(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='only_second', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                truncated_sequence = information_second_truncated['input_ids'][0]
                overflowing_tokens = information_second_truncated['input_ids'][1]
                self.assertEqual(len(information_second_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq0_tokens)))
                self.assertEqual(overflowing_tokens, overflow_second_sequence)
            else:
                truncated_sequence = information_second_truncated['input_ids']
                overflowing_tokens = information_second_truncated['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq1_tokens[(- (2 + stride)):])
def test_special_tokens_mask(self):
    """Filtering input_ids by special_tokens_mask must recover the plain encoding."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence_0 = 'Encode this.'
            encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
            encoded_sequence_dict = tokenizer.encode_plus(
                sequence_0, add_special_tokens=True, return_special_tokens_mask=True
            )
            encoded_sequence_w_special = encoded_sequence_dict['input_ids']
            special_tokens_mask = encoded_sequence_dict['special_tokens_mask']
            self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
            # Dropping every masked position must leave the bare encoding.
            filtered_sequence = [
                token
                for token, is_special in zip(encoded_sequence_w_special, special_tokens_mask)
                if not is_special
            ]
            self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask_input_pairs(self):
    """Same as test_special_tokens_mask, but for pair inputs: masking out the
    special tokens must leave the concatenated bare encodings."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence_0 = 'Encode this.'
            sequence_1 = 'This one too please.'
            encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
            encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
            encoded_sequence_dict = tokenizer.encode_plus(
                sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True
            )
            encoded_sequence_w_special = encoded_sequence_dict['input_ids']
            special_tokens_mask = encoded_sequence_dict['special_tokens_mask']
            self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
            # Dropping every masked position must leave the two bare encodings.
            filtered_sequence = [
                token
                for token, is_special in zip(encoded_sequence_w_special, special_tokens_mask)
                if not is_special
            ]
            self.assertEqual(encoded_sequence, filtered_sequence)
def test_padding_side_in_kwargs(self):
    """`padding_side` passed to from_pretrained must be honoured; invalid values
    must raise ValueError (fast and slow classes alike)."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            # Check the fast class first, then the slow one, mirroring the flags.
            for enabled, tok_class in (
                (self.test_rust_tokenizer, self.rust_tokenizer_class),
                (self.test_slow_tokenizer, self.tokenizer_class),
            ):
                if not enabled:
                    continue
                for side in ('left', 'right'):
                    loaded = tok_class.from_pretrained(pretrained_name, padding_side=side, **kwargs)
                    self.assertEqual(loaded.padding_side, side)
                self.assertRaises(
                    ValueError,
                    tok_class.from_pretrained,
                    pretrained_name,
                    padding_side='unauthorized',
                    **kwargs,
                )
def test_truncation_side_in_kwargs(self):
    """`truncation_side` passed to `from_pretrained` must be honored; invalid values must raise."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            if self.test_rust_tokenizer:
                for side in ('left', 'right'):
                    tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, truncation_side=side, **kwargs
                    )
                    self.assertEqual(tokenizer_r.truncation_side, side)
                # An unrecognized side must be rejected at load time.
                self.assertRaises(
                    ValueError,
                    self.rust_tokenizer_class.from_pretrained,
                    pretrained_name,
                    truncation_side='unauthorized',
                    **kwargs,
                )
            if self.test_slow_tokenizer:
                for side in ('left', 'right'):
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, truncation_side=side, **kwargs
                    )
                    self.assertEqual(tokenizer_p.truncation_side, side)
                self.assertRaises(
                    ValueError,
                    self.tokenizer_class.from_pretrained,
                    pretrained_name,
                    truncation_side='unauthorized',
                    **kwargs,
                )
def test_right_and_left_padding(self):
    """Check that `padding_side` ('right'/'left') controls where pad ids are inserted,
    and that padding is a no-op on a single sequence when no fixed length is requested."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'Sequence'
            padding_size = 10
            # Ensure the tokenizer has a pad token (installing one if needed) before padding.
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            # RIGHT padding to max_length: pad ids are appended after the sequence.
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(sequence, max_length=(sequence_length + padding_size), padding='max_length')
            padded_sequence_length = len(padded_sequence)
            self.assertEqual((sequence_length + padding_size), padded_sequence_length)
            self.assertEqual((encoded_sequence + ([padding_idx] * padding_size)), padded_sequence)
            # LEFT padding to max_length: pad ids are prepended before the sequence.
            tokenizer.padding_side = 'left'
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(sequence, max_length=(sequence_length + padding_size), padding='max_length')
            padded_sequence_length = len(padded_sequence)
            self.assertEqual((sequence_length + padding_size), padded_sequence_length)
            self.assertEqual((([padding_idx] * padding_size) + encoded_sequence), padded_sequence)
            # padding=True / padding='longest' on a single sequence must not add any padding,
            # regardless of padding_side.
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(sequence, padding=True)
            padded_sequence_right_length = len(padded_sequence_right)
            self.assertEqual(sequence_length, padded_sequence_right_length)
            self.assertEqual(encoded_sequence, padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(sequence, padding='longest')
            padded_sequence_left_length = len(padded_sequence_left)
            self.assertEqual(sequence_length, padded_sequence_left_length)
            self.assertEqual(encoded_sequence, padded_sequence_left)
            # Default (no padding argument) and padding=False are also no-ops.
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(sequence)
            padded_sequence_right_length = len(padded_sequence_right)
            self.assertEqual(sequence_length, padded_sequence_right_length)
            self.assertEqual(encoded_sequence, padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(sequence, padding=False)
            padded_sequence_left_length = len(padded_sequence_left)
            self.assertEqual(sequence_length, padded_sequence_left_length)
            self.assertEqual(encoded_sequence, padded_sequence_left)
def test_right_and_left_truncation(self):
    """Check that `truncation_side` controls which end of the sequence is cut,
    and that truncation is a no-op when no max_length is supplied."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'This is a test sequence'
            truncation_size = 3
            # RIGHT truncation: the last `truncation_size` ids are dropped.
            tokenizer.truncation_side = 'right'
            encoded_sequence = tokenizer.encode(sequence, add_special_tokens=False)
            sequence_length = len(encoded_sequence)
            truncated_sequence = tokenizer.encode(sequence, max_length=(sequence_length - truncation_size), truncation=True, add_special_tokens=False)
            truncated_sequence_length = len(truncated_sequence)
            self.assertEqual(sequence_length, (truncated_sequence_length + truncation_size))
            self.assertEqual(encoded_sequence[:(- truncation_size)], truncated_sequence)
            # LEFT truncation: the first `truncation_size` ids are dropped.
            tokenizer.truncation_side = 'left'
            sequence_length = len(encoded_sequence)
            truncated_sequence = tokenizer.encode(sequence, max_length=(sequence_length - truncation_size), truncation=True, add_special_tokens=False)
            truncated_sequence_length = len(truncated_sequence)
            self.assertEqual(sequence_length, (truncated_sequence_length + truncation_size))
            self.assertEqual(encoded_sequence[truncation_size:], truncated_sequence)
            # truncation=True / 'longest_first' without max_length must not truncate,
            # whatever the truncation_side.
            sequence_length = len(encoded_sequence)
            tokenizer.truncation_side = 'right'
            truncated_sequence_right = tokenizer.encode(sequence, truncation=True, add_special_tokens=False)
            truncated_sequence_right_length = len(truncated_sequence_right)
            self.assertEqual(sequence_length, truncated_sequence_right_length)
            self.assertEqual(encoded_sequence, truncated_sequence_right)
            tokenizer.truncation_side = 'left'
            truncated_sequence_left = tokenizer.encode(sequence, truncation='longest_first', add_special_tokens=False)
            truncated_sequence_left_length = len(truncated_sequence_left)
            self.assertEqual(sequence_length, truncated_sequence_left_length)
            self.assertEqual(encoded_sequence, truncated_sequence_left)
            # Default (no truncation argument) and truncation=False are also no-ops.
            tokenizer.truncation_side = 'right'
            truncated_sequence_right = tokenizer.encode(sequence, add_special_tokens=False)
            truncated_sequence_right_length = len(truncated_sequence_right)
            self.assertEqual(sequence_length, truncated_sequence_right_length)
            self.assertEqual(encoded_sequence, truncated_sequence_right)
            tokenizer.truncation_side = 'left'
            truncated_sequence_left = tokenizer.encode(sequence, truncation=False, add_special_tokens=False)
            truncated_sequence_left_length = len(truncated_sequence_left)
            self.assertEqual(sequence_length, truncated_sequence_left_length)
            self.assertEqual(encoded_sequence, truncated_sequence_left)
def test_padding_to_max_length(self):
    """Exercise the legacy `pad_to_max_length` flag: with max_length it right-pads,
    without one it must leave a single sequence unchanged."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'Sequence'
            padding_size = 10
            # Ensure a pad token exists (installing one if needed) before padding.
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            # NOTE(review): `pad_to_max_length` looks like the pre-`padding=` keyword,
            # kept deliberately for backward-compatibility coverage — confirm.
            padded_sequence = tokenizer.encode(sequence, max_length=(sequence_length + padding_size), pad_to_max_length=True)
            padded_sequence_length = len(padded_sequence)
            self.assertEqual((sequence_length + padding_size), padded_sequence_length)
            self.assertEqual((encoded_sequence + ([padding_idx] * padding_size)), padded_sequence)
            # Without max_length, pad_to_max_length=True must not change the sequence.
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(sequence, pad_to_max_length=True)
            padded_sequence_right_length = len(padded_sequence_right)
            self.assertEqual(sequence_length, padded_sequence_right_length)
            self.assertEqual(encoded_sequence, padded_sequence_right)
def test_padding_to_multiple_of(self):
    """`pad_to_multiple_of=8` must round every returned sequence length up to a multiple
    of 8, but only when padding is actually enabled."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.pad_token is None):
                self.skipTest('No padding token.')
            else:
                empty_tokens = tokenizer('', padding=True, pad_to_multiple_of=8)
                normal_tokens = tokenizer('This is a sample input', padding=True, pad_to_multiple_of=8)
                for (key, value) in empty_tokens.items():
                    self.assertEqual((len(value) % 8), 0, f'BatchEncoding.{key} is not multiple of 8')
                for (key, value) in normal_tokens.items():
                    self.assertEqual((len(value) % 8), 0, f'BatchEncoding.{key} is not multiple of 8')
                # Without padding enabled, pad_to_multiple_of has no effect
                # (assumes 'This' encodes to fewer than 8 ids — TODO confirm per tokenizer).
                normal_tokens = tokenizer('This', pad_to_multiple_of=8)
                for (key, value) in normal_tokens.items():
                    self.assertNotEqual((len(value) % 8), 0, f'BatchEncoding.{key} is not multiple of 8')
                normal_tokens = tokenizer('This', padding=True, truncation=True, pad_to_multiple_of=8)
                for (key, value) in normal_tokens.items():
                    self.assertEqual((len(value) % 8), 0, f'BatchEncoding.{key} is not multiple of 8')
                # max_length=12 is not a multiple of 8, so combining it with
                # pad_to_multiple_of=8 and truncation must raise.
                self.assertRaises(ValueError, tokenizer.__call__, 'This', padding=True, truncation=True, max_length=12, pad_to_multiple_of=8)
def test_padding_with_attention_mask(self):
    """`tokenizer.pad` must extend a pre-computed attention mask with zeros on the padding side."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if tokenizer.pad_token is None:
                self.skipTest('No padding token.')
            if 'attention_mask' not in tokenizer.model_input_names:
                self.skipTest('This model does not use attention mask.')
            features = [
                {'input_ids': [1, 2, 3, 4, 5, 6], 'attention_mask': [1, 1, 1, 1, 1, 0]},
                {'input_ids': [1, 2, 3], 'attention_mask': [1, 1, 0]},
            ]
            padded = tokenizer.pad(features)
            # The shorter mask is filled with zeros; where depends on the padding side.
            if tokenizer.padding_side == 'right':
                expected_short_mask = [1, 1, 0, 0, 0, 0]
            else:
                expected_short_mask = [0, 0, 0, 1, 1, 0]
            self.assertListEqual(
                padded['attention_mask'],
                [[1, 1, 1, 1, 1, 0], expected_short_mask],
            )
def test_encode_plus_with_padding(self):
    """encode_plus padding: a no-op without a fixed length; with padding='max_length',
    input_ids, special_tokens_mask, token_type_ids and attention_mask must all be padded
    on the configured side."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'Sequence'
            # Ensure a pad token exists (installing one if needed) before padding.
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_size = 10
            padding_idx = tokenizer.pad_token_id
            token_type_padding_idx = tokenizer.pad_token_type_id
            # Reference (unpadded) encoding.
            encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True)
            input_ids = encoded_sequence['input_ids']
            special_tokens_mask = encoded_sequence['special_tokens_mask']
            sequence_length = len(input_ids)
            # padding=True and padding=False on a single sequence must both be no-ops.
            tokenizer.padding_side = 'right'
            not_padded_sequence = tokenizer.encode_plus(sequence, padding=True, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            self.assertEqual(sequence_length, not_padded_sequence_length)
            self.assertEqual(input_ids, not_padded_input_ids)
            self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)
            not_padded_sequence = tokenizer.encode_plus(sequence, padding=False, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            self.assertEqual(sequence_length, not_padded_sequence_length)
            self.assertEqual(input_ids, not_padded_input_ids)
            self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)
            # RIGHT padding to max_length: pad ids appended; mask marks pads as special (1).
            tokenizer.padding_side = 'right'
            right_padded_sequence = tokenizer.encode_plus(sequence, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            right_padded_input_ids = right_padded_sequence['input_ids']
            right_padded_special_tokens_mask = right_padded_sequence['special_tokens_mask']
            right_padded_sequence_length = len(right_padded_input_ids)
            self.assertEqual((sequence_length + padding_size), right_padded_sequence_length)
            self.assertEqual((input_ids + ([padding_idx] * padding_size)), right_padded_input_ids)
            self.assertEqual((special_tokens_mask + ([1] * padding_size)), right_padded_special_tokens_mask)
            # LEFT padding to max_length: pad ids prepended.
            tokenizer.padding_side = 'left'
            left_padded_sequence = tokenizer.encode_plus(sequence, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            left_padded_input_ids = left_padded_sequence['input_ids']
            left_padded_special_tokens_mask = left_padded_sequence['special_tokens_mask']
            left_padded_sequence_length = len(left_padded_input_ids)
            self.assertEqual((sequence_length + padding_size), left_padded_sequence_length)
            self.assertEqual((([padding_idx] * padding_size) + input_ids), left_padded_input_ids)
            self.assertEqual((([1] * padding_size) + special_tokens_mask), left_padded_special_tokens_mask)
            # token_type_ids are padded with pad_token_type_id on the same side.
            if ('token_type_ids' in tokenizer.model_input_names):
                token_type_ids = encoded_sequence['token_type_ids']
                left_padded_token_type_ids = left_padded_sequence['token_type_ids']
                right_padded_token_type_ids = right_padded_sequence['token_type_ids']
                self.assertEqual((token_type_ids + ([token_type_padding_idx] * padding_size)), right_padded_token_type_ids)
                self.assertEqual((([token_type_padding_idx] * padding_size) + token_type_ids), left_padded_token_type_ids)
            # attention_mask is padded with zeros on the same side.
            if ('attention_mask' in tokenizer.model_input_names):
                attention_mask = encoded_sequence['attention_mask']
                right_padded_attention_mask = right_padded_sequence['attention_mask']
                left_padded_attention_mask = left_padded_sequence['attention_mask']
                self.assertEqual((attention_mask + ([0] * padding_size)), right_padded_attention_mask)
                self.assertEqual((([0] * padding_size) + attention_mask), left_padded_attention_mask)
def test_padding_warning_message_fast_tokenizer(self):
    """Calling `pad` on a fast tokenizer's output should emit exactly one efficiency
    warning; the slow tokenizer path should emit none."""
    if (not self.test_rust_tokenizer):
        return
    sequence = 'This is a text'
    tokenizer_fast = self.get_rust_tokenizer()
    # Ensure a pad token exists so `pad` does not raise for an unrelated reason.
    self._check_no_pad_token_padding(tokenizer_fast, sequence)
    encoding_fast = tokenizer_fast(sequence)
    with self.assertLogs('transformers', level='WARNING') as cm:
        tokenizer_fast.pad(encoding_fast)
    # Exactly one record: the fast-tokenizer efficiency warning.
    self.assertEqual(len(cm.records), 1)
    self.assertIn('Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.', cm.records[0].message)
    if (not self.test_slow_tokenizer):
        return
    tokenizer_slow = self.get_tokenizer()
    self._check_no_pad_token_padding(tokenizer_slow, sequence)
    encoding_slow = tokenizer_slow(sequence)
    with self.assertLogs(level='WARNING') as cm:
        # assertLogs fails if nothing is logged, so emit a dummy warning;
        # the slow `pad` itself must contribute no additional record.
        logger.warning('Dummy warning')
        tokenizer_slow.pad(encoding_slow)
    self.assertEqual(len(cm.records), 1)
    self.assertIn('Dummy warning', cm.records[0].message)
def test_separate_tokenizers(self):
    """Tokenizers created by separate `get_tokenizers` calls must not share init kwargs.

    Fix: the original asserted `tokenizer.init_kwargs['random_argument']` twice in a
    row; the duplicate line is removed.
    """
    tokenizers = self.get_tokenizers(random_argument=True)
    new_tokenizers = self.get_tokenizers(random_argument=False)
    for tokenizer, new_tokenizer in zip(tokenizers, new_tokenizers):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Each instance must keep exactly the kwargs it was created with.
            self.assertTrue(tokenizer.init_kwargs['random_argument'])
            self.assertFalse(new_tokenizer.init_kwargs['random_argument'])
def test_get_vocab(self):
    """`get_vocab` must return a dict, and every id in range must convert to a token —
    also after new tokens have been added."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            vocab_dict = tokenizer.get_vocab()
            self.assertIsInstance(vocab_dict, dict)
            self.assertGreaterEqual(len(tokenizer), len(vocab_dict))
            # Every id from 0..len-1 must be convertible back to a token.
            all_tokens = list(map(tokenizer.convert_ids_to_tokens, range(len(tokenizer))))
            self.assertEqual(len(all_tokens), len(tokenizer))
            # Adding a token must keep ids and vocabulary size in sync.
            tokenizer.add_tokens(['asdfasdfasdfasdf'])
            all_tokens = list(map(tokenizer.convert_ids_to_tokens, range(len(tokenizer))))
            self.assertEqual(len(all_tokens), len(tokenizer))
def test_conversion_reversible(self):
    """token->id and id->token must be mutual inverses for every non-unk vocab entry."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            for word, ind in tokenizer.get_vocab().items():
                # The unk token is skipped: many ids can map onto it.
                if word == tokenizer.unk_token:
                    continue
                self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind)
                self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word)
def test_call(self):
    """`tokenizer(...)` must be equivalent to encode_plus / batch_encode_plus
    for single sequences, pairs, batches, and batches of pairs."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequences = [
                'Testing batch encode plus',
                'Testing batch encode plus with different sequence lengths',
                'Testing batch encode plus with different sequence lengths correctly pads',
            ]
            # Single sequence.
            self.assertEqual(tokenizer.encode_plus(sequences[0]), tokenizer(sequences[0]))
            # Sequence pair.
            self.assertEqual(
                tokenizer.encode_plus(sequences[0], sequences[1]),
                tokenizer(sequences[0], sequences[1]),
            )
            # Batch of single sequences.
            self.assertEqual(tokenizer.batch_encode_plus(sequences), tokenizer(sequences))
            # Batch of sequence pairs.
            self.assertEqual(
                tokenizer.batch_encode_plus(list(zip(sequences, sequences))),
                tokenizer(sequences, sequences),
            )
def test_batch_encode_plus_batch_sequence_length(self):
    """batch_encode_plus must match per-sequence encode_plus, both unpadded and padded,
    and padding='longest' must ignore a larger max_length."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
            # No padding: batch output must equal the individual encodings.
            encoded_sequences = [tokenizer.encode_plus(sequence) for sequence in sequences]
            encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, padding=False)
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
            # Length of the longest input_ids in the batch.
            maximum_length = len(max([encoded_sequence['input_ids'] for encoded_sequence in encoded_sequences], key=len))
            # Ensure a pad token exists (installing one if needed) before padded checks.
            self._check_no_pad_token_padding(tokenizer, sequences)
            # padding=True must equal per-sequence padding to the batch maximum length.
            encoded_sequences_padded = [tokenizer.encode_plus(sequence, max_length=maximum_length, padding='max_length') for sequence in sequences]
            encoded_sequences_batch_padded = tokenizer.batch_encode_plus(sequences, padding=True)
            self.assertListEqual(encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded))
            # padding='longest' pads to the longest sequence even when max_length is larger,
            # so it must match padding=True.
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=True)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(sequences, max_length=(maximum_length + 10), padding='longest')
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
            # padding=False ignores max_length entirely.
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=False)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(sequences, max_length=(maximum_length + 10), padding=False)
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
@require_tokenizers
def test_added_token_are_matched_longest_first(self):
    """Added tokens must be matched longest-first regardless of insertion order
    ('extra_id_100' must win over its prefix 'extra_id_1').

    Fix: the decorator line was garbled to the bare name `_tokenizers` (a NameError
    at class-creation time); restored to `@require_tokenizers`.
    """
    if (not self.test_slow_tokenizer):
        self.skipTest('This test is only for slow tokenizers')
        return
    tokenizers = self.get_tokenizers(fast=False)
    # First: add the short token before the longer one.
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            try:
                tokenizer.add_tokens([AddedToken('extra_id_1')])
                tokenizer.add_tokens([AddedToken('extra_id_100')])
            except Exception:
                # Some tokenizers cannot accept added tokens; nothing to verify then.
                self.skipTest('Cannot add those Added tokens')
            tokens = tokenizer.tokenize('This is some extra_id_100')
            self.assertIn('extra_id_100', tokens)
    # Then: the reverse insertion order must give the same result.
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tokenizer.add_tokens([AddedToken('extra_id_100')])
            tokenizer.add_tokens([AddedToken('extra_id_1')])
            tokens = tokenizer.tokenize('This is some extra_id_100')
            self.assertIn('extra_id_100', tokens)
@require_tokenizers
def test_added_token_serializable(self):
    """A tokenizer with an added special AddedToken must survive a save/load round-trip.

    Fix: the decorator line was garbled to the bare name `_tokenizers` (a NameError
    at class-creation time); restored to `@require_tokenizers`.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            new_token = AddedToken('new_token', lstrip=True)
            tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                # Round-trip: saving and reloading must not raise.
                tokenizer.save_pretrained(tmp_dir_name)
                tokenizer.from_pretrained(tmp_dir_name)
def test_batch_encode_plus_padding(self):
    """Padded batch_encode_plus must equal per-sequence encode_plus with the same
    max_length, for both right- (default) and left-padding."""
    # Right padding (the default side).
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
            max_length = 100
            # Ensure a pad token exists (installing one if needed) before padding.
            self._check_no_pad_token_padding(tokenizer, sequences)
            encoded_sequences = [tokenizer.encode_plus(sequence, max_length=max_length, padding='max_length') for sequence in sequences]
            encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, max_length=max_length, padding='max_length')
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
    # Left padding: fresh tokenizers with padding_side flipped.
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tokenizer.padding_side = 'left'
            sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
            max_length = 100
            self._check_no_pad_token_padding(tokenizer, sequences)
            encoded_sequences = [tokenizer.encode_plus(sequence, max_length=max_length, padding='max_length') for sequence in sequences]
            encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, max_length=max_length, padding='max_length')
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
def test_pretokenized_inputs(self):
    """Encoding a whitespace-pretokenized input with `is_split_into_words=True` must match
    encoding the raw string, across encode / encode_plus / batch_encode_plus, with and
    without special tokens, for single sequences and pairs."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Tokenizers with add_prefix_space=False cannot round-trip split inputs; skip.
            if (hasattr(tokenizer, 'add_prefix_space') and (not tokenizer.add_prefix_space)):
                continue
            (sequence, ids) = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20)
            token_sequence = sequence.split()
            # --- single sequence: encode ---
            output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode(sequence, add_special_tokens=False)
            self.assertEqual(output, output_sequence)
            output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode(sequence, add_special_tokens=True)
            self.assertEqual(output, output_sequence)
            # --- single sequence: encode_plus ---
            output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            # --- batch of single sequences ---
            # Rejoining split tokens needs a leading space to match the prefixed original.
            sequence_batch = (([sequence.strip()] * 2) + [((sequence.strip() + ' ') + sequence.strip())])
            token_sequence_batch = [s.split() for s in sequence_batch]
            sequence_batch_cleaned_up_spaces = [(' ' + ' '.join(s)) for s in token_sequence_batch]
            output = tokenizer.batch_encode_plus(token_sequence_batch, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.batch_encode_plus(sequence_batch_cleaned_up_spaces, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.batch_encode_plus(token_sequence_batch, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.batch_encode_plus(sequence_batch_cleaned_up_spaces, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            # --- sequence pairs: encode ---
            output = tokenizer.encode(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=False)
            self.assertEqual(output, output_sequence)
            output = tokenizer.encode(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=True)
            self.assertEqual(output, output_sequence)
            # --- sequence pairs: encode_plus ---
            output = tokenizer.encode_plus(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.encode_plus(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            # --- batch of sequence pairs ---
            sequence_pair_batch = (([(sequence.strip(), sequence.strip())] * 2) + [(((sequence.strip() + ' ') + sequence.strip()), sequence.strip())])
            token_sequence_pair_batch = [tuple((s.split() for s in pair)) for pair in sequence_pair_batch]
            sequence_pair_batch_cleaned_up_spaces = [tuple(((' ' + ' '.join(s)) for s in pair)) for pair in token_sequence_pair_batch]
            output = tokenizer.batch_encode_plus(token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.batch_encode_plus(sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.batch_encode_plus(token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.batch_encode_plus(sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
def test_prepare_for_model(self):
    """`prepare_for_model` on pre-tokenized ids must match `encode_plus` on the raw string."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            text = 'Testing the prepare_for_model method.'
            token_ids = tokenizer.encode(text, add_special_tokens=False)
            # Built from ids vs. built from the string — both paths must agree.
            built_from_ids = tokenizer.prepare_for_model(token_ids, add_special_tokens=True)
            built_from_text = tokenizer.encode_plus(text, add_special_tokens=True)
            self.assertEqual(built_from_text, built_from_ids)
def test_batch_encode_plus_overflowing_tokens(self):
    """Smoke test: return_overflowing_tokens combined with truncation and padding must not raise."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        texts = ['Testing the prepare_for_model method.', 'Test']
        if tokenizer.pad_token is None:
            # Padding requires a pad token; install one when missing.
            tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        # Only checks that the call completes without error.
        tokenizer.batch_encode_plus(
            texts,
            return_overflowing_tokens=True,
            truncation=True,
            padding=True,
            max_length=3,
        )
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
    """PyTorch and TensorFlow `return_tensors` outputs must agree, and tensor conversion
    must raise for ragged/unpaddable batches.

    Fix: the decorator line was garbled to the bare name `_pt_tf_cross_test` (a NameError
    at class-creation time); restored to `@is_pt_tf_cross_test`.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
            # A ragged batch cannot be converted to a tensor without padding.
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors='pt')
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors='tf')
            if (tokenizer.pad_token_id is None):
                # Padding without a pad token must also raise.
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, padding=True, return_tensors='pt')
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, padding='longest', return_tensors='tf')
            else:
                pytorch_tensor = tokenizer.batch_encode_plus(sequences, padding=True, return_tensors='pt')
                tensorflow_tensor = tokenizer.batch_encode_plus(sequences, padding='longest', return_tensors='tf')
                encoded_sequences = tokenizer.batch_encode_plus(sequences, padding=True)
                # Both frameworks must produce identical matrices, matching the list output.
                for key in encoded_sequences.keys():
                    pytorch_value = pytorch_tensor[key].tolist()
                    tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                    encoded_value = encoded_sequences[key]
                    self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def _check_no_pad_token_padding(self, tokenizer, sequences):
    """If `tokenizer` lacks a pad token, assert that padding raises ValueError, then
    install '<PAD>' so the caller's subsequent padding calls succeed. No-op otherwise."""
    if tokenizer.pad_token_id is not None:
        return
    with self.assertRaises(ValueError):
        if isinstance(sequences, list):
            tokenizer.batch_encode_plus(sequences, padding='longest')
        else:
            tokenizer.encode_plus(sequences, padding=True)
    # Give the tokenizer a pad token so the caller can proceed with padding tests.
    tokenizer.add_special_tokens({'pad_token': '<PAD>'})
def check_subword_sampling(self, tokenizer: PreTrainedTokenizer, text: str=None) -> None:
    """Tokenize `text` five times and assert that subword regularization produced at
    least two different tokenizations, while every sample still decodes back to `text`."""
    if text is None:
        text = 'This is a test for subword regularization.'
    if self.test_sentencepiece_ignore_case:
        text = text.lower()
    samples = [tokenizer.tokenize(text) for _ in range(5)]
    # Sampling must yield at least one differing pair of tokenizations.
    self.assertTrue(any(a != b for a, b in itertools.combinations(samples, 2)))
    # Every sampled tokenization must still reconstruct the original text.
    for tokens in samples:
        detokenized = tokenizer.convert_tokens_to_string(tokens)
        if self.test_sentencepiece_ignore_case:
            detokenized = detokenized.lower()
        self.assertEqual(text, detokenized)
@require_torch
def test_torch_encode_plus_sent_to_model(self):
    """Smoke test: single and batch 'pt' encodings must be consumable by the mapped model.

    Fix: the decorator line was garbled to the bare name `_torch` (a NameError at
    class-creation time); restored to `@require_torch`.
    """
    import torch
    from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
                return
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            # Encoder-decoder or pad-less configs are out of scope for this check.
            if (config.is_encoder_decoder or (config.pad_token_id is None)):
                return
            model = model_class(config)
            # When the model exposes a common embedding matrix, it must cover the vocab.
            is_using_common_embeddings = hasattr(model.get_input_embeddings(), 'weight')
            if is_using_common_embeddings:
                self.assertGreaterEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))
            # Build a sequence from known vocab tokens and run it through the model.
            first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
            sequence = ' '.join(first_ten_tokens)
            encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='pt')
            encoded_sequence.to(model.device)
            batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt')
            with torch.no_grad():
                model(**encoded_sequence)
                model(**batch_encoded_sequence)
@require_tf
def test_tf_encode_plus_sent_to_model(self):
    """Smoke test: single and batch 'tf' encodings must be consumable by the mapped model.

    Fix: the decorator line was garbled to the bare name `_tf` (a NameError at
    class-creation time); restored to `@require_tf`.
    """
    from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
                return
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            # Encoder-decoder or pad-less configs are out of scope for this check.
            if (config.is_encoder_decoder or (config.pad_token_id is None)):
                return
            model = model_class(config)
            # The model's configured vocab must cover the tokenizer's.
            self.assertGreaterEqual(model.config.vocab_size, len(tokenizer))
            # Build a sequence from known vocab tokens and run it through the model.
            first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
            sequence = ' '.join(first_ten_tokens)
            encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='tf')
            batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='tf')
            model(encoded_sequence)
            model(batch_encoded_sequence)
@require_torch
def test_np_encode_plus_sent_to_model(self):
    """Smoke test: return_tensors='np' must succeed for both slow and fast tokenizers
    that are mapped to a model.

    Fix: the decorator line was garbled to the bare name `_torch` (a NameError at
    class-creation time); restored to `@require_torch` (needed for MODEL_MAPPING).
    """
    from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
                return
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            # Encoder-decoder or pad-less configs are out of scope for this check.
            if (config.is_encoder_decoder or (config.pad_token_id is None)):
                return
            first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
            sequence = ' '.join(first_ten_tokens)
            encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='np')
            batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='np')
            # NOTE(review): a None result presumably signals a failed numpy conversion —
            # confirm against the encode_plus implementation.
            if (encoded_sequence is None):
                raise ValueError('Cannot convert list to numpy tensor on encode_plus()')
            if (batch_encoded_sequence is None):
                raise ValueError('Cannot convert list to numpy tensor on batch_encode_plus()')
            if self.test_rust_tokenizer:
                fast_tokenizer = self.get_rust_tokenizer()
                encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors='np')
                batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='np')
                if (encoded_sequence_fast is None):
                    raise ValueError('Cannot convert list to numpy tensor on encode_plus() (fast)')
                if (batch_encoded_sequence_fast is None):
                    raise ValueError('Cannot convert list to numpy tensor on batch_encode_plus() (fast)')
@require_torch
def test_prepare_seq2seq_batch(self):
    """`prepare_seq2seq_batch` must honor max_length / max_target_length for inputs
    and labels, and omit decoder inputs when no targets are given.

    Fix: the decorator line was garbled to the bare name `_torch` (a NameError at
    class-creation time); restored to `@require_torch` (return_tensors='pt').
    """
    if (not self.test_seq2seq):
        return
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Long texts so truncation to max_length=3 is meaningful.
            src_text = [' UN Chief Says There Is No Military Solution in Syria', " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people."]
            tgt_text = ['Seful ONU declara ca nu exista o solutie militara in Siria', 'Secretarul General Ban Ki-moon declara ca raspunsul sau la intensificarea sprijinului militar al Rusiei pentru Siria este ca "nu exista o solutie militara" la conflictul de aproape cinci ani si ca noi arme nu vor face decat sa inrautateasca violentele si mizeria pentru milioane de oameni.']
            try:
                batch = tokenizer.prepare_seq2seq_batch(src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors='pt', src_lang='en_XX')
            except NotImplementedError:
                # Tokenizer does not support seq2seq batching; nothing to verify.
                return
            self.assertEqual(batch.input_ids.shape[1], 3)
            self.assertEqual(batch.labels.shape[1], 10)
            # Without max_target_length, labels fall back to max_length.
            batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_length=3, return_tensors='pt')
            self.assertEqual(batch.input_ids.shape[1], 3)
            self.assertEqual(batch.labels.shape[1], 3)
            # Without tgt_texts, no decoder inputs are produced.
            batch_encoder_only = tokenizer.prepare_seq2seq_batch(src_texts=src_text, max_length=3, max_target_length=10, return_tensors='pt')
            self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
            self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
            self.assertNotIn('decoder_input_ids', batch_encoder_only)
def test_is_fast(self):
    """The rust tokenizer must report ``is_fast``; the slow one (when tested) must not."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            rust_tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertTrue(rust_tokenizer.is_fast)
            if not self.test_slow_tokenizer:
                continue
            python_tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertFalse(python_tokenizer.is_fast)
def test_fast_only_inputs(self):
    """Passing ``None`` to any fast-tokenizer entry point must raise ``TypeError``."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Same four calls as before, just driven from one loop.
            for entry_point in (
                tokenizer_r.tokenize,
                tokenizer_r.encode,
                tokenizer_r.encode_plus,
                tokenizer_r.batch_encode_plus,
            ):
                self.assertRaises(TypeError, entry_point, None)
def test_alignement_methods(self):
    """Exercise the fast tokenizer's BatchEncoding alignment helpers
    (word/token/char/sequence index round-trips) on single inputs, batches,
    and sequence pairs.

    NOTE(review): "alignement" is a historical misspelling; kept because
    subclasses may reference/override this exact name.
    """
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            words = ['Wonderful', 'no', 'inspiration', 'example', 'with', 'subtoken']
            text = ' '.join(words)
            batch_size = 3
            encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)
            batch_encoding = tokenizer_r.batch_encode_plus(([text] * batch_size), add_special_tokens=False)
            num_tokens = len(encoding['input_ids'])
            last_word_index = (len(words) - 1)
            last_token_index = (num_tokens - 1)
            last_batch_index = (batch_size - 1)
            last_char_index = (len(text) - 1)
            # words(): one word index per token, spanning word 0 .. last_word_index.
            self.assertEqual(len(encoding.words(0)), num_tokens)
            self.assertEqual(max(encoding.words(0)), last_word_index)
            self.assertEqual(min(encoding.words(0)), 0)
            self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
            self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
            self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
            self.assertEqual(len(encoding.tokens(0)), num_tokens)
            # token_to_word: first/last token map to first/last word, with and
            # without an explicit batch index as first argument.
            self.assertEqual(encoding.token_to_word(0), 0)
            self.assertEqual(encoding.token_to_word(0, 0), 0)
            self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
            self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
            self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
            self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
            self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
            # word_to_tokens: spans are half-open, so .end is last index + 1.
            self.assertEqual(encoding.word_to_tokens(0).start, 0)
            self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
            self.assertEqual(encoding.word_to_tokens(last_word_index).end, (last_token_index + 1))
            self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, (last_token_index + 1))
            self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
            self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, (last_token_index + 1))
            self.assertEqual(batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, (last_token_index + 1))
            # token_to_chars: same half-open convention against character offsets.
            self.assertEqual(encoding.token_to_chars(0).start, 0)
            self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
            self.assertEqual(encoding.token_to_chars(last_token_index).end, (last_char_index + 1))
            self.assertEqual(encoding.token_to_chars(0, last_token_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
            self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.token_to_chars(last_batch_index, last_token_index).end, (last_char_index + 1))
            # char_to_token: first/last character map to first/last token.
            self.assertEqual(encoding.char_to_token(0), 0)
            self.assertEqual(encoding.char_to_token(0, 0), 0)
            self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
            self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
            self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
            self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
            self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
            # char_to_word: first/last character map to first/last word.
            self.assertEqual(encoding.char_to_word(0), 0)
            self.assertEqual(encoding.char_to_word(0, 0), 0)
            self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
            self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
            self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
            self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
            self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
            # word_to_chars: half-open character spans per word.
            self.assertEqual(encoding.word_to_chars(0).start, 0)
            self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
            self.assertEqual(encoding.word_to_chars(last_word_index).end, (last_char_index + 1))
            self.assertEqual(encoding.word_to_chars(0, last_word_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
            self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.word_to_chars(last_batch_index, last_word_index).end, (last_char_index + 1))
            # token_to_sequence: without special tokens, every token belongs to sequence 0.
            self.assertEqual(encoding.token_to_sequence((num_tokens // 2)), 0)
            self.assertEqual(encoding.token_to_sequence(0, (num_tokens // 2)), 0)
            self.assertEqual(batch_encoding.token_to_sequence(1, (num_tokens // 2)), 0)
            self.assertEqual(batch_encoding.token_to_sequence(0, (num_tokens // 2)), 0)
            self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, (num_tokens // 2)), 0)
            # --- Sequence-pair checks: "inspiration"/"example" occur in both sequences
            # at different positions, so index-based and value-based lookups can be
            # compared across sequence_index 0 and 1.
            words = ['Wonderful', 'no', 'inspiration', 'example', 'with', 'subtoken']
            text = ' '.join(words)
            pair_words = ['Amazing', 'example', 'full', 'of', 'inspiration']
            pair_text = ' '.join(pair_words)
            batch_size = 3
            index_word_in_first_seq = words.index('inspiration')
            index_word_in_pair_seq = pair_words.index('inspiration')
            index_char_in_first_seq = text.find('inspiration')
            index_char_in_pair_seq = pair_text.find('inspiration')
            pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=False)
            pair_batch_encoding = tokenizer_r.batch_encode_plus(([(text, pair_text)] * batch_size), add_special_tokens=False)
            # NOTE(review): the five values below are recomputed from the
            # single-sequence `encoding` (not `pair_encoding`) and are never used
            # again — this looks like dead code; confirm before removing.
            num_tokens = len(encoding['input_ids'])
            last_word_index = (len(words) - 1)
            last_token_index = (num_tokens - 1)
            last_batch_index = (batch_size - 1)
            last_char_index = (len(text) - 1)
            # word_to_tokens with sequence_index: distinct positions, same token value.
            self.assertNotEqual(pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(pair_encoding['input_ids'][pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start], pair_encoding['input_ids'][pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start])
            self.assertNotEqual(pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(pair_batch_encoding['input_ids'][1][pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start], pair_batch_encoding['input_ids'][1][pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start])
            # char_to_token with sequence_index: distinct positions, same token value.
            self.assertNotEqual(pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(pair_encoding['input_ids'][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)], pair_encoding['input_ids'][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)])
            self.assertNotEqual(pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(pair_batch_encoding['input_ids'][1][pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0)], pair_batch_encoding['input_ids'][1][pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1)])
            # char_to_word with sequence_index: distinct word indices, same word value.
            self.assertNotEqual(pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)], pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)])
            self.assertNotEqual(pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)], pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)])
            # word_to_chars with sequence_index: distinct offsets, same character value.
            self.assertNotEqual(pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start], pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start])
            self.assertNotEqual(pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start], pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start])
            # token_to_sequence with special tokens: both sequence ids must appear,
            # and special tokens (when any are added) map to None.
            pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=True)
            pair_sequence_ids = [pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding['input_ids']))]
            self.assertIn(0, pair_sequence_ids)
            self.assertIn(1, pair_sequence_ids)
            if tokenizer_r.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair_sequence_ids)
            pair_batch_encoding = tokenizer_r.batch_encode_plus(([(text, pair_text)] * batch_size), add_special_tokens=True)
            pair_batch_sequence_ids = [pair_batch_encoding.token_to_sequence(1, i) for i in range(len(pair_batch_encoding['input_ids'][0]))]
            self.assertIn(0, pair_batch_sequence_ids)
            self.assertIn(1, pair_batch_sequence_ids)
            if tokenizer_r.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair_batch_sequence_ids)
def test_tokenization_python_rust_equals(self):
    """Slow (python) and fast (rust) tokenizers must produce identical
    input_ids / token_type_ids / attention_mask for simple, pair, truncated,
    and overflowing encodings of the shared corpus (``self._data``).

    Fix: the pair-encoding comparison now iterates ``input_pairs_p.keys()``
    instead of the unrelated ``input_p.keys()`` from the previous section.
    """
    if not self.test_slow_tokenizer:
        return

    compared_keys = ('input_ids', 'token_type_ids', 'attention_mask')
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            # Single input.
            input_p = tokenizer_p.encode_plus(self._data)
            input_r = tokenizer_r.encode_plus(self._data)
            for key in filter(lambda x: x in compared_keys, input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])

            # Pair input (compare the pair encodings' own keys).
            input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
            input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)
            for key in filter(lambda x: x in compared_keys, input_pairs_p.keys()):
                self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])

            # Truncated input.
            input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
            input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)
            for key in filter(lambda x: x in compared_keys, input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])

            # Truncated input with overflow: the fast tokenizer returns a batch,
            # so compare against its first element.
            input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            for key in filter(lambda x: x in compared_keys, input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_num_special_tokens_to_add_equal(self):
    """Slow and fast tokenizers must report the same number of added special
    tokens for both the single-sequence and the pair case."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            for is_pair in (False, True):
                self.assertEqual(
                    tokenizer_r.num_special_tokens_to_add(is_pair),
                    tokenizer_p.num_special_tokens_to_add(is_pair),
                )
def test_max_length_equal(self):
    """Slow and fast tokenizers must agree on their max-length bookkeeping."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            fast_tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            for attribute in ('max_len_single_sentence', 'max_len_sentences_pair'):
                self.assertEqual(getattr(fast_tokenizer, attribute), getattr(slow_tokenizer, attribute))
def test_special_tokens_map_equal(self):
    """The special-tokens map must be identical for the slow and fast tokenizer."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            fast_tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_items = slow_tokenizer.special_tokens_map.items()
            fast_items = fast_tokenizer.special_tokens_map.items()
            self.assertSequenceEqual(slow_items, fast_items)
def test_add_tokens(self):
    """Adding regular and special tokens must grow the fast tokenizer's vocab
    by the expected counts and be reflected in ``special_tokens_map``."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            initial_size = len(tokenizer_r)

            # Regular tokens — the empty string adds nothing.
            self.assertEqual(tokenizer_r.add_tokens(''), 0)
            self.assertEqual(tokenizer_r.add_tokens('testoken'), 1)
            self.assertEqual(tokenizer_r.add_tokens(['testoken1', 'testtoken2']), 2)
            self.assertEqual(len(tokenizer_r), initial_size + 3)

            # Special tokens — an empty mapping is a no-op.
            self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
            self.assertEqual(tokenizer_r.add_special_tokens({'bos_token': '[BOS]', 'eos_token': '[EOS]'}), 2)
            # additional_special_tokens must be a list, not a bare string.
            self.assertRaises(AssertionError, tokenizer_r.add_special_tokens, {'additional_special_tokens': '<testtoken1>'})
            self.assertEqual(tokenizer_r.add_special_tokens({'additional_special_tokens': ['<testtoken2>']}), 1)
            self.assertEqual(tokenizer_r.add_special_tokens({'additional_special_tokens': ['<testtoken3>', '<testtoken4>']}), 2)
            self.assertIn('<testtoken3>', tokenizer_r.special_tokens_map['additional_special_tokens'])
            self.assertIsInstance(tokenizer_r.special_tokens_map['additional_special_tokens'], list)
            self.assertGreaterEqual(len(tokenizer_r.special_tokens_map['additional_special_tokens']), 2)

            # 3 regular + 2 named special + 3 additional special tokens in total.
            self.assertEqual(len(tokenizer_r), initial_size + 8)
def test_offsets_mapping(self):
    """`return_offsets_mapping` must yield one offset per token, and the
    special-tokens mask must count exactly the added special tokens."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            text = 'Wonderful no inspiration example with subtoken'
            pair = 'Along with an awesome pair'

            def check_offsets(encoding, expected_added_tokens):
                # One offset pair per input id; the mask sums to the number of
                # special tokens this tokenizer adds.
                offsets = encoding['offset_mapping']
                self.assertEqual(len(offsets), len(encoding['input_ids']))
                self.assertEqual(sum(encoding['special_tokens_mask']), expected_added_tokens)

            # Single sequence.
            check_offsets(
                tokenizer_r.encode_plus(text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True),
                tokenizer_r.num_special_tokens_to_add(False),
            )
            # Sequence pair.
            check_offsets(
                tokenizer_r.encode_plus(text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True),
                tokenizer_r.num_special_tokens_to_add(True),
            )
def test_batch_encode_dynamic_overflowing(self):
    """With ``return_overflowing_tokens=True`` and a tensor return type, every
    returned field except ``overflow_to_sample_mapping`` must be a rank-2
    tensor, padded/truncated to ``max_length`` in the batched cases."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        # NOTE(review): this immediately shadows the loop variable `tokenizer`,
        # so the subTest label below shows the freshly loaded fast tokenizer's
        # class twice — confirm whether the slow class was meant in the label.
        tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})'):
            # Pick whichever tensor framework is installed, in priority order.
            if is_torch_available():
                returned_tensor = 'pt'
            elif is_tf_available():
                returned_tensor = 'tf'
            elif is_flax_available():
                returned_tensor = 'jax'
            else:
                return
            # Tensor output needs padding, so skip tokenizers without a usable
            # pad token. NOTE(review): `return` aborts the remaining tokenizers,
            # not just this one — `continue` may be intended.
            if ((not tokenizer.pad_token) or (tokenizer.pad_token_id < 0)):
                return
            # Single input: every field must come back as a 2-D tensor.
            tokens = tokenizer.encode_plus('HuggingFace is solving NLP one commit at a time', max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
            # Single-example batch: rank 2 and last dimension equal to max_length.
            tokens = tokenizer.batch_encode_plus(['HuggingFace is solving NLP one commit at a time'], max_length=6, padding=True, truncation='only_first', return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
                self.assertEqual(tokens[key].shape[(- 1)], 6)
            # Multi-example batch where examples overflow by different amounts.
            tokens = tokenizer.batch_encode_plus(['HuggingFace is solving NLP one commit at a time', 'Very tiny input'], max_length=6, padding=True, truncation='only_first', return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
                self.assertEqual(tokens[key].shape[(- 1)], 6)
def test_compare_pretokenized_inputs(self):
    """Slow and fast tokenizers must agree on pre-tokenized inputs
    (``is_split_into_words=True``) across encode / encode_plus /
    batch_encode_plus, for single sequences and pairs."""
    if (not self.test_slow_tokenizer):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Skip tokenizers configured without a prefix space — presumably
            # they tokenize split words differently; confirm against the caller.
            if (hasattr(tokenizer_p, 'add_prefix_space') and (not tokenizer_p.add_prefix_space)):
                continue
            pretokenized_input_simple = 'This is a sample input'.split()
            pretokenized_input_pair = 'This is a sample pair'.split()
            # encode(): single pre-tokenized sequence, no special tokens.
            output_r = tokenizer_r.encode(pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False)
            output_p = tokenizer_p.encode(pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False)
            self.assertEqual(output_p, output_r)
            # NOTE(review): this rebinds the loop variable `kwargs` (the
            # from_pretrained kwargs). Harmless here since the loop variable is
            # not reused afterwards, but fragile if the method is extended.
            kwargs = {'is_split_into_words': True, 'return_overflowing_tokens': False, 'return_special_tokens_mask': True, 'return_offsets_mapping': False}
            batch_kwargs = {'is_split_into_words': True, 'return_overflowing_tokens': False, 'return_special_tokens_mask': True, 'return_offsets_mapping': False}
            # encode_plus(): compare every returned field.
            output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs)
            output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
            # batch_encode_plus(): batch of single sequences (last item is the
            # concatenation of both word lists, still a single sequence).
            input_batch = (([pretokenized_input_simple] * 2) + [(pretokenized_input_simple + pretokenized_input_pair)])
            output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
            output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
            # encode(): pre-tokenized sequence pair.
            output_r = tokenizer_r.encode(pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True)
            output_p = tokenizer_p.encode(pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True)
            self.assertEqual(output_p, output_r)
            # encode_plus(): pre-tokenized sequence pair.
            output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
            output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
            # batch_encode_plus(): mixed batch of single sequences.
            input_batch_pair = (([pretokenized_input_simple, pretokenized_input_pair] * 2) + [(pretokenized_input_simple + pretokenized_input_pair), pretokenized_input_pair])
            output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
            output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
def test_create_token_type_ids(self):
    """Slow and fast tokenizers must build identical token_type_ids for a
    single sequence and for a sequence pair."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            input_simple = [1, 2, 3]
            input_pair = [1, 2, 3]
            # Single sequence, then sequence pair.
            for call_args in ((input_simple,), (input_simple, input_pair)):
                output_r = tokenizer_r.create_token_type_ids_from_sequences(*call_args)
                output_p = tokenizer_p.create_token_type_ids_from_sequences(*call_args)
                self.assertEqual(output_p, output_r)
def test_build_inputs_with_special_tokens(self):
    """Slow and fast tokenizers must place special tokens identically around a
    single encoded sequence and around a pair."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Raw ids come from the slow tokenizer, without special tokens.
            input_simple = tokenizer_p.encode('This is a sample input', add_special_tokens=False)
            input_pair = tokenizer_p.encode('This is a sample pair', add_special_tokens=False)
            # Single sequence, then sequence pair.
            for call_args in ((input_simple,), (input_simple, input_pair)):
                output_r = tokenizer_r.build_inputs_with_special_tokens(*call_args)
                output_p = tokenizer_p.build_inputs_with_special_tokens(*call_args)
                self.assertEqual(output_p, output_r)
def test_padding(self, max_length=50):
    """Slow and fast tokenizers must pad identically across every entry point
    (encode / encode_plus / batch_encode_plus / pad), for single sequences and
    pairs, with the legacy ``pad_to_max_length`` flag as well as the
    ``padding`` argument's 'max_length', 'longest', and True forms."""
    if (not self.test_slow_tokenizer):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Both tokenizers must agree on the pad token before any comparison.
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            # --- encode(): single sequence ---
            # Legacy pad_to_max_length flag.
            input_r = tokenizer_r.encode('This is a simple input', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode('This is a simple input', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # padding='max_length' form.
            input_r = tokenizer_r.encode('This is a simple input', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode('This is a simple input', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # padding='longest' and padding=True must be equivalent.
            input_r = tokenizer_r.encode('This is a simple input', padding='longest')
            input_p = tokenizer_p.encode('This is a simple input', padding=True)
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode(): sequence pair ---
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', padding=True)
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', padding='longest')
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode_plus(): single sequence (also compares attention_mask) ---
            input_r = tokenizer_r.encode_plus('This is a simple input', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus('This is a simple input', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a simple input', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', padding='longest')
            input_p = tokenizer_p.encode_plus('This is a simple input', padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            # --- encode_plus(): sequence pair ---
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', padding='longest')
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            # --- batch_encode_plus(): batches of single sequences ---
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, pad_to_max_length=True)
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # 'longest' vs True, with and without a max_length cap.
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], padding='longest')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- batch_encode_plus(): batches of sequence pairs ---
            input_r = tokenizer_r.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], max_length=max_length, truncation=True, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], max_length=max_length, truncation=True, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], padding=True)
            input_p = tokenizer_p.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], padding='longest')
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- pad(): applied after encoding, single input ---
            input_r = tokenizer_r.encode_plus('This is a input 1')
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_p.encode_plus('This is a input 1')
            input_p = tokenizer_p.pad(input_p)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            input_r = tokenizer_r.encode_plus('This is a input 1')
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a input 1')
            input_p = tokenizer_p.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            # --- pad(): applied after batch encoding ---
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_p.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_p = tokenizer_p.pad(input_p)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_p = tokenizer_p.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # --- pad(): edge case of empty input ids ---
            input_r = tokenizer_r.pad({'input_ids': [[], []]}, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.pad({'input_ids': [[], []]}, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_padding_different_model_input_name(self):
    """Check that `pad` works identically for slow and fast tokenizers when the
    main model input has been renamed (e.g. "input_ids" -> "inputs")."""
    if not self.test_slow_tokenizer:
        # No slow (python) tokenizer available: nothing to compare against.
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            # BUGFIX: the second batch was previously produced with tokenizer_r
            # as well, so the rust-vs-python comparison below was vacuous.
            input_p = tokenizer_p.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            # Rename the main model input on both encodings and both tokenizers.
            input_r['inputs'] = input_r[tokenizer_r.model_input_names[0]]
            del input_r[tokenizer_r.model_input_names[0]]
            input_p['inputs'] = input_p[tokenizer_p.model_input_names[0]]
            del input_p[tokenizer_p.model_input_names[0]]
            tokenizer_r.model_input_names = ['inputs'] + tokenizer_r.model_input_names[1:]
            tokenizer_p.model_input_names = ['inputs'] + tokenizer_p.model_input_names[1:]
            input_r = tokenizer_r.pad(input_r, padding='longest')
            # BUGFIX: pad the slow batch with the slow tokenizer (was tokenizer_r).
            input_p = tokenizer_p.pad(input_p, padding='longest')
            max_length = len(input_p['inputs'][0])
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id, model_main_input_name='inputs')
def test_save_pretrained(self):
    """save_pretrained/from_pretrained round-trips for fast vs slow tokenizers,
    in the default, legacy, and non-legacy file formats."""
    if (not self.test_slow_tokenizer):
        # No slow (python) tokenizer available for comparison.
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # --- Default format: fast tokenizer additionally saves tokenizer.json.
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
            # Every emitted JSON file must be well-formed.
            for file_path in (tokenizer_r_files + tokenizer_p_files):
                if (os.path.exists(file_path) and file_path.endswith('.json')):
                    check_json_file_has_correct_format(file_path)
            self.assertTrue(any((('tokenizer.json' in f) for f in tokenizer_r_files)))
            # Apart from tokenizer.json, the emitted file lists must match.
            tokenizer_r_files = tuple((f for f in tokenizer_r_files if ('tokenizer.json' not in f)))
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
            # Reload both and check that all special tokens survived the round trip.
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
            shutil.rmtree(tmpdirname2)
            # --- legacy_format=True: fast saves exactly the same files as slow.
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
            shutil.rmtree(tmpdirname2)
            # --- legacy_format=False: fast saves the unified tokenizer.json only.
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
            self.assertTrue(any((('tokenizer.json' in f) for f in tokenizer_r_files)))
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
            shutil.rmtree(tmpdirname2)
def test_embeded_special_tokens(self):
    """Slow and fast tokenizers must agree on a sentence that embeds a special token."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            sentence = 'A, <mask> AllenNLP sentence.'
            encoded_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True)
            encoded_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True)
            # Every returned field must match between the two implementations.
            for field in encoded_p.keys():
                self.assertEqual(encoded_r[field], encoded_p[field])
            # Token-type ids (when present) must sum to the same value.
            if 'token_type_ids' in encoded_r:
                self.assertEqual(sum(encoded_r['token_type_ids']), sum(encoded_p['token_type_ids']))
            # The decoded token strings must also be identical.
            decoded_r = tokenizer_r.convert_ids_to_tokens(encoded_r['input_ids'])
            decoded_p = tokenizer_p.convert_ids_to_tokens(encoded_p['input_ids'])
            self.assertSequenceEqual(decoded_r, decoded_p)
def test_compare_add_special_tokens(self):
    """For empty-ish inputs, output with special tokens must be exactly
    num_special_tokens_to_add longer than output without them."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            expected_extra = tokenizer_r.num_special_tokens_to_add(pair=False)
            for text in ['', ' ']:
                # tokenize()
                plain = tokenizer_r.tokenize(text, add_special_tokens=False)
                special = tokenizer_r.tokenize(text, add_special_tokens=True)
                self.assertEqual(len(plain), len(special) - expected_extra)
                # encode()
                plain = tokenizer_r.encode(text, add_special_tokens=False)
                special = tokenizer_r.encode(text, add_special_tokens=True)
                self.assertEqual(len(plain), len(special) - expected_extra)
                # encode_plus(): every field grows by the same amount.
                plain = tokenizer_r.encode_plus(text, add_special_tokens=False)
                special = tokenizer_r.encode_plus(text, add_special_tokens=True)
                for key in plain.keys():
                    self.assertEqual(len(plain[key]), len(special[key]) - expected_extra)
                # batch_encode_plus(): same invariant per sequence.
                plain = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False)
                special = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True)
                for key in plain.keys():
                    for seq_plain, seq_special in zip(plain[key], special[key]):
                        self.assertEqual(len(seq_plain), len(seq_special) - expected_extra)
def test_compare_prepare_for_model(self):
    """prepare_for_model must produce identical outputs for slow and fast tokenizers."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            string_sequence = 'Asserting that both tokenizers are equal'
            ids_p = tokenizer_p.encode(string_sequence, add_special_tokens=False)
            ids_r = tokenizer_r.encode(string_sequence, add_special_tokens=False)
            python_output = tokenizer_p.prepare_for_model(ids_p)
            rust_output = tokenizer_r.prepare_for_model(ids_r)
            for key in python_output:
                self.assertEqual(python_output[key], rust_output[key])
def test_special_tokens_initialization(self):
    """additional_special_tokens passed at init must be encoded as single tokens,
    consistently across fast, slow, and converted-from-slow tokenizers."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            added_tokens = [AddedToken('<special>', lstrip=True)]
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
            r_output = tokenizer_r.encode('Hey this is a <special> token')
            # Resolve the id assigned to the added token.
            special_token_id = tokenizer_r.encode('<special>', add_special_tokens=False)[0]
            self.assertTrue((special_token_id in r_output))
            if self.test_slow_tokenizer:
                # from_slow=True forces re-conversion from the slow tokenizer files.
                tokenizer_cr = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                p_output = tokenizer_p.encode('Hey this is a <special> token')
                cr_output = tokenizer_cr.encode('Hey this is a <special> token')
                # All three implementations must agree and include the token id.
                self.assertEqual(p_output, r_output)
                self.assertEqual(cr_output, r_output)
                self.assertTrue((special_token_id in p_output))
                self.assertTrue((special_token_id in cr_output))
def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
    """additional_special_tokens written into the saved config files must be
    honored on reload, and must be overridable via from_pretrained kwargs."""
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for (tokenizer_class, tokenizer_utils) in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            # Inject a custom additional special token directly into both saved JSON files.
            with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                special_tokens_map = json.load(json_file)
            with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                tokenizer_config = json.load(json_file)
            special_tokens_map['additional_special_tokens'] = ['an_additional_special_token']
            tokenizer_config['additional_special_tokens'] = ['an_additional_special_token']
            with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                json.dump(special_tokens_map, outfile)
            with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                json.dump(tokenizer_config, outfile)
            # Reloading without kwargs must pick the token up from the files.
            tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
            self.assertIn('an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
            self.assertIn('an_additional_special_token', tokenizer_without_change_in_init.get_vocab())
            self.assertEqual(['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])))
            # Passing additional_special_tokens at load time must override the files.
            new_added_tokens = [AddedToken('a_new_additional_special_token', lstrip=True)]
            tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
            self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
            self.assertEqual(['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])))
def test_training_new_tokenizer(self):
    """train_new_from_iterator must yield a usable tokenizer that inherits the
    special-token configuration of the original."""
    if not self.test_rust_tokenizer:
        return
    tokenizer = self.get_rust_tokenizer()
    new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
    # The retrained tokenizer must work on fresh text.
    inputs = new_tokenizer(['This is the first sentence', 'This sentence is different .'])
    self.assertEqual(len(inputs['input_ids']), 2)
    decoded_input = new_tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True)
    expected_result = 'This is the first sentence'
    if tokenizer.backend_tokenizer.normalizer is not None:
        # Decoding returns normalized text, so normalize the reference too.
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
    # Special-token-related configuration must carry over unchanged.
    for pair in (False, True):
        self.assertEqual(tokenizer.num_special_tokens_to_add(pair), new_tokenizer.num_special_tokens_to_add(pair))
    self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
    self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
    self.assertSequenceEqual(tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended)
    self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
    """train_new_from_iterator with a special_tokens_map must remap every
    special token (and its id/AddedToken flags) in the retrained tokenizer."""
    if (not self.test_rust_tokenizer):
        return
    tokenizer = self.get_rust_tokenizer()
    class_signature = inspect.signature(tokenizer.__class__)
    # Only test cls_token remapping when the tokenizer class actually has one.
    if ('cls_token' in class_signature.parameters):
        new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: '<cls>'})
        cls_id = new_tokenizer.get_vocab()['<cls>']
        self.assertEqual(new_tokenizer.cls_token, '<cls>')
        self.assertEqual(new_tokenizer.cls_token_id, cls_id)
    # Build a map renaming every defined special token `tok` -> `toka`.
    special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
    special_tokens_list.remove('additional_special_tokens')
    special_tokens_map = {}
    for token in special_tokens_list:
        # The private `_<token>` attribute is None when the token is unset.
        if (getattr(tokenizer, f'_{token}') is not None):
            special_token = getattr(tokenizer, token)
            special_tokens_map[special_token] = f'{special_token}a'
    new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map)
    # Each remapped token must expose the new string and a matching vocab id.
    for token in special_tokens_list:
        if (getattr(tokenizer, f'_{token}') is None):
            continue
        special_token = getattr(tokenizer, token)
        if (special_token in special_tokens_map):
            new_special_token = getattr(new_tokenizer, token)
            self.assertEqual(special_tokens_map[special_token], new_special_token)
            new_id = new_tokenizer.get_vocab()[new_special_token]
            self.assertEqual(getattr(new_tokenizer, f'{token}_id'), new_id)
    # AddedToken flags (lstrip/rstrip/normalized/single_word) must be preserved
    # through the rename; unmapped tokens must carry over verbatim.
    for special_token in tokenizer.all_special_tokens_extended:
        if (isinstance(special_token, AddedToken) and (special_token.content not in special_tokens_map)):
            self.assertTrue((special_token in new_tokenizer.all_special_tokens_extended), f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}")
        elif isinstance(special_token, AddedToken):
            # Renamed AddedToken: search for a counterpart with identical flags.
            special_token_str = special_token.content
            new_special_token_str = special_tokens_map[special_token_str]
            find = False
            for candidate in new_tokenizer.all_special_tokens_extended:
                if (isinstance(candidate, AddedToken) and (candidate.content == new_special_token_str) and (candidate.lstrip == special_token.lstrip) and (candidate.rstrip == special_token.rstrip) and (candidate.normalized == special_token.normalized) and (candidate.single_word == special_token.single_word)):
                    find = True
                    break
            self.assertTrue(find, f"'{new_special_token_str}' doesn't appear in the list '{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as '{special_token}' in the list {tokenizer.all_special_tokens_extended}")
        elif (special_token not in special_tokens_map):
            self.assertTrue((special_token in new_tokenizer.all_special_tokens_extended), f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}")
        else:
            self.assertTrue((special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended))
    # The retrained tokenizer must still encode/decode ordinary text correctly.
    inputs = new_tokenizer(['This is the first sentence', 'This sentence is different .'])
    self.assertEqual(len(inputs['input_ids']), 2)
    decoded_input = new_tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True)
    expected_result = 'This is the first sentence'
    if (tokenizer.backend_tokenizer.normalizer is not None):
        # Decoding returns normalized text, so normalize the reference too.
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
def test_tokenizer_mismatch_warning(self):
    """Loading a checkpoint through the wrong tokenizer class must either log a
    mismatch warning or fail with the expected load error."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            with self.assertLogs('transformers', level='WARNING') as cm:
                # BUGFIX: `error_message` was only assigned inside the
                # `except EnvironmentError` branch but is read in the `finally`
                # clause below, raising NameError when neither the error occurred
                # nor a warning was logged. Initialize it up front.
                error_message = ''
                try:
                    # Deliberately load with a mismatched slow tokenizer class.
                    if self.tokenizer_class == BertTokenizer:
                        AlbertTokenizer.from_pretrained(pretrained_name)
                    else:
                        BertTokenizer.from_pretrained(pretrained_name)
                except EnvironmentError as e:
                    error_message = str(e)
                except (TypeError, AttributeError):
                    # The mismatched class may simply fail to parse the files.
                    pass
                finally:
                    logged_msg_target = 'The tokenizer class you load from this checkpoint is not the same type as the class this function is called from.'
                    raised_error_msg_target = "Can't load tokenizer for"
                    # Either the mismatch warning was logged, or loading failed
                    # with the expected "can't load" error message.
                    self.assertTrue(cm.records[0].message.startswith(logged_msg_target) if len(cm.records) > 0 else raised_error_msg_target in error_message)
                try:
                    # Same check for the fast tokenizer classes.
                    if self.rust_tokenizer_class == BertTokenizerFast:
                        AlbertTokenizerFast.from_pretrained(pretrained_name)
                    else:
                        BertTokenizerFast.from_pretrained(pretrained_name)
                except (TypeError, AttributeError):
                    pass
                finally:
                    self.assertTrue(cm.records[0].message.startswith('The tokenizer class you load from this checkpoint is not the same type as the class this function is called from.'))
@require_torch
def test_saving_tokenizer_trainer(self):
    """Trainer.save_model must include tokenizer.json in the checkpoint when the
    tokenizer was saved in the non-legacy (fast) format.

    BUGFIX: the decorator line had been garbled to the bare name `_torch`
    (a NameError at class-definition time); restored to `@require_torch`,
    which this test needs because it instantiates Trainer/TrainingArguments.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            with tempfile.TemporaryDirectory() as tmp_dir:
                # Save the tokenizer in the unified single-file format.
                tokenizer_old = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs, use_fast=True)
                tokenizer_old.save_pretrained(tmp_dir, legacy_format=False)
                model = nn.Module()
                tokenizer = self.rust_tokenizer_class.from_pretrained(tmp_dir)
                training_args = TrainingArguments(output_dir=tmp_dir, do_train=True, no_cuda=True)
                trainer = Trainer(model=model, args=training_args, tokenizer=tokenizer)
                trainer.save_model(os.path.join(tmp_dir, 'checkpoint'))
                self.assertIn('tokenizer.json', os.listdir(os.path.join(tmp_dir, 'checkpoint')))
def test_convert_tokens_to_string_format(self):
    """convert_tokens_to_string must return a plain str."""
    for tokenizer in self.get_tokenizers(fast=True, do_lower_case=True):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            joined = tokenizer.convert_tokens_to_string(['this', 'is', 'a', 'test'])
            self.assertIsInstance(joined, str)
def test_save_slow_from_fast_and_reload_fast(self):
    """Fast -> slow -> fast round trip: a slow tokenizer built from fast-saved
    files must itself be saveable and reloadable as a fast tokenizer."""
    if ((not self.test_slow_tokenizer) or (not self.test_rust_tokenizer)):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            with tempfile.TemporaryDirectory() as tmp_dir_1:
                # Save only the raw backend tokenizer.json, reload a fast tokenizer
                # from that single file, then save it in legacy (slow) format.
                tokenizer_fast_old_1 = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs, use_fast=True)
                tokenizer_file = os.path.join(tmp_dir_1, 'tokenizer.json')
                tokenizer_fast_old_1.backend_tokenizer.save(tokenizer_file)
                tokenizer_fast_old_2 = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs, use_fast=True, tokenizer_file=tokenizer_file)
                tokenizer_fast_old_2.save_pretrained(tmp_dir_1, legacy_format=True)
                tokenizer_slow = self.tokenizer_class.from_pretrained(tmp_dir_1)
            with tempfile.TemporaryDirectory() as tmp_dir_2:
                # The slow tokenizer's files must be enough to rebuild a fast one.
                tokenizer_slow.save_pretrained(tmp_dir_2)
                self.rust_tokenizer_class.from_pretrained(tmp_dir_2)
def get_parser():
    """Build the command-line parser for the cli_pytition management script.

    Sub-commands (stored in ``args.action``): gen_orga, gen_user, join_org,
    generate_petitions, generate_signatures.
    """
    parser = argparse.ArgumentParser('cli_pytition')
    subparsers = parser.add_subparsers(help='sub-command help', dest='action')

    # Create an Organization.
    gen_orga = subparsers.add_parser('gen_orga', help='create Pytition Organization')
    gen_orga.add_argument('--orga', '-o', type=str, required=True)

    # Create a user account.
    gen_user = subparsers.add_parser('gen_user', help='create Pytition user')
    gen_user.add_argument('--username', '-u', type=str, required=True)
    gen_user.add_argument('--first-name', '-f', type=str, required=True)
    gen_user.add_argument('--last-name', '-l', type=str, required=True)
    gen_user.add_argument('--password', '-p', type=str, required=False, default='f00bar')

    # Attach an existing user to an existing Organization.
    join_org = subparsers.add_parser('join_org', help='make a Pytition user join an Organization')
    join_org.add_argument('--orga', '-o', type=str, required=True)
    join_org.add_argument('--user', '-u', type=str, required=True)

    # Bulk-generate petitions for an orga or a user.
    gen_petitions = subparsers.add_parser('generate_petitions', help='Generate petitions')
    gen_petitions.add_argument('--number', '-n', help='petition number', type=int, required=True)
    gen_petitions.add_argument('--orga', '-o', type=str, required=False)
    gen_petitions.add_argument('--user', '-u', type=str, required=False)

    # Bulk-generate signatures on a petition (by id or title).
    gen_signatures = subparsers.add_parser('generate_signatures', help='Generate signatures')
    gen_signatures.add_argument('--number', '-n', help='number of signatures', type=int, default=1)
    gen_signatures.add_argument('--petition-id', '-i', help='petition id', type=int)
    gen_signatures.add_argument('--petition-title', '-t', help='petition title', type=str)
    gen_signatures.add_argument('--first-name', help='first name of the signatory', type=str, default='bob')
    gen_signatures.add_argument('--last-name', help='last name of the signatory', type=str, default='Bar')
    gen_signatures.add_argument('--email', help='mail of the signatory', type=str, default='')

    return parser
class XmlDjangoLexer(DelegatingLexer):
    """Lexer for Django/Jinja template markup embedded in XML: Django/Jinja
    constructs are highlighted by DjangoLexer, the rest by XmlLexer."""
    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    filenames = ['*.xml.j2', '*.xml.jinja2']
    version_added = ''
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']
    # BUGFIX: `url` was a truncated, unterminated string literal (a syntax
    # error); restored to the Django template-language documentation URL.
    url = 'https://www.djangoproject.com/documentation/templates/'

    def __init__(self, **options):
        super().__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Pygments convention: analyse_text is called through the class and
        # deliberately takes no `self`.
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv
def sharded_tensor_test_cases(use_gpu: bool) -> TestCase:
    """Build a fixture of randomized ShardedTensors chunk-sharded on dim 0
    across four rank-0/cpu placements, plus their prepared write requests.

    NOTE(review): the declared return type `TestCase` does not match the
    4-tuple (srcs, entries, write_reqs, dsts) actually returned — presumably
    TestCase is a tuple alias defined elsewhere; confirm.
    """
    spec = ChunkShardingSpec(dim=0, placements=(['rank:0/cpu'] * 4))
    srcs = [sharded_tensor.empty(spec, TENSOR_SHAPE) for _ in range(NUM_TENSORS)]
    dsts = [sharded_tensor.empty(spec, TENSOR_SHAPE) for _ in range(NUM_TENSORS)]
    for (idx, (src, dst)) in enumerate(zip(srcs, dsts)):
        # Fill every local shard in-place with random data (iteration order
        # determines the RNG stream, so it matters for reproducibility).
        for shard in (src.local_shards() + dst.local_shards()):
            shard.tensor.random_()
        # Move every other src/dst pair to GPU to exercise mixed-device paths.
        if (use_gpu and ((idx % 2) == 0)):
            srcs[idx] = _sharded_tensor_to_gpu(src)
            dsts[idx] = _sharded_tensor_to_gpu(dst)
    entries = []
    write_reqs = []
    for (idx, tensor) in enumerate(srcs):
        # Prepare a storage entry and its write requests for each source tensor.
        (entry, wrs) = ShardedTensorIOPreparer.prepare_write(storage_path=f'sharded_tensor_{idx}', obj=tensor)
        entries.append(entry)
        write_reqs.extend(wrs)
    return (cast(List[torch.Tensor], srcs), entries, write_reqs, cast(List[torch.Tensor], dsts))
def trapezoidal_slit(top, base, wstep, center=0, norm_by='area', bplot=False, wunit='', scale=1, footerspacing=0, waveunit=None):
    """Generate a symmetric trapezoidal slit function sampled on a regular grid.

    Parameters
    ----------
    top : float
        Width of the flat top of the trapeze (same unit as ``wstep``).
    base : float
        Width of the base. ``top`` and ``base`` are swapped if given in the
        wrong order, so the argument order is forgiving.
    wstep : float
        Sampling step of the output wavespace array.
    center : float
        Center position of the slit.
    norm_by : 'area', 'max' or None
        Normalize so that the integral is 1 ('area'), the peak is 1 ('max'),
        or do not normalize (None).
    bplot : bool
        If True, plot the slit with ``plot_slit``.
    wunit : str
        Unit label used for the area-normalized intensity unit string.
    scale : float
        Final multiplicative factor applied to the intensity.
    footerspacing : int
        Number of zero samples appended on each side of the slit.
    waveunit :
        Unused here — presumably kept for signature compatibility; confirm.

    Returns
    -------
    (w, I) : two ndarrays
        Wavespace grid (odd length, symmetric about ``center``) and intensity.

    Raises
    ------
    ValueError
        If ``norm_by`` is not one of 'area', 'max', None.
    """
    if top > base:
        top, base = base, top
    FWHM = (base + top) / 2
    # Odd number of points on the flat top so the slit has a center sample.
    b = 2 * int((top / wstep) // 2) + 1
    slope = 1 / (FWHM - b * wstep)
    a = int(FWHM / wstep) - b
    # Descending edge of the trapeze, sampled from the top outward.
    I = 1 - np.arange(0, a + 1) * slope * wstep
    if len(I) == 0:
        I = np.ones(1)
    # Clamp a slightly-negative last sample; if it is still positive, leave
    # one extra zero so the edge reaches the baseline.
    if I[-1] < 0:
        I[-1] = 0
    elif I[-1] > 0:
        footerspacing += 1
    f = int(footerspacing)
    I = np.hstack((I, np.zeros(f)))
    # Mirror the edge around the flat top: [reversed edge | top | edge].
    I = np.hstack((I[1:][::-1], np.ones(b), I[1:]))
    w = wstep * np.linspace(-len(I) / 2, len(I) / 2, len(I)) + center
    if norm_by == 'area':
        # BUGFIX/compat: np.trapz was removed in NumPy 2.0 in favor of
        # np.trapezoid; use whichever is available.
        _trapz = getattr(np, 'trapezoid', None) or np.trapz
        I /= _trapz(I, x=w)
        Iunit = '1/{0}'.format(wunit)
    elif norm_by == 'max':
        I /= np.max(I)
        Iunit = ''
    elif norm_by is None:
        Iunit = None
    else:
        raise ValueError('Unknown normalization type: `norm_by` = {0}'.format(norm_by))
    I *= scale
    if bplot:
        plot_slit(w, I, wunit=wunit, Iunit=Iunit)
    return (w, I)
def solve_lla(sub_prob, penalty_func, init, init_upv=None, sp_init=None, sp_upv_init=None, sp_other_data=None, transform=abs, objective=None, max_steps=1, tol=1e-05, rel_crit=False, stop_crit='x_max', tracking_level=1, verbosity=0):
    """Run the Local Linear Approximation (LLA) algorithm for a concave penalty.

    Each step linearizes the penalty at the current transform ``T`` of the
    solution and solves the resulting weighted subproblem via ``sub_prob.solve``.

    Args:
        sub_prob: solver with .solve(weights, sp_init, sp_upv_init,
            sp_other_data) returning (value, unpenalized_value, other_data).
        penalty_func: penalty whose .grad(T) provides the LLA weights.
        init: initial value of the penalized variable.
        init_upv: optional initial value of the unpenalized variable.
        sp_init, sp_upv_init, sp_other_data: warm-start data for the subproblem.
        transform: map from the variable to the space the penalty acts on.
        objective: callable(value=..., upv=...) -> float; required when
            tracking_level >= 1 (and hence when stop_crit == 'loss').
        max_steps: maximum number of LLA steps.
        tol: stopping tolerance; None disables the stopping criterion.
        rel_crit: whether the stopping criterion is relative.
        stop_crit: one of 'x_max', 'x_L2', 'loss', or None.
        tracking_level: 0 = nothing, 1 = objective history, 2 = also x diffs.
        verbosity: >= 1 prints per-step progress.

    Returns:
        (current, current_upv, sp_other_data, opt_info)
    """
    current = deepcopy(init)
    current_upv = deepcopy(init_upv)
    T = transform(current)
    # No tolerance means no stopping criterion at all.
    if (tol is None):
        stop_crit = None
    assert ((stop_crit is None) or (stop_crit in ['x_max', 'x_L2', 'loss']))
    if (stop_crit is not None):
        # Keep the previous iterate (possibly concatenated with the
        # unpenalized variable) for change-based stopping checks.
        if (current_upv is not None):
            prev = deepcopy(safe_concat(current, current_upv))
        else:
            prev = deepcopy(current)
    history = {}
    # Loss-based stopping needs the objective history, so force tracking on.
    if ((stop_crit == 'loss') and (tracking_level == 0)):
        tracking_level = 1
    if (tracking_level >= 1):
        if (objective is None):
            raise ValueError('The objective function must be provided')
        history['objective'] = [objective(value=current, upv=current_upv)]
    if (tracking_level >= 2):
        if (stop_crit in ['x_max', 'x_L2']):
            history['x_diff'] = []
    start_time = time()
    step = 0
    stop = False
    for step in range(int(max_steps)):
        if (verbosity >= 1):
            print('Step {}, {:1.2f} seconds after start'.format((step + 1), (time() - start_time)))
        # Default the subproblem warm start to the current iterate.
        if (sp_init is None):
            sp_init = current
        if (sp_upv_init is None):
            sp_upv_init = current_upv
        # LLA majorization: weights are the penalty gradient at T(current).
        weights = penalty_func.grad(T)
        (current, current_upv, sp_other_data) = sub_prob.solve(weights=weights, sp_init=sp_init, sp_upv_init=sp_upv_init, sp_other_data=sp_other_data)
        # Invalidate T; it is recomputed lazily below for the next step.
        T = None
        if (tracking_level >= 1):
            history['objective'].append(objective(value=current, upv=current_upv))
        if (stop_crit in ['x_max', 'x_L2']):
            if (current_upv is not None):
                _current = safe_concat(current, current_upv)
            else:
                _current = current
            # stop_crit[-3:] is 'max' or '_L2' — the norm used by the check.
            (stop, diff_norm) = check_no_change(current=_current, prev=prev, tol=tol, rel_crit=rel_crit, norm=stop_crit[(- 3):])
            if (tracking_level >= 2):
                history['x_diff'].append(diff_norm)
        elif (stop_crit == 'loss'):
            # NOTE(review): this overwrites the solution `current` (and `prev`)
            # with scalar objective values; if the loop does not stop here the
            # next iteration feeds a scalar into transform/solve — looks like a
            # bug (a separate loss variable seems intended); confirm upstream.
            current = history['objective'][(- 1)]
            prev = history['objective'][(- 2)]
            stop = check_decreasing_loss(current=current, prev=prev, tol=tol, rel_crit=rel_crit, on_increase='ignore')
        if stop:
            break
        else:
            if (stop_crit in ['x_max', 'x_L2']):
                prev = deepcopy(_current)
        # Recompute the transform for the next LLA step.
        if (T is None):
            T = transform(current)
    opt_info = {'runtime': (time() - start_time), 'history': history, 'stop_crit': stop_crit, 'stop': stop, 'step': step}
    return (current, current_upv, sp_other_data, opt_info)
def assert_string_classification_works(clf):
    """Exercise a classifier wrapper end-to-end with string class labels:
    fit, score, predict and predict_proba must all behave sensibly."""
    # Map the integer training labels onto synthetic string class names.
    labels = np.array(['cls{}'.format(x) for x in range(num_class)])
    str_y_train = labels[y_train]
    clf.fit(X_train, str_y_train, batch_size=batch_size, epochs=epochs)
    # score() must return a finite scalar.
    score = clf.score(X_train, str_y_train, batch_size=batch_size)
    assert np.isscalar(score) and np.isfinite(score)
    # predict() must return one known string label per test sample.
    preds = clf.predict(X_test, batch_size=batch_size)
    assert preds.shape == (num_test,)
    known = set(labels)
    for predicted_label in np.unique(preds):
        assert predicted_label in known
    # predict_proba() rows must be proper probability distributions.
    proba = clf.predict_proba(X_test, batch_size=batch_size)
    assert proba.shape == (num_test, num_class)
    assert np.allclose(proba.sum(axis=1), np.ones(num_test))
class GT(CNF, object):
    """CNF encoding of the GT (greater-than / ordering) principle over `size`
    elements, as in PySAT's hard-formula generators.

    Variable v_{i,j} encodes "i > j". The clauses assert antisymmetry,
    transitivity, and that every element has some element above it — which is
    jointly contradictory for a finite set, so the formula is unsatisfiable.
    """
    def __init__(self, size, topv=0, verb=False):
        # size: number of ordered elements; topv: largest variable id already
        # in use (new ids start above it); verb: emit DIMACS comments.
        super(GT, self).__init__()
        vpool = IDPool(start_from=(topv + 1))
        # v_{i,j} ids are allocated lazily on first use, so the order of
        # var() calls below determines the variable numbering.
        var = (lambda i, j: vpool.id('v_{0}_{1}'.format(i, j)))
        # Antisymmetry: not (i > j and j > i).
        for i in range(1, size):
            for j in range((i + 1), (size + 1)):
                self.append([(- var(i, j)), (- var(j, i))])
        # Transitivity: (i > j) and (j > k) implies (i > k).
        for i in range(1, (size + 1)):
            for j in range(1, (size + 1)):
                if (j != i):
                    for k in range(1, (size + 1)):
                        if ((k != i) and (k != j)):
                            self.append([(- var(i, j)), (- var(j, k)), var(i, k)])
        # Totality of "being dominated": every j has some k with k > j.
        for j in range(1, (size + 1)):
            self.append([var(k, j) for k in range(1, (size + 1)) if (k != j)])
        if verb:
            # Human-readable mapping from element pairs to boolean variables.
            self.comments.append('c GT formula for {0} elements'.format(size))
            for i in range(1, (size + 1)):
                for j in range(1, (size + 1)):
                    if (i != j):
                        self.comments.append('c orig pair: {0}; bool var: {1}'.format((i, j), var(i, j)))
def run_kcell_complex(cell, nk):
    """Run KRHF then KMP2 on `cell` with an `nk` k-point mesh, forcing complex
    orbital coefficients so the complex KMP2 code path is exercised.

    Returns (scf_energy, mp2_correlation_energy).
    """
    kpts = cell.make_kpts(nk, wrap_around=True)
    mf = pbcscf.KRHF(cell, kpts)
    mf.conv_tol = 1e-12
    escf = mf.scf()
    # Cast each k-point's MO coefficients to complex128 (one set per k-point).
    nkpts = np.prod(nk)
    mf.mo_coeff = [mf.mo_coeff[k].astype(np.complex128) for k in range(nkpts)]
    mp2 = pyscf.pbc.mp.kmp2.KMP2(mf).run()
    return (escf, mp2.e_corr)
class LazyTensor(AbstractLazyTensor):
    """Deferred tensor computation: stores a callable plus its arguments and
    evaluates the expression tree only when .tensor() is called."""

    def __init__(self, function, args):
        # function: callable producing the concrete tensor when applied.
        # args: mixed sequence of AbstractLazyTensor nodes and plain values.
        self.function = function
        self.args = args

    def tensor(self):
        """Recursively evaluate lazy arguments, then apply the stored function."""
        tensor_args = []
        for arg in self.args:
            if issubclass(arg.__class__, AbstractLazyTensor):
                tensor_args.append(arg.tensor())
            else:
                tensor_args.append(arg)
        return self.function(*tensor_args)

    def __str__(self, level=0):
        # Indented tree rendering; `level` controls the leading spaces.
        ret = ((((' ' * level) + 'LazyTensor:') + self.function.__name__) + '\n')
        for arg in self.args:
            if issubclass(arg.__class__, AbstractLazyTensor):
                ret += (arg.__str__((level + 1)) + '\n')
            elif (arg is Ellipsis):
                # BUGFIX: compare to the Ellipsis singleton with `is`, not `==`.
                # `==` can return element-wise arrays (e.g. numpy/torch args),
                # which makes the `elif` raise on ambiguous truth values.
                ret += ((' ' * (level + 1)) + '...\n')
            else:
                ret += (((' ' * (level + 1)) + str(arg)) + '\n')
        return ret
class ModelSingleTagFieldConcreteInheritanceTest(ModelSingleTagFieldTest):
manage_models = [test_models.SingleTagFieldConcreteInheritanceModel]
def setUpExtra(self):
self.test_model = test_models.SingleTagFieldConcreteInheritanceModel
self.tag_model = test_models.SingleTagFieldConcreteInheritanceModel.title.tag_model
self.tag_field = test_models.SingleTagFieldConcreteInheritanceModel.title
def test_save_deleted_instance(self): |
class Effect1773(BaseEffect):
    # Passive ship bonus: the Gallente Frigate skill boosts the falloff of
    # small hybrid turrets by the hull's shipBonusGF2 attribute.
    type = 'passive'
    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): handler takes no `self` — presumably invoked unbound
        # through the class by the effect framework; confirm against the runner.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Small Hybrid Turret')), 'falloff', ship.getModifiedItemAttr('shipBonusGF2'), skill='Gallente Frigate', **kwargs)
class Entity(MessageFilter):
    """Message filter that passes when the message contains at least one
    entity of the configured type."""

    __slots__ = ('entity_type',)

    def __init__(self, entity_type: str):
        # The entity type string this filter matches against.
        self.entity_type: str = entity_type
        super().__init__(name=f'filters.Entity({self.entity_type})')

    def filter(self, message: Message) -> bool:
        """Return True if any entity on the message matches the configured type."""
        for entity in message.entities:
            if entity.type == self.entity_type:
                return True
        return False
def strip_query(query: str) -> Tuple[(List[str], List[str])]:
    """Split a SQL query into lowercase keyword tokens and extracted literals.

    String and numeric literals are replaced in the keyword stream by the
    placeholder VALUE_NUM_SYMBOL; the original literal values are collected
    and returned separately.

    Returns:
        (query_keywords, all_values): lowercase tokens with literals masked,
        and the list of extracted literal values in discovery order.
    """
    (query_keywords, all_values) = ([], [])
    toks = sqlparse.parse(query)[0].flatten()
    # String literals (single-quoted or symbol-quoted) found by sqlparse.
    values = [t.value for t in toks if ((t.ttype == sqlparse.tokens.Literal.String.Single) or (t.ttype == sqlparse.tokens.Literal.String.Symbol))]
    for val in values:
        all_values.append(val)
        # NOTE(review): str.replace masks every occurrence of the literal,
        # including accidental matches elsewhere in the query — confirm intended.
        query = query.replace(val.strip(), VALUE_NUM_SYMBOL)
    query_tokenized = query.split()
    # Mask floating-point literals.
    float_nums = re.findall('[-+]?\\d*\\.\\d+', query)
    all_values += [qt for qt in query_tokenized if (qt in float_nums)]
    query_tokenized = [(VALUE_NUM_SYMBOL if (qt in float_nums) else qt) for qt in query_tokenized]
    query = ' '.join(query_tokenized)
    # Mask integer literals; the leading [^tT] avoids matching the digits of
    # table aliases such as t1/T2 (see the [Tt]\d+\. pattern below).
    int_nums = [i.strip() for i in re.findall('[^tT]\\d+', query)]
    all_values += [qt for qt in query_tokenized if (qt in int_nums)]
    query_tokenized = [(VALUE_NUM_SYMBOL if (qt in int_nums) else qt) for qt in query_tokenized]
    for tok in query_tokenized:
        if ('.' in tok):
            # Split alias.column references (e.g. "t1.col") into separate tokens.
            table = re.findall('[Tt]\\d+\\.', tok)
            if (len(table) > 0):
                to = tok.replace('.', ' . ').split()
                to = [t.lower() for t in to if (len(t) > 0)]
                query_keywords.extend(to)
            else:
                query_keywords.append(tok.lower())
        elif (len(tok) > 0):
            query_keywords.append(tok.lower())
    return (query_keywords, all_values)
def main():
    """Distributed-aware inference entry point for a detection model:
    parses config, restores weights, and evaluates every test dataset."""
    parser = argparse.ArgumentParser(description='PyTorch Object Detection Inference')
    parser.add_argument('--config-file', default='/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml', metavar='FILE', help='path to config file')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by the distributed launcher; >1 implies multi-GPU run.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    distributed = (num_gpus > 1)
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()
    # Merge config from file then command-line overrides, and freeze it.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    save_dir = ''
    logger = setup_logger('maskrcnn_benchmark', save_dir, get_rank())
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(cfg)
    logger.info('Collecting env info (might take some time)')
    logger.info(('\n' + collect_env_info()))
    # Build the model and restore weights from the configured checkpoint.
    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)
    # Pick evaluation IoU types according to the enabled prediction heads.
    iou_types = ('bbox',)
    if cfg.MODEL.MASK_ON:
        iou_types = (iou_types + ('segm',))
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = (iou_types + ('keypoints',))
    output_folders = ([None] * len(cfg.DATASETS.TEST))
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        # One inference output folder per test dataset.
        for (idx, dataset_name) in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    # Run evaluation on every test dataset.
    for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val):
        inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=cfg.MODEL.RPN_ONLY, device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder)
    synchronize()
class LocalIndexedModel(Model):
    """PynamoDB model backed by the 'LocalIndexedModel' table, with a local
    secondary index on email."""
    class Meta():
        table_name = 'LocalIndexedModel'
    # Table hash key.
    user_name = UnicodeAttribute(hash_key=True)
    email = UnicodeAttribute()
    # Local secondary index (declared elsewhere) for querying by email.
    email_index = LocalEmailIndex()
    numbers = NumberSetAttribute()
    aliases = UnicodeSetAttribute()
    # legacy_encoding=False: store binary values with the new (non-legacy) encoding.
    icons = BinarySetAttribute(legacy_encoding=False)
def main():
    """Fine-tune and/or evaluate a transformers model for token classification (NER).

    Parses ModelArguments / DataTrainingArguments / TrainingArguments (from a
    single JSON file or the CLI), loads the dataset and a pretrained
    model/tokenizer, aligns per-word labels with sub-word tokens, then runs
    train / eval / predict through the HF `Trainer` according to the
    `do_train` / `do_eval` / `do_predict` flags.

    Raises:
        ValueError: when the output dir is non-empty without
            --overwrite_output_dir, when the tokenizer is not a fast
            tokenizer, or when a required dataset split is missing.
    """
    # ------------------------------------------------------------------ args
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # A lone .json argument supplies every dataclass field at once.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Anonymous usage telemetry for the example scripts.
    send_example_telemetry('run_ner', model_args, data_args)
    # --------------------------------------------------------------- logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Fix: the two halves were concatenated without a separator, producing
    # "n_gpu: 1distributed training" in the log; add ", " between them.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    # ----------------------------------------------- resume-from-checkpoint
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # ------------------------------------------------------------- datasets
    if (data_args.dataset_name is not None):
        # Download (or read from cache) a dataset from the Hub.
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        # Local csv/json files; the loader script is picked by file extension.
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
        if (data_args.test_file is not None):
            data_files['test'] = data_args.test_file
        # Fix: fall back to the validation file so eval-only runs without
        # --train_file no longer crash with AttributeError on None.
        extension = ((data_args.train_file if (data_args.train_file is not None) else data_args.validation_file)).split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # --------------------------------------------- column & label discovery
    if training_args.do_train:
        column_names = raw_datasets['train'].column_names
        features = raw_datasets['train'].features
    else:
        column_names = raw_datasets['validation'].column_names
        features = raw_datasets['validation'].features
    if (data_args.text_column_name is not None):
        text_column_name = data_args.text_column_name
    elif ('tokens' in column_names):
        text_column_name = 'tokens'
    else:
        text_column_name = column_names[0]
    if (data_args.label_column_name is not None):
        label_column_name = data_args.label_column_name
    elif (f'{data_args.task_name}_tags' in column_names):
        label_column_name = f'{data_args.task_name}_tags'
    else:
        label_column_name = column_names[1]

    def get_label_list(labels):
        """Return the sorted list of unique labels found across all examples."""
        unique_labels = set()
        for label in labels:
            unique_labels = (unique_labels | set(label))
        label_list = list(unique_labels)
        label_list.sort()
        return label_list
    # ClassLabel columns are already integer-encoded; raw string labels need
    # an explicit string->id mapping built from the training split.
    labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
    if labels_are_int:
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(raw_datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    # ------------------------------------ config / tokenizer / model loading
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer_name_or_path = (model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path)
    if (config.model_type in {'bloom', 'gpt2', 'roberta'}):
        # Byte-level BPE tokenizers need add_prefix_space for pre-split words.
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), add_prefix_space=True)
    else:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), ignore_mismatched_sizes=model_args.ignore_mismatched_sizes)
    if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
        raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at to find the model types that meet this requirement')
    # Reconcile the model's existing label mapping (if previously fine-tuned)
    # with the labels discovered in the dataset.
    if (model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id):
        if (sorted(model.config.label2id.keys()) == sorted(label_list)):
            if labels_are_int:
                label_to_id = {i: int(model.config.label2id[l]) for (i, l) in enumerate(label_list)}
                label_list = [model.config.id2label[i] for i in range(num_labels)]
            else:
                label_list = [model.config.id2label[i] for i in range(num_labels)]
                label_to_id = {l: i for (i, l) in enumerate(label_list)}
        else:
            # Fix: the details were previously passed as a second positional
            # argument to logger.warning, where logging treats them as a
            # %-format argument for a message with no placeholders, so they
            # were never rendered. Emit one complete message instead.
            logger.warning(f"Your model seems to have been trained with labels, but they don't match the dataset: model labels: {sorted(model.config.label2id.keys())}, dataset labels: {sorted(label_list)}. Ignoring the model labels as a result.")
    model.config.label2id = {l: i for (i, l) in enumerate(label_list)}
    model.config.id2label = dict(enumerate(label_list))
    # Map each B-<ent> label to its I-<ent> twin for label_all_tokens mode.
    b_to_i_label = []
    for (idx, label) in enumerate(label_list):
        if (label.startswith('B-') and (label.replace('B-', 'I-') in label_list)):
            b_to_i_label.append(label_list.index(label.replace('B-', 'I-')))
        else:
            b_to_i_label.append(idx)
    padding = ('max_length' if data_args.pad_to_max_length else False)

    def tokenize_and_align_labels(examples):
        """Tokenize pre-split words and align word labels to sub-word tokens.

        Special tokens and (unless label_all_tokens) non-first pieces of a
        word receive label -100 so the loss function ignores them.
        """
        tokenized_inputs = tokenizer(examples[text_column_name], padding=padding, truncation=True, max_length=data_args.max_seq_length, is_split_into_words=True)
        labels = []
        for (i, label) in enumerate(examples[label_column_name]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                if (word_idx is None):
                    label_ids.append((- 100))
                elif (word_idx != previous_word_idx):
                    label_ids.append(label_to_id[label[word_idx]])
                elif data_args.label_all_tokens:
                    label_ids.append(b_to_i_label[label_to_id[label[word_idx]]])
                else:
                    label_ids.append((- 100))
                previous_word_idx = word_idx
            labels.append(label_ids)
        tokenized_inputs['labels'] = labels
        return tokenized_inputs
    # ------------------------------------------------ dataset preprocessing
    if training_args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if (data_args.max_train_samples is not None):
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
    if training_args.do_eval:
        if ('validation' not in raw_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if (data_args.max_eval_samples is not None):
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
    if training_args.do_predict:
        if ('test' not in raw_datasets):
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets['test']
        if (data_args.max_predict_samples is not None):
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
    # Pad to multiples of 8 for tensor-core efficiency under fp16.
    data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None))
    metric = evaluate.load('seqeval')

    def compute_metrics(p):
        """Compute seqeval precision/recall/F1/accuracy, skipping -100 labels."""
        (predictions, labels) = p
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Flatten nested per-entity dicts into "<entity>_<metric>" keys.
            final_results = {}
            for (key, value) in results.items():
                if isinstance(value, dict):
                    for (n, v) in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    # -------------------------------------------------------------- training
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # ------------------------------------------------------------ evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # ------------------------------------------------------------ prediction
    if training_args.do_predict:
        logger.info('*** Predict ***')
        (predictions, labels, metrics) = trainer.predict(predict_dataset, metric_key_prefix='predict')
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)
        # Space-separated predicted tags, one example per line (rank 0 only).
        output_predictions_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predictions_file, 'w') as writer:
                for prediction in true_predictions:
                    writer.write((' '.join(prediction) + '\n'))
    # ------------------------------------------------------------ model card
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'token-classification'}
    if (data_args.dataset_name is not None):
        kwargs['dataset_tags'] = data_args.dataset_name
        if (data_args.dataset_config_name is not None):
            kwargs['dataset_args'] = data_args.dataset_config_name
            kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
        else:
            kwargs['dataset'] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
class Encoder(PymiereBaseObject):
    """Python proxy for the Premiere Pro `app.encoder` ExtendScript object.

    Every method forwards to the live ExtendScript object through
    ``_eval_on_this_object``; arguments are serialized with
    ``_format_object_to_es``.

    Fix: the decorator lines for the three work-area constants were garbled
    in the source (bare ``_ENTIRE.setter`` etc.), which raised NameError as
    soon as the class body executed; restored the standard read-only
    ``@property`` / ``@X.setter`` pattern.
    """

    def __init__(self, pymiere_id=None):
        super(Encoder, self).__init__(pymiere_id)

    # --- work-area constants (read-only properties) ------------------------
    @property
    def ENCODE_ENTIRE(self):
        return self._eval_on_this_object('ENCODE_ENTIRE')

    @ENCODE_ENTIRE.setter
    def ENCODE_ENTIRE(self, ENCODE_ENTIRE):
        raise AttributeError("Attribute 'ENCODE_ENTIRE' is read-only")

    @property
    def ENCODE_IN_TO_OUT(self):
        return self._eval_on_this_object('ENCODE_IN_TO_OUT')

    @ENCODE_IN_TO_OUT.setter
    def ENCODE_IN_TO_OUT(self, ENCODE_IN_TO_OUT):
        raise AttributeError("Attribute 'ENCODE_IN_TO_OUT' is read-only")

    @property
    def ENCODE_WORKAREA(self):
        return self._eval_on_this_object('ENCODE_WORKAREA')

    @ENCODE_WORKAREA.setter
    def ENCODE_WORKAREA(self, ENCODE_WORKAREA):
        raise AttributeError("Attribute 'ENCODE_WORKAREA' is read-only")

    # --- event plumbing -----------------------------------------------------
    def bind(self, eventName, function):
        """Attach `function` as a handler for the named encoder event."""
        self._check_type(eventName, str, 'arg "eventName" of function "Encoder.bind"')
        self._check_type(function, any, 'arg "function" of function "Encoder.bind"')
        self._eval_on_this_object('bind({}, {})'.format(_format_object_to_es(eventName), _format_object_to_es(function)))

    def unbind(self, eventName):
        """Remove all handlers for the named encoder event."""
        self._check_type(eventName, str, 'arg "eventName" of function "Encoder.unbind"')
        self._eval_on_this_object('unbind({})'.format(_format_object_to_es(eventName)))

    def setTimeout(self, eventName, function, milliseconds):
        """Schedule `function` for `eventName` after `milliseconds`."""
        self._check_type(eventName, str, 'arg "eventName" of function "Encoder.setTimeout"')
        self._check_type(function, any, 'arg "function" of function "Encoder.setTimeout"')
        self._check_type(milliseconds, float, 'arg "milliseconds" of function "Encoder.setTimeout"')
        self._eval_on_this_object('setTimeout({}, {}, {})'.format(_format_object_to_es(eventName), _format_object_to_es(function), _format_object_to_es(milliseconds)))

    # --- encoding entry points ----------------------------------------------
    def encodeSequence(self, sequence, outputFilePath, presetPath, WorkAreaType, removeOnCompletion, startQueueImmediately=False):
        """Queue a sequence for encoding with the given preset."""
        self._check_type(sequence, Sequence, 'arg "sequence" of function "Encoder.encodeSequence"')
        self._check_type(outputFilePath, str, 'arg "outputFilePath" of function "Encoder.encodeSequence"')
        self._check_type(presetPath, str, 'arg "presetPath" of function "Encoder.encodeSequence"')
        self._check_type(WorkAreaType, float, 'arg "WorkAreaType" of function "Encoder.encodeSequence"')
        self._check_type(removeOnCompletion, float, 'arg "removeOnCompletion" of function "Encoder.encodeSequence"')
        self._check_type(startQueueImmediately, float, 'arg "startQueueImmediately" of function "Encoder.encodeSequence"')
        return self._eval_on_this_object('encodeSequence({}, {}, {}, {}, {}, {})'.format(_format_object_to_es(sequence), _format_object_to_es(outputFilePath), _format_object_to_es(presetPath), _format_object_to_es(WorkAreaType), _format_object_to_es(removeOnCompletion), _format_object_to_es(startQueueImmediately)))

    def encodeProjectItem(self, projectItem, outputFilePath, presetPath, WorkAreaType, removeOnCompletion, startQueueImmediately):
        """Queue a project item for encoding with the given preset."""
        self._check_type(projectItem, ProjectItem, 'arg "projectItem" of function "Encoder.encodeProjectItem"')
        self._check_type(outputFilePath, str, 'arg "outputFilePath" of function "Encoder.encodeProjectItem"')
        self._check_type(presetPath, str, 'arg "presetPath" of function "Encoder.encodeProjectItem"')
        self._check_type(WorkAreaType, float, 'arg "WorkAreaType" of function "Encoder.encodeProjectItem"')
        self._check_type(removeOnCompletion, float, 'arg "removeOnCompletion" of function "Encoder.encodeProjectItem"')
        self._check_type(startQueueImmediately, float, 'arg "startQueueImmediately" of function "Encoder.encodeProjectItem"')
        return self._eval_on_this_object('encodeProjectItem({}, {}, {}, {}, {}, {})'.format(_format_object_to_es(projectItem), _format_object_to_es(outputFilePath), _format_object_to_es(presetPath), _format_object_to_es(WorkAreaType), _format_object_to_es(removeOnCompletion), _format_object_to_es(startQueueImmediately)))

    def encodeFile(self, inputFilePath, outputFilePath, presetPath, removeOnCompletion, startTime, stopTime, startQueueImmediately):
        """Queue a media file for encoding between startTime and stopTime.

        NOTE(review): startTime/stopTime are forwarded without _check_type
        validation, unlike every other argument — confirm intended.
        """
        self._check_type(inputFilePath, str, 'arg "inputFilePath" of function "Encoder.encodeFile"')
        self._check_type(outputFilePath, str, 'arg "outputFilePath" of function "Encoder.encodeFile"')
        self._check_type(presetPath, str, 'arg "presetPath" of function "Encoder.encodeFile"')
        self._check_type(removeOnCompletion, float, 'arg "removeOnCompletion" of function "Encoder.encodeFile"')
        self._check_type(startQueueImmediately, float, 'arg "startQueueImmediately" of function "Encoder.encodeFile"')
        return self._eval_on_this_object('encodeFile({}, {}, {}, {}, {}, {}, {})'.format(_format_object_to_es(inputFilePath), _format_object_to_es(outputFilePath), _format_object_to_es(presetPath), _format_object_to_es(removeOnCompletion), _format_object_to_es(startTime), _format_object_to_es(stopTime), _format_object_to_es(startQueueImmediately)))

    # --- queue / application control ----------------------------------------
    def startBatch(self):
        return self._eval_on_this_object('startBatch()')

    def launchEncoder(self):
        return self._eval_on_this_object('launchEncoder()')

    def setSidecarXMPEnabled(self, enable):
        self._check_type(enable, float, 'arg "enable" of function "Encoder.setSidecarXMPEnabled"')
        self._eval_on_this_object('setSidecarXMPEnabled({})'.format(_format_object_to_es(enable)))

    def setEmbeddedXMPEnabled(self, enable):
        self._check_type(enable, float, 'arg "enable" of function "Encoder.setEmbeddedXMPEnabled"')
        self._eval_on_this_object('setEmbeddedXMPEnabled({})'.format(_format_object_to_es(enable)))

    def getExporters(self):
        """Return the available exporters wrapped in an ExporterCollection."""
        return ExporterCollection(**self._eval_on_this_object('getExporters()'))
def dummy_dist(tmp_path_factory):
    """Build a throwaway source tree for license-file collection tests.

    The tree holds a ``setup.py`` (from SETUPPY_EXAMPLE), one empty file for
    every default-license / ignored filename, and a ``licenses/DUMMYFILE``
    entry. Returns the root directory.
    """
    root = tmp_path_factory.mktemp('dummy_dist')
    (root / 'setup.py').write_text(SETUPPY_EXAMPLE, encoding='utf-8')
    # One empty placeholder per well-known license / ignored filename.
    for name in (DEFAULT_LICENSE_FILES | OTHER_IGNORED_FILES):
        (root / name).write_text('', encoding='utf-8')
    license_dir = root / 'licenses'
    license_dir.mkdir()
    (license_dir / 'DUMMYFILE').write_text('', encoding='utf-8')
    return root
class _DepthwiseConv(nn.Module):
    """Depthwise-separable convolution: 3x3 depthwise then 1x1 pointwise,
    each followed by BatchNorm + ReLU (via _ConvBNReLU)."""

    def __init__(self, in_channels, out_channels, stride, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_DepthwiseConv, self).__init__()
        # groups=in_channels makes the 3x3 convolution depthwise.
        depthwise = _ConvBNReLU(in_channels, in_channels, 3, stride, 1, groups=in_channels, norm_layer=norm_layer)
        # 1x1 pointwise projection to the target channel count.
        pointwise = _ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer)
        self.conv = nn.Sequential(depthwise, pointwise)

    def forward(self, x):
        out = self.conv(x)
        return out
class ListColumnCpu(ColumnCpuMixin, ListColumn):
    """CPU (Velox-backed) column of lists.

    Storage is delegated to a ``velox.Column`` whose element type is either a
    variable-length array or, when ``dtype.fixed_size != -1``, a fixed-size
    array. The column is append-mutable until ``_finalize`` is called.
    """
    def __init__(self, device, dtype, data, offsets, mask):
        # NOTE(review): `offsets` and `mask` are accepted but never used here;
        # all storage goes through the Velox column — confirm intended.
        assert dt.is_list(dtype)
        ListColumn.__init__(self, device, dtype)
        # Choose variable-size vs fixed-size Velox array type from the dtype.
        self._data = velox.Column((velox.VeloxArrayType(get_velox_type(dtype.item_dtype)) if (dtype.fixed_size == (- 1)) else velox.VeloxFixedArrayType(dtype.fixed_size, get_velox_type(dtype.item_dtype))))
        if (len(data) > 0):
            self.append(data)
        self._finalized = False
        # Namespace object exposing list-specific operations (e.g. col.list.*).
        self.list = ListMethodsCpu(self)
    # NOTE(review): `_empty` and `_from_pysequence` take no self/cls —
    # presumably @staticmethod decorators were lost in extraction; confirm.
    def _empty(device, dtype: dt.List):
        # Fresh, zero-length column of the given list dtype.
        return ListColumnCpu(device, dtype, Scope._EmptyColumn(dtype.item_dtype, device), ar.array('I', [0]), ar.array('b'))
    def _from_pysequence(device: str, data: List[List], dtype: dt.List):
        if dt.is_primitive(dtype.item_dtype):
            # Fast path: Velox can ingest a Python sequence of primitives directly.
            velox_column = velox.Column(get_velox_type(dtype), data)
            return ColumnCpuMixin._from_velox(device, dtype, velox_column, True)
        else:
            # Slow path: append element-by-element through the Python API.
            warnings.warn('Complex types are not supported (properly) for ListColumnCpu._from_pysequence yet. Falling back to the default (inefficient) implementation')
            assert (len(data) <= 100000), f'The default _from_pysequence implementation will be too slow for {len(data)} elements'
            col = ListColumnCpu._empty(device, dtype)
            for i in data:
                col._append(i)
            return col._finalize()
    def _append_null(self):
        if self._finalized:
            raise AttributeError('It is already finalized.')
        self._data.append_null()
    def _append_value(self, value):
        # Append one list (or None) to the column; only valid before finalize.
        if self._finalized:
            raise AttributeError('It is already finalized.')
        elif (value is None):
            self._data.append_null()
        else:
            # Build a temporary element column so Velox receives typed data.
            new_element_column = ta.column(self._dtype.item_dtype)
            new_element_column = new_element_column.append(value)
            if ((self._dtype.fixed_size != (- 1)) and (self.dtype.fixed_size != len(new_element_column))):
                raise ValueError('value incompatible with list fixed_size')
            self._data.append(new_element_column._data)
    def _finalize(self):
        # Freeze the column; further appends raise AttributeError.
        self._finalized = True
        return self
    def __len__(self):
        return len(self._data)
    def null_count(self):
        # NOTE(review): used as an attribute in __repr__ below — presumably a
        # stripped @property decorator; confirm against upstream.
        return self._data.get_null_count()
    def _getmask(self, i):
        # True when row i is null; negative indices count from the end.
        if (i < 0):
            i += len(self._data)
        return self._data.is_null_at(i)
    def _getdata(self, i):
        # Row i as a Python list, or the dtype's default value for nulls.
        if (i < 0):
            i += len(self._data)
        if self._data.is_null_at(i):
            return self.dtype.default_value()
        else:
            return list(ColumnCpuMixin._from_velox(self.device, self._dtype.item_dtype, self._data[i], False))
    def __str__(self):
        return f"Column([{', '.join((('None' if (i is None) else str(i)) for i in self))}])"
    def __repr__(self):
        tab = tabulate([[('None' if (i is None) else str(i))] for i in self], tablefmt='plain', showindex=True)
        typ = f'dtype: {self._dtype}, length: {self.length}, null_count: {self.null_count}'
        return ((tab + dt.NL) + typ)
    def __iter__(self):
        # Yield each row as a plain Python list (None for null rows).
        for i in range(len(self)):
            item = self._get(i)
            if (item is None):
                (yield item)
            else:
                (yield list(item))
    def _to_tensor_default(self, _propagate_py_list=True):
        """Convert to a PyTorch representation (PackedList of values+offsets)."""
        pytorch.ensure_available()
        import torch
        arrow_array = self.to_arrow()
        # Recursively convert the flattened element column first.
        elements = ColumnCpuMixin._from_velox(self.device, self._dtype.item_dtype, self._data.elements(), True)._to_tensor_default()
        if (isinstance(elements, list) and _propagate_py_list):
            # Elements did not tensorize: slice the Python list per row via
            # the Arrow offsets instead.
            return [(elements[arrow_array.offsets[i].as_py():arrow_array.offsets[(i + 1)].as_py()] if (self[i] is not None) else None) for i in range(len(self))]
        offsets = torch.tensor(arrow_array.offsets.to_numpy(), dtype=torch.int32)
        res = pytorch.PackedList(values=elements, offsets=offsets)
        if (not self._dtype.nullable):
            return res
        # Nullable columns carry an explicit per-row validity mask.
        presence = torch.tensor(arrow_array.is_valid().to_numpy(zero_copy_only=False), dtype=torch.bool)
        return pytorch.WithPresence(values=res, presence=presence)
    def _to_tensor_pad_sequence(self, batch_first: bool, padding_value):
        """Convert to a single padded 2-D tensor via torch pad_sequence.

        Only valid for numeric item dtypes with no nulls at either level.
        """
        pytorch.ensure_available()
        assert dt.is_numerical(self.dtype.item_dtype)
        assert (self.null_count == 0)
        import torch
        from torch.nn.utils.rnn import pad_sequence
        packed_list: Union[(pytorch.WithPresence, pytorch.PackedList)] = self._to_tensor_default()
        if isinstance(packed_list, pytorch.WithPresence):
            # A presence mask may exist even with zero nulls; it must be all-True.
            assert torch.all(packed_list.presence)
            packed_list = packed_list.values
        flattened_values = packed_list.values
        if isinstance(flattened_values, pytorch.WithPresence):
            assert torch.all(flattened_values.presence)
            flattened_values = flattened_values.values
        # Re-slice the flat value tensor into one variable-length row per list.
        unpad_tensors: List[torch.tensor] = [flattened_values[packed_list.offsets[i]:packed_list.offsets[(i + 1)]] for i in range(len(self))]
        pad_token_ids = pad_sequence(unpad_tensors, batch_first=batch_first, padding_value=float(padding_value))
        return pad_token_ids
def test_create_translator_gates_field(echoes_game_description):
    """Translator-gate assignments should serialize to gate/translator index pairs."""
    ident = NodeIdentifier.create

    def scan_plus_item(item_id: int):
        # Every gate needs Scan Visor plus one translator item.
        scan_visor = ResourceRequirement.simple(ItemResourceInfo(0, 'Scan Visor', 'Scan', 1, frozendict({'item_id': 9})))
        translator = ResourceRequirement.simple(ItemResourceInfo(1, 'Other', 'Other', 1, frozendict({'item_id': item_id})))
        return RequirementAnd([scan_visor, translator])

    gate_assignment = {
        ident('Temple Grounds', 'Meeting Grounds', 'Translator Gate'): scan_plus_item(0),
        ident('Temple Grounds', 'Industrial Site', 'Translator Gate'): scan_plus_item(1),
        ident('Temple Grounds', 'Path of Eyes', 'Translator Gate'): scan_plus_item(0),
    }

    result = patch_data_factory._create_translator_gates_field(echoes_game_description, gate_assignment)

    expected = [
        {'gate_index': 1, 'translator_index': 0},
        {'gate_index': 3, 'translator_index': 1},
        {'gate_index': 4, 'translator_index': 0},
    ]
    assert result == expected
def test_FreiHand2D_dataset():
    """Smoke-test FreiHandDataset: construction, indexing and evaluation."""
    dataset = 'FreiHandDataset'
    dataset_info = Config.fromfile('configs/_base_/datasets/freihand2d.py').dataset_info
    dataset_class = DATASETS.get(dataset)
    # FreiHand annotates all 21 hand keypoints on a single channel group.
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[list(range(21))],
        inference_channel=list(range(21)),
    )
    data_cfg = dict(
        image_size=[224, 224],
        heatmap_size=[56, 56],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
    )
    data_cfg_copy = copy.deepcopy(data_cfg)
    common_kwargs = dict(
        ann_file='tests/data/freihand/test_freihand.json',
        img_prefix='tests/data/freihand/',
        data_cfg=data_cfg_copy,
        pipeline=[],
        dataset_info=dataset_info,
    )
    # Construction in test mode must not raise.
    _ = dataset_class(test_mode=True, **common_kwargs)
    custom_dataset = dataset_class(test_mode=False, **common_kwargs)
    assert custom_dataset.dataset_name == 'freihand'
    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 8
    # Indexing must not raise.
    _ = custom_dataset[0]
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Ground-truth predictions must score perfectly on supported metrics.
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCK'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)
        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
class Object():
    """Tracks pose hypotheses for one detected object instance.

    An object is "spawned" (its pose accepted as stable) once enough of the
    most recent pose estimates agree with the latest one within an
    average-distance threshold (ADD, or ADD-S for symmetric objects).
    """
    # Acceptance threshold on ADD for asymmetric objects.
    _add_threshold = 0.02
    # Acceptance threshold on ADD-S for symmetric objects.
    _adds_threshold = 0.01
    def __init__(self, class_id, pcd, is_symmetric, n_votes=3):
        self.class_id = class_id
        # Object-model point cloud used by the average-distance metric.
        self._pcd = pcd
        self._is_symmetric = is_symmetric
        # Number of agreeing estimates (minus one) required to spawn.
        self._n_votes = n_votes
        # Ring buffer of the 6 most recent pose estimates.
        self._poses = queue.deque([], 6)
        self.is_spawned = False
    # NOTE(review): reads like an accessor; presumably decorated @property
    # upstream (decorator lost in extraction) — confirm against callers.
    def pose(self):
        # Returns None until the object has been spawned.
        if (not self.is_spawned):
            return
        return self._poses[(- 1)]
    def __repr__(self):
        return f'{self.__class__.__name__}(class_id={self.class_id}, n_poses={len(self._poses)}, is_spawned={self.is_spawned})'
    def append_pose(self, pose):
        # Accumulate estimates only while unspawned (after spawning the
        # buffer is frozen to a tuple in validate()).
        if (not self.is_spawned):
            self._poses.append(pose)
    def validate(self):
        """Try to spawn: return True once enough poses agree, else False."""
        if self.is_spawned:
            return True
        if (len(self._poses) < self._n_votes):
            return False
        # Compare the newest estimate against every earlier one.
        latest_pose = self._poses[(- 1)]
        poses = np.array(list(itertools.islice(self._poses, (len(self._poses) - 1))))
        (add, add_s) = morefusion.metrics.average_distance(([self._pcd] * len(poses)), ([latest_pose] * len(poses)), poses)
        if self._is_symmetric:
            # Symmetric objects are judged by the symmetric variant (ADD-S).
            add = add_s
        del add_s
        add = np.array(add)
        if self._is_symmetric:
            threshold = self._adds_threshold
        else:
            threshold = self._add_threshold
        # Spawn when at least n_votes-1 earlier poses agree with the latest.
        if ((add < threshold).sum() >= (self._n_votes - 1)):
            self.is_spawned = True
            # Freeze the pose history.
            self._poses = tuple(self._poses)
            return True
        return False
class LearnedGridTensorQuantizer(TensorQuantizer):
def __init__(self, bitwidth: int, round_mode: libpymo.RoundingMode, quant_scheme: QuantScheme, use_symmetric_encodings: bool, enabled_by_default: bool, data_type: QuantizationDataType):
if (data_type != QuantizationDataType.int):
raise ValueError('Only QuantizationDataType.int is supported for LearnedGridTensorQuantizer')
super(LearnedGridTensorQuantizer, self).__init__(bitwidth, round_mode, quant_scheme, use_symmetric_encodings, enabled_by_default, data_type)
self.wrapper_ref = None
self.name = None
self.round_ste_func = grad_fn.RoundStraightThrough.apply
(self.scaling, self.offset) = (None, None)
self.device = None
self._ch_axis = 0
_cache()
def get_n_and_p(bitwidth: int, use_symmetric_encoding: bool, use_strict_symmetric: bool, device: Union[(torch.device, str)]) -> Tuple[(torch.Tensor, torch.Tensor)]:
if ((not use_symmetric_encoding) and use_strict_symmetric):
raise ValueError('Strict symmetric can be enabled only when using symmetric encoding')
n = 0.0
p = (torch.pow(torch.tensor([2]), bitwidth) - 1)
if (use_symmetric_encoding and use_strict_symmetric):
p -= 1
n = torch.tensor([n], device=device)
p = torch.tensor([p], device=device)
return (n, p)
def n(self, device=None) -> torch.Tensor:
(n, _) = self.get_n_and_p(self.bitwidth, self.use_strict_symmetric, self.use_strict_symmetric, (device or self.device))
return n
def p(self, device=None) -> torch.Tensor:
(_, p) = self.get_n_and_p(self.bitwidth, self.use_strict_symmetric, self.use_strict_symmetric, (device or self.device))
return p
def get_effective_encoding(self) -> Optional[Union[(libpymo.TfEncoding, List[libpymo.TfEncoding])]]:
if (not self.enabled):
return None
encodings = self.encoding
if (not encodings):
return None
if isinstance(encodings, libpymo.TfEncoding):
encodings = [encodings]
effective_encodings = []
for tf_encoding in encodings:
if is_non_strict_symmetric(self.use_symmetric_encodings, self.use_strict_symmetric, self.is_unsigned_symmetric):
effective_encoding = libpymo.TfEncoding()
effective_encoding.min = (tf_encoding.min - tf_encoding.delta)
effective_encoding.max = tf_encoding.max
effective_encoding.offset = tf_encoding.offset
effective_encoding.delta = tf_encoding.delta
effective_encoding.bw = tf_encoding.bw
effective_encodings.append(effective_encoding)
else:
effective_encodings.append(tf_encoding)
if (len(effective_encodings) == 1):
effective_encodings = effective_encodings[0]
return effective_encodings
def encoding(self) -> Union[(None, libpymo.TfEncoding, List[libpymo.TfEncoding])]:
if (not self.enabled):
return None
if ((self.bitwidth == 32) or (self.data_type == QuantizationDataType.float)):
return None
return self._compute_updated_encoding()
def encoding(self, encoding: Union[(libpymo.TfEncoding, List[libpymo.TfEncoding])]):
if self.enabled:
if ((self.bitwidth == 32) or (self.data_type == QuantizationDataType.float)):
return
if (encoding is None):
raise RuntimeError('Encodings cannot be None if Quantizer is enabled.')
bitwidth = (encoding[0].bw if isinstance(encoding, List) else encoding.bw)
if (bitwidth != self.bitwidth):
raise RuntimeError(f'Bitwidth mismatched. The bitwidth for quantizer is {self.bitwidth}, but the bitwidth in encodings is {bitwidth}. If the intent is to change the bitwidth, please set quantizer bitwidth to {bitwidth} first.')
if self._is_encoding_frozen:
raise RuntimeError('Encoding can be set only when it is not frozen.')
self._set_encoding_min_max_parameters(encoding)
def channel_axis(self) -> int:
return self._ch_axis
_min_max_fixed_vals.setter
def encoding_min_max_fixed_vals(self, min_max_vals: Optional[Tuple[(float, float)]]):
self._encoding_min_max_fixed_vals = min_max_vals
def __str__(self):
stream = io.StringIO(newline='\n')
stream.write('LearnedGrid TensorQuantizer:\n')
stream.write(' quant-scheme:{}, round_mode={}, bitwidth={}, enabled={}\n'.format(self._quant_scheme, self.round_mode, self.bitwidth, self.enabled))
if self.encoding:
encoding = self.get_effective_encoding()
if isinstance(encoding, libpymo.TfEncoding):
encoding = [encoding]
for tf_encoding in encoding:
stream.write(' min:{}, max={}, delta={}, offset={}\n'.format(tf_encoding.min, tf_encoding.max, tf_encoding.delta, tf_encoding.offset))
else:
stream.write(' no encoding\n')
return stream.getvalue()
def compute_scaling_offset(self, encoding_min: Union[(None, torch.Tensor)], encoding_max: Union[(None, torch.Tensor)]) -> Tuple[(Union[(None, torch.Tensor)], Union[(None, torch.Tensor)])]:
if ((encoding_min is None) or (encoding_max is None)):
return (None, None)
(scaling, offset, _) = grad_fn.get_computed_encodings(self.bitwidth, encoding_min, encoding_max, self.use_symmetric_encodings, self.use_strict_symmetric, self.is_unsigned_symmetric)
return (scaling, offset)
def quantize_dequantize(self, tensor: torch.Tensor, encoding_min: torch.nn.Parameter, encoding_max: torch.nn.Parameter) -> torch.Tensor:
if self.enabled:
tensor = QuantizeDequantizeFunc.apply(tensor, encoding_min, encoding_max, self)
return tensor
def _compute_updated_encoding(self) -> Union[(libpymo.TfEncoding, List[libpymo.TfEncoding])]:
    """Rebuild libpymo TfEncoding object(s) from the learned min/max parameters.

    Returns a single TfEncoding for per-tensor quantization, a list for
    per-channel, or None when the parameters have not been created yet.
    """
    encoding_min = getattr(self.wrapper_ref, (self.name + '_encoding_min'))
    encoding_max = getattr(self.wrapper_ref, (self.name + '_encoding_max'))
    if ((encoding_min is None) or (encoding_max is None)):
        return None
    (scale, offset) = self.compute_scaling_offset(encoding_min.float(), encoding_max.float())
    assert (scale is not None)
    assert (offset is not None)
    # Broadcast scale/offset so there is one entry per learned min/max slot.
    scale = scale.expand_as(encoding_min)
    offset = offset.expand_as(encoding_min)
    if ((not self.use_symmetric_encodings) or self.is_unsigned_symmetric):
        # Snap the range onto the quantization grid: the new min is
        # scale * offset and the range width (max - min) is preserved.
        adjusted_min = (scale * offset)
        encoding_max = ((encoding_max - encoding_min) + adjusted_min)
        encoding_min = adjusted_min
    encodings = []
    for (min_, max_, scale_, offset_) in zip(encoding_min, encoding_max, scale, offset):
        tf_encoding = libpymo.TfEncoding()
        tf_encoding.min = min_
        tf_encoding.max = max_
        tf_encoding.delta = scale_
        tf_encoding.offset = offset_
        tf_encoding.bw = self.bitwidth
        encodings.append(tf_encoding)
    # Per-tensor case: callers expect a bare TfEncoding, not a 1-element list.
    if (len(encodings) == 1):
        encodings = encodings[0]
    return encodings
def _set_encoding_min_max_parameters(self, encodings: Union[libpymo.TfEncoding, List[libpymo.TfEncoding]]):
    """Create/replace the wrapper's encoding-min/max nn.Parameters from encodings."""
    min_name = (self.name + '_encoding_min')
    max_name = (self.name + '_encoding_max')
    if isinstance(encodings, List):
        # Per-channel: one min/max per TfEncoding entry.
        assert isinstance(encodings[0], libpymo.TfEncoding), 'Encodings should be a libpymo.TfEncoding() object'
        min_vals = [enc.min for enc in encodings]
        max_vals = [enc.max for enc in encodings]
    else:
        # Per-tensor: a single TfEncoding becomes one-element parameters.
        assert isinstance(encodings, libpymo.TfEncoding), 'Encodings should be a libpymo.TfEncoding() object'
        min_vals = [encodings.min]
        max_vals = [encodings.max]
    params = self.wrapper_ref._parameters
    device = self.wrapper_ref.device
    params[min_name] = torch.nn.Parameter(torch.FloatTensor(min_vals).to(device), requires_grad=True)
    params[max_name] = torch.nn.Parameter(torch.FloatTensor(max_vals).to(device), requires_grad=True)
def freeze_encoding(self):
    """Freeze the learned encoding: mark it frozen and stop gradient updates.

    :raises RuntimeError: if the encoding parameters have not been created yet.
    """
    enc_min_param = (self.name + '_encoding_min')
    enc_max_param = (self.name + '_encoding_max')
    params = self.wrapper_ref._parameters
    # Bug fix: the original raised only when BOTH parameters were None, so a
    # single missing bound crashed below with AttributeError on None instead
    # of the intended RuntimeError.
    if (params[enc_min_param] is None) or (params[enc_max_param] is None):
        raise RuntimeError('Encoding can be frozen only when it is not None.')
    self._is_encoding_frozen = True
    params[enc_min_param].requires_grad = False
    params[enc_max_param].requires_grad = False
# NOTE(review): the line above this class was corrupted to the bare expression
# `_tokenizers` (a NameError at import); reconstructed as the
# `@require_tokenizers` decorator from transformers.testing_utils — confirm.
@require_tokenizers
class ESMTokenizationTest(unittest.TestCase):
    """Unit tests for the ESM protein-sequence tokenizer."""

    tokenizer_class = EsmTokenizer

    def setUp(self):
        """Write a minimal ESM vocab file into a fresh temp directory."""
        super().setUp()
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens: List[str] = ['<cls>', '<pad>', '<eos>', '<unk>', 'L', 'A', 'G', 'V', 'S', 'E', 'R', 'T', 'I', 'D', 'P', 'K', 'Q', 'N', 'F', 'Y', 'M', 'H', 'W', 'C', 'X', 'B', 'U', 'Z', 'O', '.', '-', '<null_1>', '<mask>']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))

    def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
        """Return the list of tokenizers under test (slow tokenizer only)."""
        return [self.get_tokenizer(**kwargs)]

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        """Load a tokenizer from the temp vocab written in setUp."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_single_example(self):
        """Tokenization splits a sequence into single residues with correct ids."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('LAGVS')
        self.assertListEqual(tokens, ['L', 'A', 'G', 'V', 'S'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [4, 5, 6, 7, 8])

    def test_tokenizer_encode_single(self):
        """encode() wraps the sequence in <cls> (0) and <eos> (2)."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq = 'LAGVS'
        self.assertListEqual(tokenizer.encode(seq), [0, 4, 5, 6, 7, 8, 2])

    def test_tokenizer_call_no_pad(self):
        """Batched call without padding keeps ragged lengths."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq_batch = ['LAGVS', 'WCB']
        tokens_batch = tokenizer(seq_batch, padding=False)['input_ids']
        self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2]])

    def test_tokenizer_call_pad(self):
        """Batched call with padding pads shorter rows with <pad> (1)."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq_batch = ['LAGVS', 'WCB']
        tokens_batch = tokenizer(seq_batch, padding=True)['input_ids']
        self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2, 1, 1]])

    def test_tokenize_special_tokens(self):
        """Special tokens tokenize to themselves as single tokens."""
        tokenizers = self.get_tokenizers(fast=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                SPECIAL_TOKEN_1 = '<unk>'
                SPECIAL_TOKEN_2 = '<mask>'
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
class SendSticker():
    """Mixin providing Client.send_sticker."""

    async def send_sticker(self: 'pyrogram.Client', chat_id: Union[int, str], sticker: Union[str, BinaryIO], disable_notification: bool=None, reply_to_message_id: int=None, schedule_date: datetime=None, protect_content: bool=None, reply_markup: Union['types.InlineKeyboardMarkup', 'types.ReplyKeyboardMarkup', 'types.ReplyKeyboardRemove', 'types.ForceReply']=None, progress: Callable=None, progress_args: tuple=()) -> Optional['types.Message']:
        """Send a .webp sticker from a local path, an HTTP(S) URL, a file_id, or a file object.

        Returns the sent Message, or None if the upload was stopped.
        """
        file = None
        try:
            if isinstance(sticker, str):
                if os.path.isfile(sticker):
                    # Local file: upload it first.
                    file = (await self.save_file(sticker, progress=progress, progress_args=progress_args))
                    media = raw.types.InputMediaUploadedDocument(mime_type=(self.guess_mime_type(sticker) or 'image/webp'), file=file, attributes=[raw.types.DocumentAttributeFilename(file_name=os.path.basename(sticker))])
                # Bug fix: the regex literal was corrupted/truncated; a string
                # is treated as a remote sticker when it is an HTTP(S) URL.
                elif re.match('^https?://', sticker):
                    media = raw.types.InputMediaDocumentExternal(url=sticker)
                else:
                    # Anything else is assumed to be a Telegram file_id.
                    media = utils.get_input_media_from_file_id(sticker, FileType.STICKER)
            else:
                # File-like object: upload its contents.
                file = (await self.save_file(sticker, progress=progress, progress_args=progress_args))
                media = raw.types.InputMediaUploadedDocument(mime_type=(self.guess_mime_type(sticker.name) or 'image/webp'), file=file, attributes=[raw.types.DocumentAttributeFilename(file_name=sticker.name)])
            while True:
                try:
                    r = (await self.invoke(raw.functions.messages.SendMedia(peer=(await self.resolve_peer(chat_id)), media=media, silent=(disable_notification or None), reply_to_msg_id=reply_to_message_id, random_id=self.rnd_id(), schedule_date=utils.datetime_to_timestamp(schedule_date), noforwards=protect_content, reply_markup=((await reply_markup.write(self)) if reply_markup else None), message='')))
                except FilePartMissing as e:
                    # Re-upload the missing part and retry the send.
                    (await self.save_file(sticker, file_id=file.id, file_part=e.value))
                else:
                    for i in r.updates:
                        if isinstance(i, (raw.types.UpdateNewMessage, raw.types.UpdateNewChannelMessage, raw.types.UpdateNewScheduledMessage)):
                            return (await types.Message._parse(self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats}, is_scheduled=isinstance(i, raw.types.UpdateNewScheduledMessage)))
        except StopTransmission:
            # Upload cancelled by the user.
            return None
# NOTE(review): the decorator line was corrupted to a bare
# ('pypyr.venv.subprocess.run') expression; reconstructed as unittest.mock's
# patch — the mock_subproc_run parameter confirms a patch decorator was used.
@patch('pypyr.venv.subprocess.run')
def test_env_builder_install_pip_extras_quiet(mock_subproc_run):
    """pip_install_extras splits the spec string and adds -q when quiet."""
    eb = EnvBuilderWithExtraDeps(is_quiet=True)
    context = get_simple_context()
    eb.post_setup(context)
    eb.pip_install_extras('package1 package2==1.2.3 package3>=4.5.6,<7.8.9')
    mock_subproc_run.assert_called_once_with(
        ['/python', '-m', 'pip', 'install', '-q',
         'package1', 'package2==1.2.3', 'package3>=4.5.6,<7.8.9'],
        check=True)
class TestFDDBRetinaNet(TestFDDB):
    """FDDB evaluation driver using the RetinaNet detection network."""

    def eval(self):
        """Run inference over the image directory and report rotation metrics."""
        det_net = build_whole_network.DetectionNetworkRetinaNet(cfgs=self.cfgs, is_training=False)
        detections = self.eval_with_plac(img_dir=self.args.img_dir, det_net=det_net,
                                         image_ext=self.args.image_ext)
        # Image ids are file names with the extension stripped.
        image_names = [name.split(self.args.image_ext)[0]
                       for name in os.listdir(self.args.img_dir)]
        print('**' * 10)
        print('rotation eval:')
        EVAL(self.cfgs).voc_evaluate_detections(
            all_boxes=detections,
            test_imgid_list=image_names,
            test_annotation_path=self.args.test_annotation_path)
def make_github_url(file_name):
    """Map a documentation page path to the GitHub URL of its source.

    Gallery pages link to docs/examples, generated API pages link to the
    defining module (with line anchors), everything else links to the rst
    source under docs/sphinx/source.
    """
    # NOTE(review): the original URL literal was truncated in this copy,
    # leaving an unterminated string. Restore the real
    # 'https://github.com/<org>/<repo>/blob/<branch>/' prefix before use.
    URL_BASE = 'https://github.com/'
    if any((d in file_name) for d in sphinx_gallery_conf['gallery_dirs']):
        if file_name.split('/')[-1] == 'index':
            example_file = 'README.rst'
        else:
            example_file = file_name.split('/')[-1].replace('.rst', '.py')
        target_url = URL_BASE + 'docs/examples/' + example_file
    elif 'generated' in file_name:
        # Generated API page: resolve the documented object to its module file.
        qualname = file_name.split('/')[-1].replace('.rst', '')
        obj, module = get_obj_module(qualname)
        path = module.__name__.replace('.', '/') + '.py'
        target_url = URL_BASE + path
        start, end = get_linenos(obj)
        if start and end:
            # Deep-link to the object's line range.
            target_url += f'#L{start}-L{end}'
    else:
        target_url = URL_BASE + 'docs/sphinx/source/' + file_name
    return target_url
# NOTE(review): the decorator line was corrupted to a bare
# `.parametrize('username,password', users)`; reconstructed as
# pytest.mark.parametrize, the conventional form for this fixture pattern.
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file(db, settings, client, username, password):
    """Uploading a project.xml redirects to the import page for logged-in users,
    and to the login page for anonymous users."""
    client.login(username=username, password=password)
    url = reverse('project_create_import')
    xml_file = os.path.join(settings.BASE_DIR, 'xml', 'project.xml')
    with open(xml_file, encoding='utf8') as f:
        response = client.post(url, {'method': 'upload_file', 'uploaded_file': f})
    if password:
        # Authenticated: redirected into the import flow.
        assert (response.status_code == 302)
        assert response.url.startswith('/projects/import/')
        response = client.get(response.url)
        assert (response.status_code == 200)
        assert (b'Create project from project.xml' in response.content)
    else:
        # Anonymous: bounced to login.
        assert (response.status_code == 302)
        assert response.url.startswith('/account/login/')
class TestROI(ROI):
    """ROI subclass populated with one of every handle kind for testing."""

    def __init__(self, pos, size, **args):
        ROI.__init__(self, pos, size, **args)
        self.addTranslateHandle([0.5, 0.5])
        # (add-method, handle position, scale/rotate anchor), in original order.
        handle_specs = [
            (self.addScaleHandle, [1, 1], [0, 0]),
            (self.addScaleHandle, [0, 0], [1, 1]),
            (self.addScaleRotateHandle, [1, 0.5], [0.5, 0.5]),
            (self.addScaleHandle, [0.5, 1], [0.5, 0.5]),
            (self.addRotateHandle, [1, 0], [0, 0]),
            (self.addRotateHandle, [0, 1], [1, 1]),
        ]
        for add_handle, handle_pos, anchor in handle_specs:
            add_handle(handle_pos, anchor)
# NOTE(review): the decorator line was corrupted to a bare
# `.xfail(reason='See PR #938')`; reconstructed as pytest.mark.xfail.
@pytest.mark.xfail(reason='See PR #938')
class TestImportmap(PyScriptTest):
    """Integration tests for <script type="importmap"> support."""

    def test_importmap(self):
        """A module mapped via importmap is importable from both JS and Python."""
        src = '\n        export function say_hello(who) {\n            console.log("hello from", who);\n        }\n        '
        self.writefile('mymod.js', src)
        self.pyscript_run('\n            <script type="importmap">\n            {\n                "imports": {\n                  "mymod": "/mymod.js"\n                }\n            }\n            </script>\n\n            <script type="module">\n                import { say_hello } from "mymod";\n                say_hello("JS");\n            </script>\n\n            <script type="py">\n                import mymod\n                mymod.say_hello("Python")\n            </script>\n            ')
        assert (self.console.log.lines == ['hello from JS', 'hello from Python'])

    def test_invalid_json(self):
        """A malformed importmap raises a JS error and shows a warning banner,
        but Python execution still proceeds."""
        self.pyscript_run('\n            <script type="importmap">\n                this is not valid JSON\n            </script>\n\n            <script type="py">\n                print("hello world")\n            </script>\n            ', wait_for_pyscript=False)
        self.check_js_errors('Failed to parse import map')
        self.wait_for_pyscript()
        assert (self.console.log.lines == ['hello world'])
        banner = self.page.locator('.py-warning')
        assert ('Failed to parse import map' in banner.inner_text())
class SignalRegistrationInterface():
    """Thin facade over an externally owned list of signal handlers.

    Registration and unregistration mutate the shared list in place; both
    methods return self so calls can be chained.
    """

    __slots__ = ('_handlers',)

    def __init__(self, handlers: List[Callable[..., None]]) -> None:
        # Keep a reference to (not a copy of) the caller's handler list.
        self._handlers = handlers

    def register_handler(self, handler: Callable[..., None]) -> 'SignalRegistrationInterface':
        """Append `handler` to the shared list and return self."""
        self._handlers.append(handler)
        return self

    def unregister_handler(self, handler: Callable[..., None]) -> 'SignalRegistrationInterface':
        """Remove `handler` from the shared list (ValueError if absent); return self."""
        self._handlers.remove(handler)
        return self
# NOTE(review): the @dataclass decorator was evidently lost in transcription:
# the class body uses dataclasses.field(...) and make() relies on the generated
# __init__(ctx, verbose, resolver). Restored here — confirm against upstream.
@dataclass
class TypeshedFinder:
    """Resolves names, signatures, attributes and base classes from typeshed
    stubs via typeshed_client."""

    # Context used for assignability checks when building signatures.
    ctx: CanAssignContext = field(repr=False)
    # When True, log() prints diagnostic messages about skipped objects.
    verbose: bool = True
    resolver: typeshed_client.Resolver = field(default_factory=typeshed_client.Resolver)
    # Internal memoization caches (not part of the constructor).
    _assignment_cache: Dict[Tuple[str, ast.AST], Value] = field(default_factory=dict, repr=False, init=False)
    _attribute_cache: Dict[Tuple[str, str, bool], Value] = field(default_factory=dict, repr=False, init=False)
    _active_infos: List[typeshed_client.resolver.ResolvedName] = field(default_factory=list, repr=False, init=False)
# NOTE(review): the `cls` first parameter implies a @classmethod decorator that
# was lost in transcription; restored here.
@classmethod
def make(cls, can_assign_ctx: CanAssignContext, options: Options, *, verbose: bool = False) -> 'TypeshedFinder':
    """Alternate constructor: build a finder whose stub search path includes
    any user-configured extra stub directories from `options`."""
    extra_paths = options.get_value_for(StubPath)
    ctx = typeshed_client.get_search_context()
    # Rebuild the search context with the extra stub paths appended.
    ctx = typeshed_client.get_search_context(search_path=[*ctx.search_path, *extra_paths])
    resolver = typeshed_client.Resolver(ctx)
    return TypeshedFinder(can_assign_ctx, verbose, resolver)
def log(self, message: str, obj: object) -> None:
    """Print a "<message>: <obj!r>" diagnostic line when verbose mode is on."""
    if self.verbose:
        print(f'{message}: {obj!r}')
def _get_sig_from_method_descriptor(self, obj: MethodDescriptorType, allow_call: bool) -> Optional[ConcreteSignature]:
    """Look up a stub signature for a method descriptor via its defining class."""
    owner = obj.__objclass__
    owner_fqn = self._get_fq_name(owner)
    if owner_fqn is None:
        return None
    owner_info = self._get_info_for_name(owner_fqn)
    return self._get_method_signature_from_info(
        owner_info, obj, owner_fqn, owner.__module__, owner, allow_call=allow_call)
def get_argspec(self, obj: object, *, allow_call: bool=False, type_params: Sequence[Value]=()) -> Optional[ConcreteSignature]:
    """Find the typeshed signature for a runtime object, if any.

    Handles, in order: dotted-name strings, method descriptors, builtin
    classmethods, bound methods (unsupported), objects nested inside a class
    (via __qualname__), and finally plain fully-qualified lookup.
    """
    if isinstance(obj, str):
        # A dotted name was passed directly instead of an object.
        return self.get_argspec_for_fully_qualified_name(obj, obj, type_params=type_params)
    if (inspect.ismethoddescriptor(obj) and hasattr_static(obj, '__objclass__')):
        return self._get_sig_from_method_descriptor(obj, allow_call)
    if (inspect.isbuiltin(obj) and isinstance(obj.__self__, type)):
        # Builtin bound to a class (e.g. dict.fromkeys): find the underlying
        # descriptor in the class dict and bind its signature to the class.
        method = obj.__self__.__dict__.get(obj.__name__)
        if ((method is not None) and inspect.ismethoddescriptor(method) and hasattr_static(method, '__objclass__')):
            sig = self._get_sig_from_method_descriptor(method, allow_call)
            if (sig is None):
                return None
            bound = make_bound_method(sig, Composite(TypedValue(obj.__self__)))
            if (bound is None):
                return None
            return bound.get_signature(ctx=self.ctx)
    if inspect.ismethod(obj):
        self.log('Ignoring method', obj)
        return None
    if (hasattr_static(obj, '__qualname__') and hasattr_static(obj, '__name__') and hasattr_static(obj, '__module__') and isinstance(obj.__qualname__, str) and (obj.__qualname__ != obj.__name__) and ('.' in obj.__qualname__)):
        # Object nested inside a class: resolve the parent class first, then
        # look the object up as a child node of the parent's stub info.
        (parent_name, own_name) = obj.__qualname__.rsplit('.', maxsplit=1)
        if ((parent_name == 'EnumType') and (obj.__module__ == 'enum')):
            # typeshed names the enum metaclass EnumMeta.
            parent_fqn = 'enum.EnumMeta'
        else:
            parent_fqn = f'{obj.__module__}.{parent_name}'
        parent_info = self._get_info_for_name(parent_fqn)
        if (parent_info is not None):
            maybe_info = self._get_child_info(parent_info, own_name, obj.__module__)
            if (maybe_info is not None):
                (info, mod) = maybe_info
                fq_name = f'{parent_fqn}.{own_name}'
                sig = self._get_signature_from_info(info, obj, fq_name, mod, allow_call=allow_call)
                return sig
    # Fallback: plain fully-qualified name lookup.
    fq_name = self._get_fq_name(obj)
    if (fq_name is None):
        return None
    return self.get_argspec_for_fully_qualified_name(fq_name, obj, allow_call=allow_call, type_params=type_params)
def get_argspec_for_fully_qualified_name(self, fq_name: str, obj: object, *, allow_call: bool = False, type_params: Sequence[Value] = ()) -> Optional[ConcreteSignature]:
    """Resolve `fq_name` in typeshed and extract a signature for `obj` from it."""
    module_name, _ = fq_name.rsplit('.', maxsplit=1)
    resolved = self._get_info_for_name(fq_name)
    return self._get_signature_from_info(
        resolved, obj, fq_name, module_name,
        allow_call=allow_call, type_params=type_params)
def get_bases(self, typ: type) -> Optional[List[Value]]:
    """Return the stub-declared bases of runtime type `typ` (None if unknown)."""
    return self.get_bases_for_value(TypedValue(typ))
def get_bases_for_value(self, val: Value) -> Optional[List[Value]]:
    """Return stub-declared bases for a TypedValue, with special cases for
    types whose runtime identity differs from their typeshed representation.

    Returns None for non-TypedValue inputs and for Callable (which has no
    meaningful base list here).
    """
    if isinstance(val, TypedValue):
        if isinstance(val.typ, type):
            typ = val.typ
            # Special-case runtime typing constructs before consulting stubs.
            if (typ is AbstractSet):
                return [GenericValue(Collection, (TypeVarValue(T_co),))]
            if ((typ is Callable) or (typ is collections.abc.Callable)):
                return None
            if (typ is TypedDict):
                return [GenericValue(MutableMapping, [TypedValue(str), TypedValue(object)])]
            if (typ is EnumMeta):
                return [TypedValue(type)]
            fq_name = self._get_fq_name(typ)
            if (fq_name is None):
                return None
        else:
            # String-named type: same special cases keyed by dotted name.
            fq_name = val.typ
            if (fq_name == 'collections.abc.Set'):
                return [GenericValue(Collection, (TypeVarValue(T_co),))]
            elif (fq_name == 'contextlib.AbstractContextManager'):
                return [GenericValue(Protocol, (TypeVarValue(T_co),))]
            elif (fq_name in ('typing.Callable', 'collections.abc.Callable')):
                return None
            elif is_typing_name(fq_name, 'TypedDict'):
                return [GenericValue(MutableMapping, [TypedValue(str), TypedValue(object)])]
        return self.get_bases_for_fq_name(fq_name)
    return None
def is_protocol(self, typ: type) -> bool:
    """True if `typ`'s stub base list includes typing.Protocol."""
    fq_name = self._get_fq_name(typ)
    if fq_name is None:
        return False
    bases = self.get_bases_for_value(TypedValue(fq_name))
    if bases is None:
        return False
    return any(
        isinstance(base, TypedValue) and is_typing_name(base.typ, 'Protocol')
        for base in bases
    )
def get_bases_recursively(self, typ: Union[type, str]) -> List[Value]:
    """Return `typ` itself followed by all transitive stub bases (deduplicated,
    depth-first order)."""
    stack = [TypedValue(typ)]
    seen = set()
    bases = []
    while stack:
        next_base = stack.pop()
        if next_base in seen:
            continue
        seen.add(next_base)
        bases.append(next_base)
        new_bases = self.get_bases_for_value(next_base)
        if new_bases is not None:
            # Bug fix: these were previously appended to `bases` directly, so
            # the walk stopped after one level (the function was not recursive
            # and the `seen` check was dead code). Push onto the work stack so
            # grandparent bases are visited as well.
            stack += new_bases
    return bases
def get_bases_for_fq_name(self, fq_name: str) -> Optional[List[Value]]:
    """Stub-declared bases for a dotted name; Generic/Protocol themselves have none."""
    if fq_name in ('typing.Generic', 'typing.Protocol', 'typing_extensions.Protocol'):
        return []
    module_name, _ = fq_name.rsplit('.', maxsplit=1)
    resolved = self._get_info_for_name(fq_name)
    return self._get_bases_from_info(resolved, module_name, fq_name)
def get_attribute(self, typ: type, attr: str, *, on_class: bool) -> Value:
    """Value of `attr` as declared on `typ` in the stubs.

    Returns UNINITIALIZED_VALUE when the type has no usable name or the
    attribute is absent.
    """
    fq_name = self._get_fq_name(typ)
    if fq_name is None:
        return UNINITIALIZED_VALUE
    return self.get_attribute_for_fq_name(fq_name, attr, on_class=on_class)
def get_attribute_for_fq_name(self, fq_name: str, attr: str, *, on_class: bool) -> Value:
    """Cached attribute lookup keyed by (fq_name, attr, on_class)."""
    cache_key = (fq_name, attr, on_class)
    if cache_key in self._attribute_cache:
        return self._attribute_cache[cache_key]
    module_name, _ = fq_name.rsplit('.', maxsplit=1)
    info = self._get_info_for_name(fq_name)
    result = self._get_attribute_from_info(info, module_name, attr, on_class=on_class)
    self._attribute_cache[cache_key] = result
    return result
def get_attribute_recursively(self, fq_name: str, attr: str, *, on_class: bool) -> Tuple[Value, Union[type, str, None]]:
    """Search for `attr` along the stub base-class chain of `fq_name`.

    Returns (value, defining base) for the first base that declares the
    attribute, or (UNINITIALIZED_VALUE, None) when nothing does.
    """
    for base in self.get_bases_recursively(fq_name):
        if not isinstance(base, TypedValue):
            continue
        if isinstance(base.typ, str):
            candidate = self.get_attribute_for_fq_name(base.typ, attr, on_class=on_class)
        else:
            candidate = self.get_attribute(base.typ, attr, on_class=on_class)
        if candidate is not UNINITIALIZED_VALUE:
            return candidate, base.typ
    return UNINITIALIZED_VALUE, None
def has_attribute(self, typ: Union[(type, str)], attr: str) -> bool:
    """Whether `typ` or any of its stub bases declares `attr`."""
    if self._has_own_attribute(typ, attr):
        return True
    bases = self.get_bases_for_value(TypedValue(typ))
    if (bases is not None):
        for base in bases:
            if (not isinstance(base, TypedValue)):
                continue
            typ = base.typ
            # Generic/Protocol markers carry no attributes of their own.
            if ((typ is Generic) or is_typing_name(typ, 'Protocol')):
                continue
            if self.has_attribute(base.typ, attr):
                return True
    return False
def get_all_attributes(self, typ: Union[type, str]) -> Set[str]:
    """All attribute names declared directly on `typ` in the stubs."""
    fq_name = typ if isinstance(typ, str) else self._get_fq_name(typ)
    if fq_name is None:
        return set()
    module_name, _ = fq_name.rsplit('.', maxsplit=1)
    resolved = self._get_info_for_name(fq_name)
    return self._get_all_attributes_from_info(resolved, module_name)
def has_stubs(self, typ: type) -> bool:
    """Whether typeshed has any stub entry for `typ`."""
    fq_name = self._get_fq_name(typ)
    if fq_name is None:
        return False
    return self._get_info_for_name(fq_name) is not None
def resolve_name(self, module: str, name: str) -> Value:
    """Resolve `module.name` through the stubs, falling back to the real
    builtins module, else Any."""
    info = self._get_info_for_name(f'{module}.{name}')
    if (info is not None):
        return self._value_from_info(info, module)
    elif hasattr(builtins, name):
        # Stubs can omit names that exist at runtime on builtins; only None
        # and classes are safe to expose as exact KnownValues here.
        val = getattr(builtins, name)
        if ((val is None) or isinstance(val, type)):
            return KnownValue(val)
    # NOTE(review): indentation was ambiguous in this copy; this final return
    # is placed at function level so unresolved names (and non-type builtins)
    # fall through to Any — confirm against upstream.
    return AnyValue(AnySource.inference)
def _get_attribute_from_info(self, info: typeshed_client.resolver.ResolvedName, mod: str, attr: str, *, on_class: bool, is_typeddict: bool=False) -> Value:
    """Extract the value of `attr` from a resolved stub entry.

    Follows imports, descends into class child nodes, and chases simple
    aliases (Assign to a known type). Returns UNINITIALIZED_VALUE otherwise.
    """
    if (info is None):
        return UNINITIALIZED_VALUE
    elif isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the defining module.
        # NOTE(review): is_typeddict is not propagated on this recursive call
        # — confirm whether that is intentional.
        return self._get_attribute_from_info(info.info, '.'.join(info.source_module), attr, on_class=on_class)
    elif isinstance(info, typeshed_client.NameInfo):
        if isinstance(info.ast, ast.ClassDef):
            if (info.child_nodes and (attr in info.child_nodes)):
                child_info = info.child_nodes[attr]
                if isinstance(child_info, typeshed_client.NameInfo):
                    return self._get_value_from_child_info(child_info.ast, mod, is_typeddict=is_typeddict, on_class=on_class, parent_name=info.ast.name)
                # Child nodes are expected to always be NameInfo here.
                assert False, repr(child_info)
            return UNINITIALIZED_VALUE
        elif isinstance(info.ast, ast.Assign):
            # Alias like `X = SomeClass`: look the attribute up on the target.
            val = self._parse_type(info.ast.value, mod)
            if (isinstance(val, KnownValue) and isinstance(val.val, type)):
                return self.get_attribute(val.val, attr, on_class=on_class)
            else:
                return UNINITIALIZED_VALUE
        else:
            return UNINITIALIZED_VALUE
    return UNINITIALIZED_VALUE
def _get_value_from_child_info(self, node: Union[(ast.AST, typeshed_client.OverloadedName, typeshed_client.ImportedName)], mod: str, *, is_typeddict: bool, on_class: bool, parent_name: str) -> Value:
    """Turn a class child node (attribute or method stub) into a Value."""
    if isinstance(node, ast.AnnAssign):
        # Annotated attribute: its value is the annotation's type.
        return self._parse_type(node.annotation, mod, is_typeddict=is_typeddict)
    elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
        decorators = [self._parse_expr(decorator, mod) for decorator in node.decorator_list]
        # Property-like methods contribute their return type, not a callable.
        if (node.returns and (set(decorators) & PROPERTY_LIKE)):
            return self._parse_type(node.returns, mod)
        # autobind (drop self/cls) only when accessed on an instance.
        sig = self._get_signature_from_func_def(node, None, mod, autobind=(not on_class))
        if (sig is None):
            return AnyValue(AnySource.inference)
        else:
            return CallableValue(sig)
    elif isinstance(node, ast.ClassDef):
        # Nested classes are not modeled precisely.
        return AnyValue(AnySource.inference)
    elif isinstance(node, ast.Assign):
        return UNINITIALIZED_VALUE
    elif isinstance(node, typeshed_client.OverloadedName):
        # Combine overload definitions into a single OverloadedSignature when
        # every definition produced a plain Signature.
        vals = [self._get_value_from_child_info(subnode, mod, is_typeddict=is_typeddict, on_class=on_class, parent_name=parent_name) for subnode in node.definitions]
        if all_of_type(vals, CallableValue):
            sigs = [val.signature for val in vals]
            if all_of_type(sigs, Signature):
                return CallableValue(OverloadedSignature(sigs))
        return AnyValue(AnySource.inference)
    # Any other node kind is unexpected.
    assert False, repr(node)
def _get_child_info(self, info: typeshed_client.resolver.ResolvedName, attr: str, mod: str) -> Optional[Tuple[typeshed_client.resolver.ResolvedName, str]]:
    """Find the child node named `attr` inside a resolved class entry.

    Returns (child info, defining module) or None.
    """
    if info is None:
        return None
    if isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the module that actually defines the name.
        return self._get_child_info(info.info, attr, '.'.join(info.source_module))
    if isinstance(info, typeshed_client.NameInfo):
        node = info.ast
        if isinstance(node, ast.ClassDef) and info.child_nodes and attr in info.child_nodes:
            return info.child_nodes[attr], mod
        return None
    return None
def _has_own_attribute(self, typ: Union[type, str], attr: str) -> bool:
    """Whether `typ` itself (not its bases) declares `attr` in the stubs."""
    # Deliberately pretend object has no __getattribute__ so lookups can
    # fall through instead of always succeeding.
    if typ is object and attr == '__getattribute__':
        return False
    fq_name = typ if isinstance(typ, str) else self._get_fq_name(typ)
    if fq_name is None:
        return False
    module_name, _ = fq_name.rsplit('.', maxsplit=1)
    resolved = self._get_info_for_name(fq_name)
    return self._has_attribute_from_info(resolved, module_name, attr)
def _get_all_attributes_from_info(self, info: typeshed_client.resolver.ResolvedName, mod: str) -> Set[str]:
    """Collect every attribute name declared on a resolved class entry."""
    if (info is None):
        return set()
    elif isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the defining module.
        return self._get_all_attributes_from_info(info.info, '.'.join(info.source_module))
    elif isinstance(info, typeshed_client.NameInfo):
        if isinstance(info.ast, ast.ClassDef):
            if (info.child_nodes is not None):
                return set(info.child_nodes)
        elif isinstance(info.ast, ast.Assign):
            # Alias to another type: delegate to that type's attributes.
            val = self._parse_expr(info.ast.value, mod)
            if (isinstance(val, KnownValue) and isinstance(val.val, type)):
                return self.get_all_attributes(val.val)
            else:
                return set()
        else:
            return set()
    # Fallback for ClassDef without child nodes and unrecognized info kinds.
    return set()
def _has_attribute_from_info(self, info: typeshed_client.resolver.ResolvedName, mod: str, attr: str) -> bool:
    """Whether a resolved class entry directly declares `attr`."""
    if (info is None):
        return False
    elif isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the defining module.
        return self._has_attribute_from_info(info.info, '.'.join(info.source_module), attr)
    elif isinstance(info, typeshed_client.NameInfo):
        if isinstance(info.ast, ast.ClassDef):
            if (info.child_nodes and (attr in info.child_nodes)):
                return True
            return False
        elif isinstance(info.ast, ast.Assign):
            # Alias to another type: delegate the check to that type.
            val = self._parse_expr(info.ast.value, mod)
            if (isinstance(val, KnownValue) and isinstance(val.val, type)):
                return self.has_attribute(val.val, attr)
            else:
                return False
        else:
            return False
    return False
def _get_bases_from_info(self, info: typeshed_client.resolver.ResolvedName, mod: str, fq_name: str) -> Optional[List[Value]]:
    """Extract the base-class list from a resolved stub entry."""
    if (info is None):
        return None
    elif isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the defining module.
        return self._get_bases_from_info(info.info, '.'.join(info.source_module), fq_name)
    elif isinstance(info, typeshed_client.NameInfo):
        if isinstance(info.ast, ast.ClassDef):
            bases = info.ast.bases
            return [self._parse_type(base, mod) for base in bases]
        elif isinstance(info.ast, ast.Assign):
            # Alias like `X = SomeClass`: use the target's bases.
            val = self._parse_expr(info.ast.value, mod)
            if (isinstance(val, KnownValue) and isinstance(val.val, type)):
                new_fq_name = self._get_fq_name(val.val)
                if (fq_name == new_fq_name):
                    # Self-referential alias: avoid infinite recursion.
                    return [AnyValue(AnySource.inference)]
                return self.get_bases(val.val)
            else:
                return [AnyValue(AnySource.inference)]
        elif isinstance(info.ast, (typeshed_client.OverloadedName, typeshed_client.ImportedName, ast.FunctionDef)):
            # Functions and overloads have no bases.
            return None
        else:
            raise NotImplementedError(ast.dump(info.ast))
    return None
def _get_method_signature_from_info(self, info: typeshed_client.resolver.ResolvedName, obj: object, fq_name: str, mod: str, objclass: type, *, allow_call: bool=False) -> Optional[ConcreteSignature]:
    """Extract the signature of method `obj` from its class's resolved stub entry."""
    if info is None:
        return None
    if isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the defining module.
        return self._get_method_signature_from_info(
            info.info, obj, fq_name, '.'.join(info.source_module), objclass,
            allow_call=allow_call)
    if isinstance(info, typeshed_client.NameInfo):
        children = info.child_nodes
        if children and obj.__name__ in children:
            return self._get_signature_from_info(
                children[obj.__name__], obj, fq_name, mod, objclass,
                allow_call=allow_call)
        return None
    self.log('Ignoring unrecognized info', (fq_name, info))
    return None
def _get_fq_name(self, obj: Any) -> Optional[str]:
    """Best-effort fully qualified name for `obj`, normalized for stub lookup."""
    # Objects whose runtime identity doesn't match their typeshed name.
    if obj is GeneratorType:
        return 'typing.Generator'
    if obj is open:
        return 'builtins.open'
    try:
        module = obj.__module__
        if module is None:
            module = 'builtins'
        if module == '_io':
            # The io module's implementation lives in _io at runtime.
            module = 'io'
        fq_name = f'{module}.{obj.__qualname__}'
        # Reject names that don't resolve back to this exact object.
        if not _obj_from_qualname_is(module, obj.__qualname__, obj):
            self.log('Ignoring invalid name', fq_name)
            return None
        return _TYPING_ALIASES.get(fq_name, fq_name)
    except (AttributeError, TypeError):
        self.log('Ignoring object without module or qualname', obj)
        return None
def _get_signature_from_info(self, info: typeshed_client.resolver.ResolvedName, obj: object, fq_name: str, mod: str, objclass: Optional[type]=None, *, allow_call: bool=False, type_params: Sequence[Value]=()) -> Optional[ConcreteSignature]:
    """Turn a resolved stub entry into a signature.

    Handles plain functions, overload groups, and classes (whose constructor
    signature comes from __new__ or __init__); follows imports.
    """
    if isinstance(info, typeshed_client.NameInfo):
        if isinstance(info.ast, (ast.FunctionDef, ast.AsyncFunctionDef)):
            return self._get_signature_from_func_def(info.ast, obj, mod, objclass, allow_call=allow_call)
        elif isinstance(info.ast, typeshed_client.OverloadedName):
            # Build one Signature per overload definition; all must succeed.
            sigs = []
            for defn in info.ast.definitions:
                if (not isinstance(defn, (ast.FunctionDef, ast.AsyncFunctionDef))):
                    self.log('Ignoring unrecognized AST in overload', (fq_name, info))
                    return None
                sig = self._get_signature_from_func_def(defn, obj, mod, objclass, allow_call=allow_call)
                if (sig is None):
                    self.log('Could not get sig for overload member', (defn,))
                    return None
                sigs.append(sig)
            return OverloadedSignature(sigs)
        elif isinstance(info.ast, ast.ClassDef):
            # Constructor signature: prefer __new__ unless it is missing or
            # inherited from object, in which case fall back to __init__.
            (new_value, provider) = self.get_attribute_recursively(fq_name, '__new__', on_class=True)
            sig = None
            from_init = False
            if ((new_value is UNINITIALIZED_VALUE) or (provider is object)):
                (init_value, provider) = self.get_attribute_recursively(fq_name, '__init__', on_class=True)
                if isinstance(init_value, CallableValue):
                    sig = init_value.signature
                    from_init = True
            elif isinstance(new_value, CallableValue):
                sig = new_value.signature
            if (sig is not None):
                if safe_isinstance(obj, type):
                    if allow_call:
                        # Mark the signature(s) as directly callable on obj.
                        if isinstance(sig, Signature):
                            sig = replace(sig, allow_call=True, callable=obj)
                        else:
                            sig = OverloadedSignature([replace(sig, allow_call=True, callable=obj) for sig in sig.signatures])
                    typ = obj
                else:
                    typ = fq_name
                if type_params:
                    self_val = GenericValue(typ, type_params)
                else:
                    self_val = TypedValue(typ)
                if from_init:
                    # __init__ returns None in stubs; the constructor's real
                    # return is an instance of the class itself.
                    sig = sig.replace_return_value(self_val)
                    self_annotation_value = self_val
                else:
                    # __new__'s implicit first argument is the class.
                    self_annotation_value = SubclassValue(self_val)
                bound_sig = make_bound_method(sig, Composite(self_val))
                if (bound_sig is None):
                    return None
                sig = bound_sig.get_signature(ctx=self.ctx, self_annotation_value=self_annotation_value)
                return sig
            return None
        else:
            self.log('Ignoring unrecognized AST', (fq_name, info))
            return None
    elif isinstance(info, typeshed_client.ImportedInfo):
        # Follow the import to the defining module.
        return self._get_signature_from_info(info.info, obj, fq_name, '.'.join(info.source_module), objclass, allow_call=allow_call)
    elif (info is None):
        return None
    else:
        self.log('Ignoring unrecognized info', (fq_name, info))
        return None
# NOTE(review): the decorator line was corrupted to a bare
# `.cached_per_instance()` (a syntax error); reconstructed as
# qcore.caching.cached_per_instance — confirm against the file's imports.
@qcore.caching.cached_per_instance()
def _get_info_for_name(self, fq_name: str) -> typeshed_client.resolver.ResolvedName:
    """Resolve a fully qualified name through typeshed_client (cached per instance)."""
    return self.resolver.get_fully_qualified_name(fq_name)
def _get_signature_from_func_def(self, node: Union[ast.FunctionDef, ast.AsyncFunctionDef], obj: object, mod: str, objclass: Optional[type]=None, *, autobind: bool=False, allow_call: bool=False) -> Optional[Signature]:
    """Build a Signature from a stub function/method AST node.

    `objclass` provides the self type for the first parameter; `autobind`
    drops the implicit self/cls parameter.
    """
    is_classmethod = is_staticmethod = is_evaluated = False
    deprecated = None
    for decorator_ast in node.decorator_list:
        decorator = self._parse_expr(decorator_ast, mod)
        if ((decorator == KnownValue(abstractmethod)) or (decorator == KnownValue(overload)) or (decorator == KnownValue(real_overload))):
            # These decorators don't change the visible signature.
            continue
        elif (decorator == KnownValue(classmethod)):
            is_classmethod = True
            if autobind:
                continue
        elif (decorator == KnownValue(staticmethod)):
            is_staticmethod = True
            if autobind:
                continue
        elif (decorator == KnownValue(evaluated)):
            is_evaluated = True
            continue
        elif (isinstance(decorator, DecoratorValue) and (decorator.decorator is deprecated_decorator)):
            arg = decorator.args[0]
            if (isinstance(arg, KnownValue) and isinstance(arg.val, str)):
                deprecated = arg.val
    if (node.returns is None):
        return_value = AnyValue(AnySource.unannotated)
    else:
        return_value = self._parse_type(node.returns, mod)
    # Unknown decorators may transform the function arbitrarily, so stop
    # assuming the first parameter is an instance of objclass.
    if node.decorator_list:
        objclass = None
    args = node.args
    arguments: List[SigParameter] = []
    num_pos_only_args = len(args.posonlyargs)
    # `defaults` covers the tail of posonlyargs + args combined; the first
    # num_pos_only_defaults entries belong to positional-only parameters.
    defaults = args.defaults
    num_pos_only_defaults = (len(defaults) - len(args.args))
    if (num_pos_only_defaults > 0):
        num_without_default = (num_pos_only_args - num_pos_only_defaults)
        # Bug fix: this previously sliced defaults[num_pos_only_defaults:],
        # which handed the *regular* parameters' defaults to the
        # positional-only parameters; the pos-only defaults are the leading
        # entries of the combined list.
        pos_only_defaults = (([None] * num_without_default) + defaults[:num_pos_only_defaults])
        defaults = defaults[num_pos_only_defaults:]
    else:
        pos_only_defaults = [None for _ in args.posonlyargs]
    arguments += self._parse_param_list(args.posonlyargs, pos_only_defaults, mod, ParameterKind.POSITIONAL_ONLY, objclass)
    # Pad regular-parameter defaults on the left so they align with args.args.
    num_without_defaults = (len(args.args) - len(defaults))
    defaults = (([None] * num_without_defaults) + defaults)
    arguments += self._parse_param_list(args.args, defaults, mod, ParameterKind.POSITIONAL_OR_KEYWORD, objclass)
    if autobind:
        # Drop the implicit self/cls parameter (staticmethods have none).
        if (is_classmethod or (not is_staticmethod)):
            arguments = arguments[1:]
    if (args.vararg is not None):
        arguments.append(self._parse_param(args.vararg, None, mod, ParameterKind.VAR_POSITIONAL))
    arguments += self._parse_param_list(args.kwonlyargs, args.kw_defaults, mod, ParameterKind.KEYWORD_ONLY)
    if (args.kwarg is not None):
        arguments.append(self._parse_param(args.kwarg, None, mod, ParameterKind.VAR_KEYWORD))
    # A positional-only parameter appearing after non-positional-only ones
    # retroactively makes the earlier parameters positional-only (this
    # handles "__name"-style pos-only markers in stubs).
    seen_non_positional = False
    cleaned_arguments = []
    for arg in arguments:
        if ((arg.kind is ParameterKind.POSITIONAL_ONLY) and seen_non_positional):
            cleaned_arguments = [replace(a, kind=ParameterKind.POSITIONAL_ONLY) for a in cleaned_arguments]
            seen_non_positional = False
        else:
            seen_non_positional = True
        cleaned_arguments.append(arg)
    if is_evaluated:
        ctx = _AnnotationContext(self, mod)
        evaluator = SyntheticEvaluator(node, return_value, _DummyErrorContext(), ctx)
    else:
        evaluator = None
    return Signature.make(
        cleaned_arguments,
        callable=obj,
        # Async stubs return a coroutine wrapping the annotated type.
        return_annotation=(make_coro_type(return_value) if isinstance(node, ast.AsyncFunctionDef) else return_value),
        allow_call=allow_call,
        evaluator=evaluator,
        deprecated=deprecated,
    )
def _parse_param_list(self, args: Iterable[ast.arg], defaults: Iterable[Optional[ast.AST]], module: str, kind: ParameterKind, objclass: Optional[type]=None) -> Iterable[SigParameter]:
    """Yield one SigParameter per (arg, default) pair.

    Only the first parameter may receive *objclass* (used to infer the
    type of an unannotated ``self``); subsequent parameters get None.
    """
    for index, (param, default_node) in enumerate(zip(args, defaults)):
        owner = objclass if index == 0 else None
        yield self._parse_param(param, default_node, module, kind, objclass=owner)
def _parse_param(self, arg: ast.arg, default: Optional[ast.AST], module: str, kind: ParameterKind, *, objclass: Optional[type]=None) -> SigParameter:
    """Convert one stub AST argument into a SigParameter.

    If the argument carries an annotation it is parsed; otherwise, when
    *objclass* is given (the class owning an unannotated ``self``), the
    parameter is typed as that class — generic over any typevars found
    in its bases. A default of ``...`` (the stub placeholder) is widened
    to Any.
    """
    typ = AnyValue(AnySource.unannotated)
    if (arg.annotation is not None):
        typ = self._parse_type(arg.annotation, module, allow_unpack=kind.allow_unpack())
    elif (objclass is not None):
        bases = self.get_bases(objclass)
        if (bases is None):
            typ = TypedValue(objclass)
        else:
            # Make `self` generic when any base introduces typevars.
            typevars = uniq_chain((extract_typevars(base) for base in bases))
            if typevars:
                typ = GenericValue(objclass, [make_type_var_value(tv, _AnnotationContext(finder=self, module=tv.__module__)) for tv in typevars])
            else:
                typ = TypedValue(objclass)
    name = arg.arg
    # Stub convention: parameters named __foo are positional-only; the
    # leading underscores are stripped from the exposed name.
    if ((kind is ParameterKind.POSITIONAL_OR_KEYWORD) and is_positional_only_arg_name(name)):
        kind = ParameterKind.POSITIONAL_ONLY
        name = name[2:]
    typ = translate_vararg_type(kind, typ, self.ctx)
    # `self`/`cls` is always positional-only.
    if (objclass is not None):
        kind = ParameterKind.POSITIONAL_ONLY
    if (default is None):
        return SigParameter(name, kind, annotation=typ)
    else:
        default_value = self._parse_expr(default, module)
        # `...` is just a placeholder default in stubs; treat it as Any.
        if (default_value == KnownValue(...)):
            default_value = AnyValue(AnySource.unannotated)
        return SigParameter(name, kind, annotation=typ, default=default_value)
def _parse_expr(self, node: ast.AST, module: str) -> Value:
    """Evaluate a stub expression into a Value in *module*'s context."""
    return value_from_ast(node, ctx=_AnnotationContext(finder=self, module=module))
def _parse_type(self, node: ast.AST, module: str, *, is_typeddict: bool=False, allow_unpack: bool=False) -> Value:
    """Parse a stub annotation expression into a type Value.

    When verbose logging is enabled, records annotations that degrade
    to Any so stub coverage gaps are visible.
    """
    evaluated = self._parse_expr(node, module)
    ctx = _AnnotationContext(finder=self, module=module)
    parsed = type_from_value(evaluated, ctx=ctx, is_typeddict=is_typeddict, allow_unpack=allow_unpack)
    if self.verbose and isinstance(parsed, AnyValue):
        self.log('Got Any', (ast.dump(node), module))
    return parsed
def _parse_call_assignment(self, info: typeshed_client.NameInfo, module: str) -> Value:
    """Resolve an assignment whose RHS is a call (e.g. ``x = f(...)``).

    Prefers the runtime object when the module is importable; otherwise
    falls back to evaluating the call AST, or Any if the shape is not
    an Assign-of-a-Call.
    """
    try:
        __import__(module)
        return KnownValue(getattr(sys.modules[module], info.name))
    except Exception:
        pass
    if isinstance(info.ast, ast.Assign) and isinstance(info.ast.value, ast.Call):
        ctx = _AnnotationContext(finder=self, module=module)
        return value_from_ast(info.ast.value, ctx=ctx)
    return AnyValue(AnySource.inference)
def _extract_metadata(self, module: str, node: ast.ClassDef) -> Sequence[Extension]:
    """Collect extensions implied by class decorators.

    Currently only recognizes ``@deprecated("message")`` with a literal
    string argument, mapped to a DeprecatedExtension.
    """
    collected = []
    for dec in node.decorator_list:
        value = self._parse_expr(dec, module)
        if not isinstance(value, DecoratorValue) or value.decorator is not deprecated_decorator:
            continue
        message = value.args[0]
        if isinstance(message, KnownValue) and isinstance(message.val, str):
            collected.append(DeprecatedExtension(message.val))
    return collected
def make_synthetic_type(self, module: str, info: typeshed_client.NameInfo) -> Value:
    """Create a synthetic type for a stub-only class.

    The class is identified by its fully qualified name; if any base is
    a TypedDict the result becomes a TypedDictValue. Returns a
    SubclassValue (optionally annotated with decorator metadata such as
    deprecation).
    """
    fq_name = f'{module}.{info.name}'
    bases = self._get_bases_from_info(info, module, fq_name)
    typ = TypedValue(fq_name)
    if isinstance(info.ast, ast.ClassDef):
        metadata = self._extract_metadata(module, info.ast)
    else:
        metadata = []
    if (bases is not None):
        # A class is a TypedDict if it inherits (directly or via a synthetic
        # base) from typing.TypedDict.
        if any((((isinstance(base, KnownValue) and is_typing_name(base.val, 'TypedDict')) or isinstance(base, TypedDictValue)) for base in bases)):
            typ = self._make_typeddict(module, info, bases)
    val = SubclassValue(typ, exactly=True)
    if metadata:
        return annotate_value(val, metadata)
    return val
def _make_typeddict(self, module: str, info: typeshed_client.NameInfo, bases: Sequence[Value]) -> TypedDictValue:
    """Build a TypedDictValue for a stub TypedDict class.

    Honors the ``total=`` class keyword, inherits items from TypedDict
    bases, and lets the class's own declared fields override inherited
    ones.
    """
    total = True
    if isinstance(info.ast, ast.ClassDef):
        # `class X(TypedDict, total=False):` controls default requiredness.
        for keyword in info.ast.keywords:
            if (keyword.arg == 'total'):
                val = self._parse_expr(keyword.value, module)
                if (isinstance(val, KnownValue) and isinstance(val.val, bool)):
                    total = val.val
    attrs = self._get_all_attributes_from_info(info, module)
    fields = [self._get_attribute_from_info(info, module, attr, on_class=True, is_typeddict=True) for attr in attrs]
    items = {}
    # Inherited items first so the class's own fields take precedence.
    for base in bases:
        if isinstance(base, TypedDictValue):
            items.update(base.items)
    items.update({attr: self._make_td_value(field, total) for (attr, field) in zip(attrs, fields)})
    return TypedDictValue(items)
def _make_td_value(self, field: Value, total: bool) -> Tuple[(bool, Value)]:
    """Return (required, type) for one TypedDict field.

    PEP 655 Required/NotRequired markers override the class-level
    ``total`` default.
    """
    if isinstance(field, Pep655Value):
        return field.required, field.value
    return total, field
def _value_from_info(self, info: typeshed_client.resolver.ResolvedName, module: str) -> Value:
    """Resolve a typeshed name to a Value, guarding against recursion.

    Self-referential stub definitions would otherwise loop forever, so
    any name already being resolved short-circuits to Any.
    """
    if (info in self._active_infos):
        return AnyValue(AnySource.inference)
    self._active_infos.append(info)
    try:
        return self._value_from_info_inner(info, module)
    finally:
        # Always unwind the guard stack, even on exceptions.
        self._active_infos.pop()
def _value_from_info_inner(self, info: typeshed_client.resolver.ResolvedName, module: str) -> Value:
    """Resolve *info* (without the recursion guard of _value_from_info).

    Dispatch order: re-exported imports recurse into the source module;
    names with typing aliases are redirected; assignments are evaluated
    (with caching); otherwise the runtime object is preferred, falling
    back to synthetic types / parsed annotations / signatures for
    stub-only definitions. Tuples denote modules.
    """
    if isinstance(info, typeshed_client.ImportedInfo):
        return self._value_from_info(info.info, '.'.join(info.source_module))
    elif isinstance(info, typeshed_client.NameInfo):
        fq_name = f'{module}.{info.name}'
        # Redirect e.g. typing.X aliases to their canonical location.
        if (fq_name in _TYPING_ALIASES):
            new_fq_name = _TYPING_ALIASES[fq_name]
            info = self._get_info_for_name(new_fq_name)
            return self._value_from_info(info, new_fq_name.rsplit('.', maxsplit=1)[0])
        if isinstance(info.ast, ast.Assign):
            # Cache per (module, AST node) since assignments may be re-resolved.
            key = (module, info.ast)
            if (key in self._assignment_cache):
                return self._assignment_cache[key]
            if isinstance(info.ast.value, ast.Call):
                value = self._parse_call_assignment(info, module)
            else:
                value = self._parse_expr(info.ast.value, module)
            self._assignment_cache[key] = value
            return value
        try:
            # Prefer the real runtime object when importable.
            __import__(module)
            mod = sys.modules[module]
            return KnownValue(getattr(mod, info.name))
        except Exception:
            # Stub-only name: synthesize from the AST instead.
            if isinstance(info.ast, ast.ClassDef):
                return self.make_synthetic_type(module, info)
            elif isinstance(info.ast, ast.AnnAssign):
                val = self._parse_type(info.ast.annotation, module)
                if (val != AnyValue(AnySource.incomplete_annotation)):
                    return val
                if info.ast.value:
                    return self._parse_expr(info.ast.value, module)
            elif isinstance(info.ast, (ast.FunctionDef, ast.AsyncFunctionDef, typeshed_client.OverloadedName)):
                sig = self._get_signature_from_info(info, None, fq_name, module)
                if (sig is not None):
                    return CallableValue(sig)
            self.log('Unable to import', (module, info))
            return AnyValue(AnySource.inference)
    elif isinstance(info, tuple):
        # A tuple of path components denotes a module.
        module_path = '.'.join(info)
        try:
            __import__(module_path)
            return KnownValue(sys.modules[module_path])
        except Exception:
            return SyntheticModuleValue(info)
    else:
        self.log('Ignoring info', info)
        return AnyValue(AnySource.inference)
class ZhongFen():
    """Scraping client for a points/sign-in/withdraw site.

    NOTE(review): every URL string literal in this class appears to have
    been redacted (e.g. ``requests.get(' headers=headers)`` leaves an
    unterminated string), and the Chinese text in the log f-strings was
    stripped. As written this class is not syntactically valid Python;
    the original URLs/messages must be restored before use.
    """

    def __init__(self, ck, index):
        # ck: value of the `token` cookie; index: account position used in
        # log-line prefixes (printed 1-based).
        self.ck = ck
        self.index = index
        self.headers = {'Host': 'lses-lcae.ihuju.cn', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 13; AC Build/TP1A.220624.014; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/115.0.5790.166 Mobile Safari/537.36 XiaoMi/MiuiBrowser/10.8.1 LT-APP/45/158/YM-RT/', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 'X-Requested-With': 'com.cb.tiaoma.zf', 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7', 'Cookie': f'token={ck}'}

    def get_infomation(self):
        # Scrape balance / locked amount / tomorrow's earnings out of the
        # account page HTML via regexes.
        headers = self.headers
        index = self.index
        response = requests.get(' headers=headers)
        response_text = response.text
        pattern = '<div class="money-c available_money">(\\d*\\.\\d*)</div>'
        balance = re.findall(pattern, response_text)[0]
        pattern = '<div class="money-c"><span id="jifen">(\\d*\\.\\d*)</span></div>'
        locked_amount = re.findall(pattern, response_text)[0]
        pattern = '<div class="money-c">(\\d*\\.\\d*)</div>'
        tomorrow_earnings = re.findall(pattern, response_text)[0]
        print(f'[{(index + 1)}] :{balance} :{locked_amount} :{tomorrow_earnings}')

    def sign_in(self):
        # Retry the sign-in POST up to 10 times; stops early when the
        # (redacted) success marker is found in the response body.
        headers = self.headers
        index = self.index
        for _ in range(10):
            data = {'uid': '51590'}
            url = '
            response = requests.post(url, headers=headers, data=data)
            response_text = response.text
            if ('' in response_text):
                print(f'[{(index + 1)}] ')
                return
            response_dict = response.json()
            if (response_dict.get('status') == 1):
                num = response_dict.get('num')
                print(f'[{(index + 1)}] {num}')
                time.sleep(0.5)
                continue
            print(f'[{(index + 1)}] {response_dict}')

    def withdraw(self):
        # Withdraw the full balance when it is at least 10 and the
        # (redacted) blocking condition is not shown on the page.
        headers = self.headers
        index = self.index
        ck = self.ck
        url = '
        response = requests.get(url, headers=headers)
        response_text = response.text
        pattern = 'class="jui_fc_red">(.*?)</a>'
        jui_fc_red = re.findall(pattern, response_text)[0]
        pattern = '<span id="money_num">(\\d*\\.\\d*)</span>'
        money_num = float(re.findall(pattern, response_text)[0])
        if ('' in jui_fc_red):
            print(f'[{(index + 1)}] :{money_num} ')
            return
        if (money_num < 10):
            print(f'[{(index + 1)}] :{money_num} ')
            return
        data = {'price': f'{money_num}'}
        url = '
        headers = {'Host': 'lses-lcae.ihuju.cn', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Linux; Android 13; AC Build/TP1A.220624.014; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/115.0.5790.166 Mobile Safari/537.36 XiaoMi/MiuiBrowser/10.8.1 LT-APP/45/158/YM-RT/', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Origin': ' 'Referer': ' 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7', 'Cookie': f'token={ck}'}
        response = requests.post(url, headers=headers, data=data)
        response_dict = response.json()
        info = response_dict.get('info')
        print(f'[{(index + 1)}] {money_num} :{info}')
def _discretize_probability_distribution(unnormalized_probabilities, epsilon):
    """Quantize a probability distribution onto a power-of-two bin grid.

    The sub-bit precision is chosen so each entry's rounding error stays
    below ``epsilon``. Returns ``(discretized, bin_count,
    sub_bit_precision)``; cumulative sums are rounded (not the entries
    themselves) so the integer counts are intended to sum to
    ``bin_count`` — this relies on the semantics of the ``_partial_sums``
    and ``_differences`` helpers defined elsewhere in this file.
    """
    n = len(unnormalized_probabilities)
    sub_bit_precision = max(0, int(math.ceil(-math.log(epsilon * n, 2))))
    bin_count = (2 ** sub_bit_precision) * n
    running_totals = list(_partial_sums(unnormalized_probabilities))
    total = running_totals[-1]
    # Round-half-up each normalized cumulative value to a bin boundary,
    # then take adjacent differences to get per-entry counts.
    rounded_cumulative = [int(math.floor(c / total * bin_count + 0.5)) for c in running_totals]
    discretized = list(_differences(rounded_cumulative))
    return discretized, bin_count, sub_bit_precision
def test_opwiseclinker_straightforward():
    """OpWiseCLinker compiles and runs a small graph containing bad_sub.

    NOTE(review): the two branches expect different outputs for the same
    inputs — presumably `bad_sub` has a C implementation that differs
    from its Python fallback on purpose, so the expected value depends
    on whether a C compiler (config.cxx) is available. Confirm against
    the definition of `bad_sub`.
    """
    (x, y, z) = inputs()
    e = add(mul(add(x, y), div(x, y)), bad_sub(bad_sub(x, y), z))
    lnk = OpWiseCLinker().accept(FunctionGraph([x, y, z], [e]))
    fn = make_function(lnk)
    if config.cxx:
        # C implementation path.
        assert (fn(2.0, 2.0, 2.0) == 2.0)
    else:
        # Pure-Python fallback path.
        assert (fn(2.0, 2.0, 2.0) == (- 6))
class WipeExecutor(ActionExecutor):
    """Executor for the "wipe" action: marks the target object CLEAN.

    Preconditions (checked in :meth:`check_wipe`): the character must be
    close to the object and must be holding something (the wiping tool)
    in at least one hand.
    """

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        """Yield the successor state after wiping the object referenced
        by the current script line; yields nothing when the object is
        missing or preconditions fail (errors are recorded on *info*)."""
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_wipe(state, node, info, char_index):
            new_node = node.copy()
            # Wiping removes DIRTY and adds CLEAN on the object's state set.
            new_node.states.discard(State.DIRTY)
            new_node.states.add(State.CLEAN)
            if modify:
                (yield state.change_state([ChangeNode(new_node)], in_place=in_place))
            else:
                (yield state)

    def check_wipe(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
        """Return True iff the character can wipe *node*; records a
        descriptive error on *info* otherwise."""
        char_node = _get_character_node(state, char_index)
        if (not _is_character_close_to(state, node, char_index)):
            info.error('{} is not close to {}', char_node, node)
            return False
        nodes_in_hands = _find_nodes_from(state, char_node, [Relation.HOLDS_RH, Relation.HOLDS_LH])
        if (len(nodes_in_hands) == 0):
            info.error('{} does not hold anything in hands', char_node)
            # Fix: previously a bare `return` (None); return False explicitly
            # for consistency with the other failure path (same truthiness).
            return False
        return True
class QuantopianUSFuturesCalendar(TradingCalendar):
    """US futures trading calendar (America/New_York).

    Sessions open at 18:01 ET on the *previous* calendar day
    (open_offset = -1) and close at 18:00 ET.

    NOTE(review): in upstream zipline these calendar attributes are
    defined as @property methods; the decorators appear to have been
    stripped here, turning them into plain class attributes — confirm
    this matches the TradingCalendar base-class contract.
    """

    def __init__(self, start=Timestamp('2000-01-01', tz=UTC), end=end_default):
        super(QuantopianUSFuturesCalendar, self).__init__(start=start, end=end)

    name = 'us_futures'
    tz = timezone('America/New_York')
    # (cutoff_date, open_time) pairs; None means "from the beginning".
    open_times = ((None, time(18, 1)),)
    close_times = ((None, time(18)),)
    # Sessions start on the prior calendar day.
    open_offset = (- 1)

    def execution_time_from_open(self, open_dates):
        # Shift nominal opens by the futures execution-time offset (hours).
        return (open_dates + Timedelta(hours=FUTURES_OPEN_TIME_OFFSET))

    def execution_time_from_close(self, close_dates):
        return (close_dates + Timedelta(hours=FUTURES_CLOSE_TIME_OFFSET))

    def regular_holidays(self):
        # Futures markets observe a reduced holiday set.
        return HolidayCalendar([USNewYearsDay, GoodFriday, Christmas])
def gen_visualization(image, decisions):
    """Build a side-by-side visualization of token pruning.

    The output concatenates the original image with the image rendered
    after each of the three pruning stages (pruned tokens masked out).
    """
    keep_indices = get_keep_indices(decisions)
    img = np.asarray(image)
    # Cut the 224x224x3 image into 196 patch tokens of 16x16x3
    # (14*16 = 224 per spatial axis).
    tokens = img.reshape(14, 16, 14, 16, 3).swapaxes(1, 2).reshape(196, 16, 16, 3)
    stage_images = [
        recover_image(gen_masked_tokens(tokens, keep_indices[stage]))
        for stage in range(3)
    ]
    return np.concatenate([img] + stage_images, axis=1)
class OverlapPatchEmbed(nn.Module):
    """Image-to-patch embedding with overlapping patches.

    A strided convolution (kernel > stride, "same"-style padding)
    produces overlapping patch embeddings, which are flattened to a
    token sequence and layer-normalized.
    """

    def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        assert (max(patch_size) > stride), 'Set larger patch_size than stride'
        self.patch_size = patch_size
        pad = (patch_size[0] // 2, patch_size[1] // 2)
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, padding=pad)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """Return (tokens, feat_size): tokens are (B, H*W, C); feat_size
        is the post-convolution spatial (H, W)."""
        x = self.proj(x)
        feat_size = x.shape[-2:]
        tokens = x.flatten(2).transpose(1, 2)
        return self.norm(tokens), feat_size
class Escpos(object, metaclass=ABCMeta):
    """Abstract base class for ESC/POS printers.

    Concrete subclasses provide the transport (`open`, `close`, `_raw`,
    `_read`); this class implements the ESC/POS command protocol
    (images, QR codes, barcodes, text styling, cutting, cash drawer,
    buzzer, status queries).

    NOTE(review): several definitions below look like they lost their
    decorators in extraction: `device` is defined twice (presumably
    @property plus @device.setter) and `check_barcode` / `_int_low_high`
    take no `self` (presumably @staticmethod). Confirm against upstream
    python-escpos.
    """
    # False = not yet initialized; None = initialization failed/closed.
    _device: Union[(Literal[False], Literal[None], object)] = False

    def __init__(self, profile=None, magic_encode_args=None, **kwargs) -> None:
        """Resolve the printer capability profile and set up encoding."""
        self.profile = get_profile(profile)
        self.magic = MagicEncode(self, **(magic_encode_args or {}))

    def __del__(self):
        # Best-effort close of the underlying device on GC.
        self.close()

    def device(self) -> Union[(Literal[None], object)]:
        """Lazily open and return the underlying device object."""
        if (self._device is False):
            self._device = None
            self.open()
        return self._device

    def device(self, new_device: Union[(Literal[False], Literal[None], object)]):
        # NOTE(review): presumably the @device.setter half of a property.
        self._device = new_device

    def open(self):
        """Open the device connection (overridden by transports)."""
        pass

    def close(self):
        """Close the device connection (overridden by transports)."""
        pass

    def _raw(self, msg: bytes) -> None:
        """Send raw bytes to the printer (overridden by transports)."""
        pass

    def _read(self) -> bytes:
        """Read raw bytes from the printer; not all transports support it."""
        raise NotImplementedError()

    def image(self, img_source, high_density_vertical: bool=True, high_density_horizontal: bool=True, impl: str='bitImageRaster', fragment_height: int=960, center: bool=False) -> None:
        """Print an image using one of three ESC/POS image commands.

        Tall images are split into fragments of at most
        ``fragment_height`` rows (some printers corrupt large images).
        When ``center`` is set and the profile declares a pixel width,
        the image is centered (and width-checked) against it.
        """
        im = EscposImage(img_source)
        try:
            if (self.profile.profile_data['media']['width']['pixels'] == 'Unknown'):
                print(('The media.width.pixel field of the printer profile is not set. ' + 'The center flag will have no effect.'))
            max_width = int(self.profile.profile_data['media']['width']['pixels'])
            if (im.width > max_width):
                raise ImageWidthError(f'{im.width} > {max_width}')
            if center:
                im.center(max_width)
        except KeyError:
            # Profile has no media width info: skip centering silently.
            pass
        except ValueError:
            # Width field not an int ('Unknown'): skip centering silently.
            pass
        if (im.height > fragment_height):
            # Recurse on fragments, then stop.
            fragments = im.split(fragment_height)
            for fragment in fragments:
                self.image(fragment, high_density_vertical=high_density_vertical, high_density_horizontal=high_density_horizontal, impl=impl, fragment_height=fragment_height)
            return
        if (impl == 'bitImageRaster'):
            # GS v 0: raster bit image.
            density_byte = ((0 if high_density_horizontal else 1) + (0 if high_density_vertical else 2))
            header = ((((GS + b'v0') + bytes((density_byte,))) + self._int_low_high(im.width_bytes, 2)) + self._int_low_high(im.height, 2))
            self._raw((header + im.to_raster_format()))
        if (impl == 'graphics'):
            # GS ( L: store then print graphics data.
            img_header = (self._int_low_high(im.width, 2) + self._int_low_high(im.height, 2))
            tone = b'0'
            colors = b'1'
            ym = (b'\x01' if high_density_vertical else b'\x02')
            xm = (b'\x01' if high_density_horizontal else b'\x02')
            header = ((((tone + xm) + ym) + colors) + img_header)
            raster_data = im.to_raster_format()
            self._image_send_graphics_data(b'0', b'p', (header + raster_data))
            self._image_send_graphics_data(b'0', b'2', b'')
        if (impl == 'bitImageColumn'):
            # ESC *: column-format bit image, one stripe per line.
            density_byte = ((1 if high_density_horizontal else 0) + (32 if high_density_vertical else 0))
            header = (((ESC + b'*') + six.int2byte(density_byte)) + self._int_low_high(im.width, 2))
            # Temporarily set 16-dot line spacing so stripes abut.
            outp = [((ESC + b'3') + six.int2byte(16))]
            for blob in im.to_column_format(high_density_vertical):
                outp.append(((header + blob) + b'\n'))
            # Restore default line spacing.
            outp.append((ESC + b'2'))
            self._raw(b''.join(outp))

    def _image_send_graphics_data(self, m, fn, data) -> None:
        """Wrap a GS ( L graphics payload with its length header."""
        header = self._int_low_high((len(data) + 2), 2)
        self._raw((((((GS + b'(L') + header) + m) + fn) + data))

    def qr(self, content, ec=QR_ECLEVEL_L, size=3, model=QR_MODEL_2, native=False, center=False, impl=None, image_arguments: Optional[dict]=None) -> None:
        """Print a QR code, either rendered as an image (default) or via
        the printer's native QR commands (``native=True``)."""
        if (ec not in [QR_ECLEVEL_L, QR_ECLEVEL_M, QR_ECLEVEL_H, QR_ECLEVEL_Q]):
            raise ValueError('Invalid error correction level')
        if (not (1 <= size <= 16)):
            raise ValueError('Invalid block size (must be 1-16)')
        if (model not in [QR_MODEL_1, QR_MODEL_2, QR_MICRO]):
            raise ValueError('Invalid QR model (must be one of QR_MODEL_1, QR_MODEL_2, QR_MICRO)')
        if (content == ''):
            # Nothing to encode.
            return
        if (not native):
            # Software rendering through the qrcode library + self.image().
            if impl:
                warnings.warn('Parameter impl is deprecated in favor of image_arguments and will be dropped in a future release.', DeprecationWarning)
            if (not image_arguments):
                image_arguments = {}
            if impl:
                image_arguments['impl'] = impl
            if ('center' not in image_arguments):
                image_arguments['center'] = center
            if (model != QR_MODEL_2):
                raise ValueError('Invalid QR model for qrlib rendering (must be QR_MODEL_2)')
            python_qr_ec = {QR_ECLEVEL_H: qrcode.constants.ERROR_CORRECT_H, QR_ECLEVEL_L: qrcode.constants.ERROR_CORRECT_L, QR_ECLEVEL_M: qrcode.constants.ERROR_CORRECT_M, QR_ECLEVEL_Q: qrcode.constants.ERROR_CORRECT_Q}
            qr_code = qrcode.QRCode(version=None, box_size=size, border=1, error_correction=python_qr_ec[ec])
            qr_code.add_data(content)
            qr_code.make(fit=True)
            qr_img = qr_code.make_image()
            im = qr_img._img.convert('RGB')
            self.text('\n')
            self.image(im, **image_arguments)
            self.text('\n')
            self.text('\n')
            return
        if center:
            raise NotImplementedError('Centering not implemented for native QR rendering')
        # Native rendering: GS ( k function sequence (model, size, EC,
        # store data, print).
        cn = b'1'
        self._send_2d_code_data(six.int2byte(65), cn, (six.int2byte((48 + model)) + six.int2byte(0)))
        self._send_2d_code_data(six.int2byte(67), cn, six.int2byte(size))
        self._send_2d_code_data(six.int2byte(69), cn, six.int2byte((48 + ec)))
        self._send_2d_code_data(six.int2byte(80), cn, content.encode('utf-8'), b'0')
        self._send_2d_code_data(six.int2byte(81), cn, b'', b'0')

    def _send_2d_code_data(self, fn, cn, data, m=b'') -> None:
        """Emit one GS ( k 2D-code command with computed length header."""
        if ((len(m) > 1) or (len(cn) != 1) or (len(fn) != 1)):
            raise ValueError('cn and fn must be one byte each.')
        header = self._int_low_high(((len(data) + len(m)) + 2), 2)
        self._raw(((((((GS + b'(k') + header) + cn) + fn) + m) + data))

    def _int_low_high(inp_number: int, out_bytes: int) -> bytes:
        """Encode an int as ``out_bytes`` little-endian bytes.

        NOTE(review): takes no `self` — presumably a stripped
        @staticmethod; also `max_input` evaluates to 256 << (bits-1),
        which exceeds the true n-byte maximum — matches upstream.
        """
        max_input = (256 << ((out_bytes * 8) - 1))
        if (not (1 <= out_bytes <= 4)):
            raise ValueError('Can only output 1-4 bytes')
        if (not (0 <= inp_number <= max_input)):
            raise ValueError(f'Number too large. Can only output up to {max_input} in {out_bytes} bytes')
        outp = b''
        for _ in range(0, out_bytes):
            outp += six.int2byte((inp_number % 256))
            inp_number //= 256
        return outp

    def charcode(self, code: str='AUTO') -> None:
        """Select a character code table, or automatic encoding ('AUTO')."""
        if (code.upper() == 'AUTO'):
            self.magic.force_encoding(False)
        else:
            self.magic.force_encoding(code)

    def check_barcode(bc: str, code: str):
        """Validate *code* against the length bounds and regex registered
        for barcode type *bc*.

        NOTE(review): takes no `self` — presumably a stripped @staticmethod.
        """
        if (bc not in BARCODE_FORMATS):
            return False
        (bounds, regex) = BARCODE_FORMATS[bc]
        return (any(((bound[0] <= len(code) <= bound[1]) for bound in bounds)) and re_match(regex, code))

    def _dpi(self) -> int:
        """Return the printer DPI from the profile, deriving it from
        pixel/mm width when absent, defaulting to 180."""
        try:
            dpi = int(self.profile.profile_data['media']['dpi'])
        except (KeyError, TypeError, ValueError):
            try:
                px = self.profile.profile_data['media']['width']['pixels']
                mm = self.profile.profile_data['media']['width']['mm']
                # Subtract margins before converting mm to inches.
                mm -= 10
                dpi = int((px / (mm / 25.4)))
            except (KeyError, TypeError, ZeroDivisionError):
                dpi = 180
                print(f"No printer's DPI info was found: Defaulting to {dpi}.")
            # Cache the derived value back into the profile.
            self.profile.profile_data['media']['dpi'] = dpi
        return dpi

    def barcode(self, code, bc, height: int=64, width: int=3, pos: str='BELOW', font: str='A', align_ct: bool=True, function_type=None, check: bool=True, force_software: Union[(bool, str)]=False) -> None:
        """Print a barcode, preferring the printer's hardware renderer
        and falling back to (or forced into) software image rendering."""
        hw_modes = ['barcodeA', 'barcodeB']
        sw_modes = ['graphics', 'bitImageColumn', 'bitImageRaster']
        capable = {'hw': ([mode for mode in hw_modes if self.profile.supports(mode)] or None), 'sw': ([mode for mode in sw_modes if self.profile.supports(mode)] or None)}
        if (((not capable['hw']) and (not capable['sw'])) or ((not capable['sw']) and force_software)):
            raise BarcodeTypeError(f"Profile {self.profile.profile_data['name']} - hw barcode: {capable['hw']}, sw barcode: {capable['sw']}")
        # Normalize the barcode name to look it up in both renderer tables.
        bc_alnum = ''.join([char for char in bc.upper() if char.isalnum()])
        capable_bc = {'hw': HW_BARCODE_NAMES.get(bc_alnum), 'sw': SW_BARCODE_NAMES.get(bc_alnum)}
        if (not any([*capable_bc.values()])):
            raise BarcodeTypeError(f'Not supported or wrong barcode name {bc}.')
        if (force_software or (not capable['hw']) or (not capable_bc['hw'])):
            # Software path: render via python-barcode to an image.
            assert (capable['sw'] is not None)
            impl = capable['sw'][0]
            if ((force_software in capable['sw']) and isinstance(force_software, str)):
                impl = force_software
            print(f'Using {impl} software barcode renderer')
            bc = (capable_bc['sw'] or bc)
            # mm per dot, used to convert dot sizes to mm for the renderer.
            mmxpt = (25.4 / self._dpi())
            self._sw_barcode(bc, code, impl=impl, module_height=(height * mmxpt), module_width=(width * mmxpt), text_distance=3, font_size=9, center=align_ct)
            return
        print('Using hardware barcode renderer')
        bc = (capable_bc['hw'] or bc)
        self._hw_barcode(code, bc, height, width, pos, font, align_ct, function_type, check)

    def _hw_barcode(self, code: str, bc: str, height: int=64, width: int=3, pos: str='BELOW', font: str='A', align_ct: bool=True, function_type: Optional[str]=None, check: bool=True) -> None:
        """Emit the GS k hardware barcode command sequence.

        Function type A terminates data with NUL; type B prefixes the
        data with its length byte.
        """
        # Guess the function type from which table contains the barcode.
        ft_guess = [ft for ft in ['A', 'B'] if (bc in BARCODE_TYPES.get(ft, {'': b''}))]
        ft_guess = (ft_guess or [''])
        function_type = (function_type or ft_guess[0])
        if ((not function_type) or (not BARCODE_TYPES.get(function_type.upper()))):
            raise BarcodeTypeError(f"Barcode '{bc}' not valid for barcode function type {function_type}")
        bc_types = BARCODE_TYPES[function_type.upper()]
        if (check and (not self.check_barcode(bc, code))):
            raise BarcodeCodeError(f"Barcode '{code}' not in a valid format for type '{bc}'")
        if align_ct:
            self._raw(TXT_STYLE['align']['center'])
        if (1 <= height <= 255):
            self._raw((BARCODE_HEIGHT + six.int2byte(height)))
        else:
            raise BarcodeSizeError(f'height = {height}')
        if (2 <= width <= 6):
            self._raw((BARCODE_WIDTH + six.int2byte(width)))
        else:
            raise BarcodeSizeError(f'width = {width}')
        if (font.upper() == 'B'):
            self._raw(BARCODE_FONT_B)
        else:
            self._raw(BARCODE_FONT_A)
        # Human-readable text position relative to the bars.
        if (pos.upper() == 'OFF'):
            self._raw(BARCODE_TXT_OFF)
        elif (pos.upper() == 'BOTH'):
            self._raw(BARCODE_TXT_BTH)
        elif (pos.upper() == 'ABOVE'):
            self._raw(BARCODE_TXT_ABV)
        else:
            self._raw(BARCODE_TXT_BLW)
        self._raw(bc_types[bc.upper()])
        if (function_type.upper() == 'B'):
            # Type B: length-prefixed data.
            self._raw(six.int2byte(len(code)))
        if code:
            self._raw(code.encode())
        else:
            raise BarcodeCodeError()
        if (function_type.upper() == 'A'):
            # Type A: NUL-terminated data.
            self._raw(NUL)

    def _sw_barcode(self, barcode_type: str, data: str, impl: str='bitImageColumn', module_height: Union[(int, float)]=5, module_width: Union[(int, float)]=0.2, text_distance: Union[(int, float)]=5, font_size: int=10, center: bool=True):
        """Render a barcode to an image with python-barcode and print it."""
        image_writer = ImageWriter()
        if (barcode_type not in barcode.PROVIDED_BARCODES):
            raise BarcodeTypeError(f'Barcode type {barcode_type} not supported by software barcode renderer')
        barcode_class = barcode.get_barcode_class(barcode_type)
        my_code = barcode_class(data, writer=image_writer)
        my_code.render(writer_options={'module_height': module_height, 'module_width': module_width, 'quiet_zone': 0, 'text_distance': text_distance, 'font_size': font_size, 'dpi': self._dpi()})
        image = my_code.writer._image
        self.image(image, impl=impl, center=center)

    def text(self, txt: str) -> None:
        """Print text, encoded via the magic encoder."""
        self.magic.write(str(txt))

    def textln(self, txt: str='') -> None:
        """Print text followed by a newline."""
        self.text(f'''{txt}
''')

    def ln(self, count: int=1) -> None:
        """Print ``count`` newlines (0 is a no-op; negative raises)."""
        if (count < 0):
            raise ValueError('Count cannot be lesser than 0')
        if (count > 0):
            self.text(('\n' * count))

    def block_text(self, txt, font='0', columns=None) -> None:
        """Print text wrapped to the printer's column width."""
        col_count = (self.profile.get_columns(font) if (columns is None) else columns)
        self.text(textwrap.fill(txt, col_count))

    def set(self, align: Optional[str]=None, font: Optional[str]=None, bold: Optional[bool]=None, underline: Optional[int]=None, width: Optional[int]=None, height: Optional[int]=None, density: Optional[int]=None, invert: Optional[bool]=None, smooth: Optional[bool]=None, flip: Optional[bool]=None, normal_textsize: Optional[bool]=None, double_width: Optional[bool]=None, double_height: Optional[bool]=None, custom_size: Optional[bool]=None) -> None:
        """Set text properties; None parameters leave the current
        printer state untouched. ``custom_size`` uses GS ! with
        width/height multipliers 1-8."""
        if custom_size:
            if (isinstance(width, int) and isinstance(height, int) and (1 <= width <= 8) and (1 <= height <= 8)):
                size_byte = (TXT_STYLE['width'][width] + TXT_STYLE['height'][height])
                self._raw((TXT_SIZE + six.int2byte(size_byte)))
            else:
                raise SetVariableError()
        elif (normal_textsize or double_height or double_width):
            # Reset, then apply the requested doubling combination.
            self._raw(TXT_NORMAL)
            if (double_width and double_height):
                self._raw(TXT_STYLE['size']['2x'])
            elif double_width:
                self._raw(TXT_STYLE['size']['2w'])
            elif double_height:
                self._raw(TXT_STYLE['size']['2h'])
            else:
                self._raw(TXT_STYLE['size']['normal'])
        else:
            pass
        if (flip is not None):
            self._raw(TXT_STYLE['flip'][flip])
        if (smooth is not None):
            self._raw(TXT_STYLE['smooth'][smooth])
        if (bold is not None):
            self._raw(TXT_STYLE['bold'][bold])
        if (underline is not None):
            self._raw(TXT_STYLE['underline'][underline])
        if (font is not None):
            self._raw(SET_FONT(six.int2byte(self.profile.get_font(font))))
        if (align is not None):
            self._raw(TXT_STYLE['align'][align])
        # density 9 is the sentinel for "keep current density".
        if ((density is not None) and (density != 9)):
            self._raw(TXT_STYLE['density'][density])
        if (invert is not None):
            self._raw(TXT_STYLE['invert'][invert])

    def set_with_default(self, align: Optional[str]='left', font: Optional[str]='a', bold: Optional[bool]=False, underline: Optional[int]=0, width: Optional[int]=1, height: Optional[int]=1, density: Optional[int]=9, invert: Optional[bool]=False, smooth: Optional[bool]=False, flip: Optional[bool]=False, double_width: Optional[bool]=False, double_height: Optional[bool]=False, custom_size: Optional[bool]=False) -> None:
        """Like :meth:`set`, but unset properties revert to defaults."""
        normal_textsize = ((not custom_size) and (not double_width) and (not double_height))
        self.set(align=align, font=font, bold=bold, underline=underline, width=width, height=height, density=density, invert=invert, smooth=smooth, flip=flip, normal_textsize=normal_textsize, double_width=double_width, double_height=double_height, custom_size=custom_size)

    def line_spacing(self, spacing: Optional[int]=None, divisor: int=180) -> None:
        """Set line spacing to spacing/divisor inches; None resets."""
        if (spacing is None):
            self._raw(LINESPACING_RESET)
            return
        if (divisor not in LINESPACING_FUNCS):
            raise ValueError('divisor must be either 360, 180 or 60')
        if ((divisor in [360, 180]) and (not (0 <= spacing <= 255))):
            raise ValueError('spacing must be a int between 0 and 255 when divisor is 360 or 180')
        if ((divisor == 60) and (not (0 <= spacing <= 85))):
            raise ValueError('spacing must be a int between 0 and 85 when divisor is 60')
        self._raw((LINESPACING_FUNCS[divisor] + six.int2byte(spacing)))

    def cut(self, mode: str='FULL', feed: bool=True) -> None:
        """Cut the paper ('FULL' or 'PART'); feed first unless disabled.

        When the profile lacks the requested cut capability, the other
        cut type is used as a fallback.
        """
        if (not feed):
            # GS V 66 0: cut without feeding.
            self._raw((((GS + b'V') + six.int2byte(66)) + b'\x00'))
            return
        self.print_and_feed(6)
        mode = mode.upper()
        if (mode not in ('FULL', 'PART')):
            raise ValueError("Mode must be one of ('FULL', 'PART')")
        if (mode == 'PART'):
            if self.profile.supports('paperPartCut'):
                self._raw(PAPER_PART_CUT)
            elif self.profile.supports('paperFullCut'):
                self._raw(PAPER_FULL_CUT)
        elif (mode == 'FULL'):
            if self.profile.supports('paperFullCut'):
                self._raw(PAPER_FULL_CUT)
            elif self.profile.supports('paperPartCut'):
                self._raw(PAPER_PART_CUT)

    def cashdraw(self, pin) -> None:
        """Kick the cash drawer on pin 2 or 5, or send a custom decimal
        kick sequence when *pin* is an argument tuple/sequence."""
        if (pin == 2):
            self._raw(CD_KICK_2)
        elif (pin == 5):
            self._raw(CD_KICK_5)
        else:
            try:
                self._raw(CD_KICK_DEC_SEQUENCE(*pin))
            except TypeError as err:
                raise CashDrawerError(str(err))

    def linedisplay_select(self, select_display: bool=False) -> None:
        """Route output to the line display (True) or the printer (False)."""
        if select_display:
            self._raw(LINE_DISPLAY_OPEN)
        else:
            self._raw(LINE_DISPLAY_CLOSE)

    def linedisplay_clear(self) -> None:
        """Clear the attached line display."""
        self._raw(LINE_DISPLAY_CLEAR)

    def linedisplay(self, text: str) -> None:
        """Show *text* on the line display, then switch back to printing."""
        self.linedisplay_select(select_display=True)
        self.linedisplay_clear()
        self.text(text)
        self.linedisplay_select(select_display=False)

    def hw(self, hw: str) -> None:
        """Hardware operation: 'INIT', 'SELECT' or 'RESET'; others ignored."""
        if (hw.upper() == 'INIT'):
            self._raw(HW_INIT)
        elif (hw.upper() == 'SELECT'):
            self._raw(HW_SELECT)
        elif (hw.upper() == 'RESET'):
            self._raw(HW_RESET)
        else:
            pass

    def print_and_feed(self, n: int=1) -> None:
        """ESC d n: print buffered data and feed n lines (0-255)."""
        if (0 <= n <= 255):
            self._raw(((ESC + b'd') + six.int2byte(n)))
        else:
            raise ValueError('n must be betwen 0 and 255')

    def control(self, ctl: str, count: int=5, tab_size: int=8) -> None:
        """Send a control character; 'HT' also (re)programs tab stops
        at multiples of ``tab_size`` (count*tab_size must stay < 256)."""
        if (ctl.upper() == 'LF'):
            self._raw(CTL_LF)
        elif (ctl.upper() == 'FF'):
            self._raw(CTL_FF)
        elif (ctl.upper() == 'CR'):
            self._raw(CTL_CR)
        elif (ctl.upper() == 'HT'):
            if (not ((0 <= count <= 32) and (1 <= tab_size <= 255) and ((count * tab_size) < 256))):
                raise TabPosError()
            else:
                # ESC D: set tab positions, terminated by NUL.
                self._raw(CTL_SET_HT)
                for iterator in range(1, count):
                    self._raw(six.int2byte((iterator * tab_size)))
                self._raw(NUL)
        elif (ctl.upper() == 'VT'):
            self._raw(CTL_VT)

    def panel_buttons(self, enable: bool=True) -> None:
        """Enable or disable the printer's panel buttons."""
        if enable:
            self._raw(PANEL_BUTTON_ON)
        else:
            self._raw(PANEL_BUTTON_OFF)

    def query_status(self, mode: bytes) -> bytes:
        """Send a real-time status request and return the raw reply."""
        self._raw(mode)
        status = self._read()
        return status

    def is_online(self) -> bool:
        """Return True when the printer reports itself online; an empty
        reply is treated as offline."""
        status = self.query_status(RT_STATUS_ONLINE)
        if (len(status) == 0):
            return False
        return (not (status[0] & RT_MASK_ONLINE))

    def paper_status(self) -> int:
        """Return paper state: 0 = no paper, 1 = near end, 2 = OK.

        An empty reply is optimistically reported as 2 (OK).
        """
        status = self.query_status(RT_STATUS_PAPER)
        if (len(status) == 0):
            return 2
        if ((status[0] & RT_MASK_NOPAPER) == RT_MASK_NOPAPER):
            return 0
        if ((status[0] & RT_MASK_LOWPAPER) == RT_MASK_LOWPAPER):
            return 1
        if ((status[0] & RT_MASK_PAPER) == RT_MASK_PAPER):
            return 2
        return 0

    def target(self, type: str='ROLL') -> None:
        """Select the print target: roll paper or slip station."""
        if (type.upper() == 'ROLL'):
            self._raw(SHEET_ROLL_MODE)
        elif (type.upper() == 'SLIP'):
            self._raw(SHEET_SLIP_MODE)
        else:
            raise ValueError('Unsupported target')

    def eject_slip(self) -> None:
        """Eject the slip without printing."""
        self._raw(SLIP_EJECT)

    def print_and_eject_slip(self) -> None:
        """Print buffered slip data, then eject the slip."""
        self._raw(SLIP_PRINT_AND_EJECT)

    def use_slip_only(self) -> None:
        """Select the slip station as the only print target."""
        self._raw(SLIP_SELECT)

    def buzzer(self, times: int=2, duration: int=4) -> None:
        """Sound the buzzer ``times`` times (1-9) with unit ``duration`` (1-9)."""
        if (not (1 <= times <= 9)):
            raise ValueError('times must be between 1 and 9')
        if (not (1 <= duration <= 9)):
            raise ValueError('duration must be between 1 and 9')
        self._raw(((BUZZER + six.int2byte(times)) + six.int2byte(duration)))
class TweetCache(db.Entity):
    """Pony ORM entity caching fetched tweets by id.

    NOTE(review): the bare ``_session`` expressions below look like
    stripped decorators (presumably Pony's ``@db_session`` bound to a
    local name); as written they are no-op name references and will
    raise NameError unless ``_session`` exists. Likewise ``fetch``/
    ``set``/``purge`` take no ``self`` (stripped @staticmethod?) and
    ``tweet`` reads like a stripped @property. Confirm against the
    original source.
    """
    tweet_id = PrimaryKey(int, size=64)
    # Raw tweet JSON as returned by the Twitter API.
    data = Required(Json)
    blocked = Optional(bool, sql_default=False)
    has_media = Optional(bool, sql_default=False)
    # Unix timestamp of insertion; indexed for purge() scans.
    created_at = Required(int, size=64, index=True)

    _session
    def fetch(tweet_id: int) -> typing.Optional['TweetCache']:
        """Return the cached entry for *tweet_id*, or None on a miss."""
        tweet = TweetCache.get(tweet_id=tweet_id)
        if tweet:
            log.debug(f'[SYSTEM] Tweet {tweet_id} cache hit')
        return tweet

    _session
    def set(tweet: tweepy.models.Status, has_media: bool=False, blocked: bool=False) -> 'TweetCache':
        """Insert (or replace) the cache entry for *tweet*."""
        cache = TweetCache.get(tweet_id=tweet.id)
        if cache:
            # Replace any existing row; commit so the delete is flushed
            # before re-inserting the same primary key.
            log.warning(f'[SYSTEM] Overwriting cache entry for tweet {tweet.id} early')
            cache.delete()
            commit()
        cache = TweetCache(tweet_id=tweet.id, data=tweet._json, blocked=blocked, has_media=has_media, created_at=int(time.time()))
        return cache

    _session
    def purge(cutoff=86400):
        """Delete entries older than *cutoff* seconds; return the count."""
        cutoff_ts = (int(time.time()) - cutoff)
        stale_count = count((c for c in TweetCache if (c.created_at <= cutoff_ts)))
        if stale_count:
            delete((c for c in TweetCache if (c.created_at <= cutoff_ts)))
        return stale_count

    def tweet(self):
        # Rehydrate a tweepy Status object from the cached JSON.
        return tweepy.models.Status.parse(api, self.data)
def freq_gauge(stdscr, pos_y, pos_x, size, freq_data):
    """Draw a frequency gauge on the curses screen.

    When 'max' is present, renders a percentage bar between 'min' and
    'max' with the current value printed to the right; otherwise falls
    back to the simple gauge.
    """
    label = freq_data['name'] if 'name' in freq_data else ''
    cur_text = unit_to_string(freq_data['cur'], 'k', 'Hz')
    if 'max' not in freq_data:
        basic_gauge_simple(stdscr, pos_y, pos_x, size, freq_data)
        return
    f_min = freq_data['min']
    f_max = freq_data['max']
    # Degenerate range (min == max) is shown as a full bar.
    if f_min != f_max:
        percent = (freq_data['cur'] - f_min) / (f_max - f_min) * 100
    else:
        percent = 100
    gauge_data = {
        'name': label,
        'color': NColors.cyan(),
        'online': freq_data['online'] if 'online' in freq_data else True,
        'values': [(percent, NColors.green())],
        'mleft': unit_to_string(f_min, 'k', 'Hz'),
        'mright': unit_to_string(f_max, 'k', 'Hz'),
    }
    # Reserve 8 columns on the right for the current-value readout.
    basic_gauge(stdscr, pos_y, pos_x, size - 8, gauge_data, bar=':')
    stdscr.addstr(pos_y, pos_x + size - 6, cur_text, NColors.italic())
class _LazyConfigMapping(OrderedDict):
def __init__(self, mapping):
self._mapping = mapping
self._extra_content = {}
self._modules = {}
def __getitem__(self, key):
if (key in self._extra_content):
return self._extra_content[key]
if (key not in self._mapping):
raise KeyError(key)
value = self._mapping[key]
module_name = model_type_to_module_name(key)
if (module_name not in self._modules):
self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models')
return getattr(self._modules[module_name], value)
def keys(self):
return (list(self._mapping.keys()) + list(self._extra_content.keys()))
def values(self):
return ([self[k] for k in self._mapping.keys()] + list(self._extra_content.values()))
def items(self):
return ([(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items()))
def __iter__(self):
return iter((list(self._mapping.keys()) + list(self._extra_content.keys())))
def __contains__(self, item):
return ((item in self._mapping) or (item in self._extra_content))
def register(self, key, value):
if (key in self._mapping.keys()):
raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
self._extra_content[key] = value |
def test_poetry_with_non_default_secondary_source(fixture_dir: FixtureDirGetter, with_simple_keyring: None) -> None:
    """A project with a non-default secondary source yields a pool holding
    PyPI (with DEFAULT priority) plus the 'foo' legacy repository."""
    poetry = Factory().create_poetry(fixture_dir('with_non_default_secondary_source'))
    pool = poetry.pool
    assert pool.has_repository('PyPI')
    assert isinstance(pool.repository('PyPI'), PyPiRepository)
    assert pool.get_priority('PyPI') is Priority.DEFAULT
    assert pool.has_repository('foo')
    assert isinstance(pool.repository('foo'), LegacyRepository)
    assert {repo.name for repo in pool.repositories} == {'PyPI', 'foo'}
class Effect3591(BaseEffect):
    """Passive effect: boost 'maxTargetRangeBonus' of Sensor Dampener modules
    by the container's 'scanSkillEwStrengthBonus', scaled per skill level."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Skill-sourced effects scale with the trained level; others apply once.
        skill_level = container.level if 'skill' in context else 1
        bonus = container.getModifiedItemAttr('scanSkillEwStrengthBonus') * skill_level

        def is_sensor_dampener(mod):
            return mod.item.group.name == 'Sensor Dampener'

        fit.modules.filteredItemBoost(is_sensor_dampener, 'maxTargetRangeBonus', bonus, **kwargs)
class PerMessageDeflate(Extension):
    """Per-Message Deflate WebSocket extension (RFC 7692).

    Compresses outgoing data frames and decompresses incoming ones, honoring
    the negotiated context-takeover and window-size parameters.
    """
    name = ExtensionName('permessage-deflate')
    def __init__(self, remote_no_context_takeover: bool, local_no_context_takeover: bool, remote_max_window_bits: int, local_max_window_bits: int, compress_settings: Optional[Dict[(Any, Any)]]=None) -> None:
        """Configure the extension.

        Args:
            remote_no_context_takeover: remote resets its compression context
                after each message.
            local_no_context_takeover: we reset our compression context after
                each message.
            remote_max_window_bits: remote LZ77 window size exponent (8-15).
            local_max_window_bits: local LZ77 window size exponent (8-15).
            compress_settings: extra keyword arguments for zlib.compressobj;
                must not contain 'wbits' (derived from local_max_window_bits).
        """
        if (compress_settings is None):
            compress_settings = {}
        # Internal sanity checks — the negotiation layer is expected to have
        # validated these values already.
        assert (remote_no_context_takeover in [False, True])
        assert (local_no_context_takeover in [False, True])
        assert (8 <= remote_max_window_bits <= 15)
        assert (8 <= local_max_window_bits <= 15)
        assert ('wbits' not in compress_settings)
        self.remote_no_context_takeover = remote_no_context_takeover
        self.local_no_context_takeover = local_no_context_takeover
        self.remote_max_window_bits = remote_max_window_bits
        self.local_max_window_bits = local_max_window_bits
        self.compress_settings = compress_settings
        # With context takeover the (de)compressor persists across messages;
        # without it, a fresh object is created per message in decode()/encode().
        # Negative wbits selects raw deflate (no zlib header).
        if (not self.remote_no_context_takeover):
            self.decoder = zlib.decompressobj(wbits=(- self.remote_max_window_bits))
        if (not self.local_no_context_takeover):
            self.encoder = zlib.compressobj(wbits=(- self.local_max_window_bits), **self.compress_settings)
        # True while continuation frames of a compressed message are expected
        # (RSV1 is only set on the first frame of a message).
        self.decode_cont_data = False
    def __repr__(self) -> str:
        return f'PerMessageDeflate(remote_no_context_takeover={self.remote_no_context_takeover}, local_no_context_takeover={self.local_no_context_takeover}, remote_max_window_bits={self.remote_max_window_bits}, local_max_window_bits={self.local_max_window_bits})'
    def decode(self, frame: frames.Frame, *, max_size: Optional[int]=None) -> frames.Frame:
        """Decompress an incoming data frame.

        Control frames and uncompressed messages pass through unchanged.
        Raises ProtocolError on corrupt data and PayloadTooBig when the
        inflated payload would exceed max_size.
        """
        if (frame.opcode in frames.CTRL_OPCODES):
            return frame
        if (frame.opcode is frames.OP_CONT):
            # Continuation frames are compressed iff the first frame of the
            # message was.
            if (not self.decode_cont_data):
                return frame
            if frame.fin:
                self.decode_cont_data = False
        else:
            if (not frame.rsv1):
                return frame
            # Clear RSV1 so downstream consumers see a plain data frame.
            frame = dataclasses.replace(frame, rsv1=False)
            if (not frame.fin):
                self.decode_cont_data = True
            # Without context takeover, start a fresh decompressor per message.
            if self.remote_no_context_takeover:
                self.decoder = zlib.decompressobj(wbits=(- self.remote_max_window_bits))
        data = frame.data
        if frame.fin:
            # Restore the empty deflate block stripped by the sender (RFC 7692).
            data += _EMPTY_UNCOMPRESSED_BLOCK
        # zlib treats max_length=0 as "no limit".
        max_length = (0 if (max_size is None) else max_size)
        try:
            data = self.decoder.decompress(data, max_length)
        except zlib.error as exc:
            raise exceptions.ProtocolError('decompression failed') from exc
        if self.decoder.unconsumed_tail:
            # Output hit max_length with input left over: payload is too big.
            raise exceptions.PayloadTooBig(f'over size limit (? > {max_size} bytes)')
        if (frame.fin and self.remote_no_context_takeover):
            # Release the per-message decompressor eagerly.
            del self.decoder
        return dataclasses.replace(frame, data=data)
    def encode(self, frame: frames.Frame) -> frames.Frame:
        """Compress an outgoing data frame; control frames pass through."""
        if (frame.opcode in frames.CTRL_OPCODES):
            return frame
        if (frame.opcode is not frames.OP_CONT):
            # Mark the first frame of the message as compressed.
            frame = dataclasses.replace(frame, rsv1=True)
            # Without context takeover, start a fresh compressor per message.
            if self.local_no_context_takeover:
                self.encoder = zlib.compressobj(wbits=(- self.local_max_window_bits), **self.compress_settings)
        data = (self.encoder.compress(frame.data) + self.encoder.flush(zlib.Z_SYNC_FLUSH))
        if (frame.fin and data.endswith(_EMPTY_UNCOMPRESSED_BLOCK)):
            # Strip the trailing empty deflate block on the final frame (RFC 7692).
            data = data[:(- 4)]
        if (frame.fin and self.local_no_context_takeover):
            del self.encoder
        return dataclasses.replace(frame, data=data)
def dumpgen(data, only_str):
    """Yield one formatted hex-dump line per 16-byte chunk of `data`.

    Each line is '<ADDR>: <hex bytes, split 8+8>  <ASCII rendering>'; when
    `only_str` is truthy the address column is omitted. Non-printable bytes
    render as '.'.
    """
    for index, chunk in enumerate(genchunks(data, 16)):
        parts = []
        if not only_str:
            parts.append('%08X: ' % (index * 16))
        hex_str = dump(chunk)
        parts.append(hex_str[:8 * 3])
        if len(chunk) > 8:
            # Insert an extra space between the two 8-byte hex groups.
            parts.append(' ' + hex_str[8 * 3:])
        # Pad so the ASCII column lines up even for a short final chunk.
        padding = 2
        if len(chunk) < 16:
            padding += 3 * (16 - len(chunk))
        if len(chunk) <= 8:
            padding += 1
        parts.append(' ' * padding)
        for byte in chunk:
            if not PY3K:
                # Python 2: iterating bytes yields 1-char strings.
                byte = ord(byte)
            parts.append(chr(byte) if 32 <= byte <= 126 else '.')
        yield ''.join(parts)
class ProphetNetTokenizer(PreTrainedTokenizer):
    """WordPiece tokenizer for ProphetNet (BERT-style basic + WordPiece split).

    Uses an '[X_SEP]' token in addition to the usual BERT special tokens.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names: List[str] = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file: str, do_lower_case: Optional[bool]=True, do_basic_tokenize: Optional[bool]=True, never_split: Optional[Iterable]=None, unk_token: Optional[str]='[UNK]', sep_token: Optional[str]='[SEP]', x_sep_token: Optional[str]='[X_SEP]', pad_token: Optional[str]='[PAD]', mask_token: Optional[str]='[MASK]', tokenize_chinese_chars: Optional[bool]=True, strip_accents: Optional[bool]=None, **kwargs):
        """Load the vocabulary file and set up basic + WordPiece tokenizers.

        Raises ValueError if vocab_file does not exist.
        """
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, x_sep_token=x_sep_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        self.unique_no_split_tokens.append(x_sep_token)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping for id -> token lookups.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    # NOTE(review): upstream defines vocab_size as a @property; the decorator
    # may have been lost during extraction — confirm before relying on this.
    def vocab_size(self):
        """Number of entries in the base vocabulary (excludes added tokens)."""
        return len(self.vocab)
    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as a dict."""
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Split text with the basic tokenizer (if enabled), then WordPiece."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # Tokens in never_split bypass WordPiece entirely.
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
    def _convert_token_to_id(self, token: str):
        """Map a token string to its vocabulary id (unk id if unknown)."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index: int):
        """Map a vocabulary id back to its token string (unk token if unknown)."""
        return self.ids_to_tokens.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: str):
        """Join WordPiece tokens, stitching '##' continuation pieces back together."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: Optional[bool]=False) -> List[int]:
        """Return a 0/1 mask marking special tokens in a sequence built by
        build_inputs_with_special_tokens (1 = special token)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            # Single sequence: tokens + trailing [SEP].
            return (([0] * len(token_ids_0)) + [1])
        return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return token-type ids: 0 for the first segment (+sep), 1 for the second."""
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + sep)) * [0])
        return ((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order, and return its path.

        Logs a warning if the stored indices are not consecutive.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            # save_directory is treated as a file path in this case.
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build model input ids: `tokens_0 [SEP]` or `tokens_0 [SEP] tokens_1 [SEP]`."""
        if (token_ids_1 is None):
            return (token_ids_0 + [self.sep_token_id])
        sep = [self.sep_token_id]
        return (((token_ids_0 + sep) + token_ids_1) + sep)
class PageFactory(DjangoModelFactory):
    """Factory producing Page instances; existing pages are reused by path."""

    class Meta():
        model = Page
        django_get_or_create = ('path',)

    title = factory.Faker('sentence', nb_words=5)
    # Derive the URL path from the generated title.
    path = factory.LazyAttribute(lambda page: slugify(page.title))
    content = factory.Faker('paragraph', nb_sentences=5)
    creator = factory.SubFactory(UserFactory)
def _migrate_old_base_preset_uuid(preset_manager: PresetManager, options: Options):
    """Backfill preset parent links from the legacy 'base uuid' field.

    For every custom preset without a recorded parent, try to recover the old
    base-preset uuid stored on the preset itself and register it as the parent.
    """
    for uuid, preset in preset_manager.custom_presets.items():
        if options.get_parent_for_preset(uuid) is not None:
            # Already migrated (or explicitly parented) — leave untouched.
            continue
        parent_uuid = preset.recover_old_base_uuid()
        if parent_uuid is not None:
            options.set_parent_for_preset(uuid, parent_uuid)
class ClapProcessor(ProcessorMixin):
    """Bundles a CLAP feature extractor and a RoBERTa tokenizer into one processor."""
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Returns the tokenizer encoding (with 'input_features' merged in when
        audios are also given), or a BatchEncoding of audio features only.
        Raises ValueError if neither text nor audios is provided.
        """
        sampling_rate = kwargs.pop('sampling_rate', None)
        if ((text is None) and (audios is None)):
            raise ValueError('You have to specify either text or audios. Both cannot be none.')
        if (text is not None):
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if (audios is not None):
            audio_features = self.feature_extractor(audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if ((text is not None) and (audios is not None)):
            # Both modalities: attach the audio features to the text encoding.
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif (text is not None):
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    # NOTE(review): upstream defines model_input_names as a @property; the
    # decorator may have been lost during extraction — confirm.
    def model_input_names(self):
        """Union of tokenizer and feature-extractor input names, deduplicated in order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys((tokenizer_input_names + feature_extractor_input_names)))
class LinknetDecoder(nn.Module):
    """Linknet-style decoder that upsamples encoder features stage by stage.

    The stem stage (first entry of encoder_channels / features) is dropped and
    the remaining stages are processed deepest-first, each DecoderBlock fusing
    the running tensor with the matching skip connection.
    """

    def __init__(self, encoder_channels, prefinal_channels=32, n_blocks=5, use_batchnorm=True):
        super().__init__()
        # Drop the stem stage and flip to deepest-first order to match forward().
        reversed_channels = list(encoder_channels[1:][::-1])
        channel_plan = reversed_channels + [prefinal_channels]
        self.blocks = nn.ModuleList(
            [DecoderBlock(channel_plan[idx], channel_plan[idx + 1], use_batchnorm=use_batchnorm)
             for idx in range(n_blocks)]
        )

    def forward(self, *features):
        # Skip the stem feature map and start decoding from the deepest one.
        feats = features[1:][::-1]
        x = feats[0]
        skips = feats[1:]
        for idx, block in enumerate(self.blocks):
            # Blocks beyond the available skips run without a skip connection.
            skip = skips[idx] if idx < len(skips) else None
            x = block(x, skip)
        return x
# NOTE(review): the two bare string expressions below look like mangled
# decorators (e.g. @mock.patch('beeref.view...')) whose '@mock.patch(' prefix
# was lost during extraction — the mock arguments of test_scale depend on
# them; confirm against the upstream BeeRef test suite.
('beeref.view.BeeGraphicsView.recalc_scene_rect')
('beeref.scene.BeeGraphicsScene.on_view_scale_change')
def test_scale(view_scale_mock, recalc_mock, view):
    """Scaling the view notifies the scene and recalculates the scene rect."""
    view.scale(3.3, 3.3)
    view_scale_mock.assert_called_once_with()
    recalc_mock.assert_called_once_with()
    assert (view.get_scale() == 3.3)
def curve_distance(w1, I1, w2, I2, discard_out_of_bounds=True):
    """For each point of curve 1, compute the distance to curve 2.

    Both curves are normalized axis-wise by their own ranges before computing
    pairwise Euclidean distances, making the metric scale-invariant.

    Args:
        w1, I1: x/y arrays of the first curve.
        w2, I2: x/y arrays of the second curve.
        discard_out_of_bounds: if True, distances for points whose w1 lies
            outside [w2.min(), w2.max()] are set to NaN.

    Returns:
        (w1, dist): the input w1 and the per-point minimum distance to curve 2.
    """
    def _span(a):
        # Guard against flat curves: a zero range would otherwise divide by
        # zero and fill the result with NaN/inf.
        s = np.max(a) - np.min(a)
        return s if s != 0 else 1.0

    points1 = np.array((w1 / _span(w1), I1 / _span(I1))).T
    points2 = np.array((w2 / _span(w2), I2 / _span(I2))).T
    dist = cdist(points1, points2).min(axis=1)
    if discard_out_of_bounds:
        out_of_bounds = np.logical_or(w1 < w2.min(), w1 > w2.max())
        dist[out_of_bounds] = np.nan
    return (w1, dist)
def DBInMemory_test():
    """Build throwaway SQLAlchemy sessions for tests.

    Opens the on-disk game database (../eve.db) read-only style and an
    in-memory saved-data database, then returns both sessions in a dict.
    """
    def rollback():
        # Roll back the saved-data session under the shared lock.
        # NOTE(review): defined but never used or returned below — possibly a
        # leftover or meant to be part of `helper`; confirm.
        with sd_lock:
            saveddata_session.rollback()
    print('Creating database in memory')
    from os.path import realpath, join, dirname, abspath
    debug = False
    # NOTE(review): gamedataCache/saveddataCache are assigned but unused here.
    gamedataCache = True
    saveddataCache = True
    gamedata_version = ''
    # Game data lives in ../eve.db relative to this file; saved data is in-memory.
    gamedata_connectionstring = ('sqlite:///' + realpath(join(dirname(abspath(str(__file__))), '..', 'eve.db')))
    saveddata_connectionstring = 'sqlite:///:memory:'
    class ReadOnlyException(Exception):
        pass
    # A callable connection string acts as a custom connection factory.
    if callable(gamedata_connectionstring):
        gamedata_engine = create_engine('sqlite://', creator=gamedata_connectionstring, echo=debug)
    else:
        gamedata_engine = create_engine(gamedata_connectionstring, echo=debug)
    gamedata_meta = MetaData()
    gamedata_meta.bind = gamedata_engine
    gamedata_session = sessionmaker(bind=gamedata_engine, autoflush=False, expire_on_commit=False)()
    try:
        gamedata_version = gamedata_session.execute("SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'client_build'").fetchone()[0]
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        # Best-effort: a missing metadata table just leaves the version unset.
        print('Missing gamedata version.')
        gamedata_version = None
    if (saveddata_connectionstring is not None):
        if callable(saveddata_connectionstring):
            saveddata_engine = create_engine(creator=saveddata_connectionstring, echo=debug)
        else:
            saveddata_engine = create_engine(saveddata_connectionstring, echo=debug)
        saveddata_meta = MetaData()
        saveddata_meta.bind = saveddata_engine
        saveddata_session = sessionmaker(bind=saveddata_engine, autoflush=False, expire_on_commit=False)()
    else:
        saveddata_meta = None
    sd_lock = threading.Lock()
    # A fresh in-memory database needs its schema created.
    if (saveddata_connectionstring == 'sqlite:///:memory:'):
        saveddata_meta.create_all()
    print(saveddata_engine)
    print(gamedata_engine)
    helper = {'gamedata_session': gamedata_session, 'saveddata_session': saveddata_session}
    return helper
# NOTE(review): the two lines below look like mangled decorators — most likely
# @freeze_time('2020-02-02') and @pytest.mark.parametrize(...) whose prefixes
# were lost during extraction. As written, the line starting with '.parametrize'
# is a syntax error; confirm against the upstream humanize test suite before
# relying on this block.
_time('2020-02-02')
.parametrize('test_input, expected', [(NOW_UTC, 'now'), ((NOW_UTC - dt.timedelta(seconds=1)), 'a second ago'), ((NOW_UTC - dt.timedelta(seconds=30)), '30 seconds ago'), ((NOW_UTC - dt.timedelta(minutes=1, seconds=30)), 'a minute ago'), ((NOW_UTC - dt.timedelta(minutes=2)), '2 minutes ago'), ((NOW_UTC - dt.timedelta(hours=1, minutes=30, seconds=30)), 'an hour ago'), ((NOW_UTC - dt.timedelta(hours=23, minutes=50, seconds=50)), '23 hours ago'), ((NOW_UTC - dt.timedelta(days=1)), 'a day ago'), ((NOW_UTC - dt.timedelta(days=500)), '1 year, 4 months ago'), ((NOW_UTC - dt.timedelta(days=((365 * 2) + 35))), '2 years ago'), ((NOW_UTC + dt.timedelta(seconds=1)), 'a second from now'), ((NOW_UTC + dt.timedelta(seconds=30)), '30 seconds from now'), ((NOW_UTC + dt.timedelta(minutes=1, seconds=30)), 'a minute from now'), ((NOW_UTC + dt.timedelta(minutes=2)), '2 minutes from now'), ((NOW_UTC + dt.timedelta(hours=1, minutes=30, seconds=30)), 'an hour from now'), ((NOW_UTC + dt.timedelta(hours=23, minutes=50, seconds=50)), '23 hours from now'), ((NOW_UTC + dt.timedelta(days=1)), 'a day from now'), ((NOW_UTC + dt.timedelta(days=500)), '1 year, 4 months from now'), ((NOW_UTC + dt.timedelta(days=((365 * 2) + 35))), '2 years from now'), ((NOW_UTC + dt.timedelta(days=10000)), '27 years from now'), ((NOW_UTC - dt.timedelta(days=(365 + 35))), '1 year, 1 month ago'), ((NOW_UTC - dt.timedelta(days=((365 * 2) + 65))), '2 years ago'), ((NOW_UTC - dt.timedelta(days=(365 + 4))), '1 year, 4 days ago')])
def test_naturaltime_timezone_when(test_input: dt.datetime, expected: str) -> None:
    """naturaltime() renders each delta relative to the explicit `when` anchor."""
    assert (humanize.naturaltime(test_input, when=NOW_UTC) == expected)
class CatalogSearch(Snuffling):
    """Snuffling that fetches earthquake events from online catalogs and adds
    them as markers for the currently visible time range."""
    def help(self):
        """Return the HTML help text shown in the snuffling's help window.

        NOTE(review): several <a href=" fragments in this string appear
        truncated/garbled (likely extraction damage) — the rendered links will
        be broken; confirm against the upstream pyrocko source.
        """
        return '\n<html>\n<head>\n<style type="text/css">\nbody { margin-left:10px };\n</style>\n</head>\n<body>\n    <h1 align="center">Catalog Search</h1>\n<p>\n    Retrieve event data from online catalogs.\n</p>\n    <b>Parameters:</b><br />\n    <b>&middot; Catalog</b> - Online database to search for events.<br />\n    <b>&middot; Min Magnitude</b> -\n        Only consider events with magnitude greater than chosen..<br />\n</p>\n<p>\n    Data from the folowing catalogs can be retrieved:<br />\n    &middot;\n    <a href=" />\n    &middot;\n    <a href=" CMT</a><br />\n    &middot;\n    <a href=" />\n</p>\n<p>\n    The USGS catalog allows to destinguish between \'Preliminary\n    Determination of Epicenters\' (PDE) and \'Quick Epicenters Determination\'\n    (PDE-Q). Latter one includes events of approximately the last six\n    weeks. For detailed information about both catalog versions have a look\n    at <a href=" Preliminary Determination of Epicenters (PDE) Bulletin\'</a>.\n</p>\n</body>\n</html>\n    '
    def setup(self):
        """Register the snuffling's parameters and the list of known catalogs."""
        self.catalogs = {'GEOFON': catalog.Geofon(), 'USGS/NEIC US': catalog.USGS('us'), 'Global-CMT': catalog.GlobalCMT(), 'Saxony (Uni-Leipzig)': catalog.Saxony()}
        # These catalogs are queried through the generic FDSN event service
        # instead of dedicated catalog classes.
        fdsn_has_events = ['ISC', 'SCEDC', 'NCEDC', 'IRIS', 'GEONET']
        catkeys = sorted(self.catalogs.keys())
        catkeys.extend(fdsn_has_events)
        self.set_name('Catalog Search')
        self.add_parameter(Choice('Catalog', 'catalog', catkeys[0], catkeys))
        self.add_parameter(Param('Min Magnitude', 'magmin', 0, 0, 10))
        self.set_live_update(False)
    def call(self):
        """Query the selected catalog over the viewer's visible time range and
        add one EventMarker per returned event."""
        viewer = self.get_viewer()
        (tmin, tmax) = viewer.get_time_range()
        cat = self.catalogs.get(self.catalog, None)
        if cat:
            event_names = cat.get_event_names(time_range=(tmin, tmax), magmin=self.magmin)
            for event_name in event_names:
                event = cat.get_event(event_name)
                marker = EventMarker(event)
                self.add_markers([marker])
        else:
            # Fall back to the FDSN event web service for the remaining sites.
            request = fdsn.event(starttime=tmin, endtime=tmax, site=self.catalog.lower(), minmagnitude=self.magmin)
            qml = quakeml.QuakeML.load_xml(request)
            events = qml.get_pyrocko_events()
            for event in events:
                marker = EventMarker(event)
                self.add_markers([marker])
def test__contact_and_muscle_forces_example():
    """Smoke test: the contact + muscle forces example OCP builds without error."""
    from bioptim.examples.muscle_driven_with_contact import contact_forces_inequality_constraint_muscle as ocp_module
    model_path = os.path.dirname(ocp_module.__file__) + '/models/2segments_4dof_2contacts_1muscle.bioMod'
    ocp_module.prepare_ocp(
        biorbd_model_path=model_path,
        phase_time=0.3,
        n_shooting=10,
        min_bound=50,
        max_bound=np.inf,
        expand_dynamics=False,
    )
class CoreStage(_LooksStage, _Sound, _Events, _Control, _Operators, _Sensing, _Variables):
    """The pyStage stage: owns the pygame window, the sprite groups, and the
    main game loop (event handling, updates, drawing, scaling to the window)."""
    def __init__(self, name='Welcome to pyStage!', width=480, height=360):
        """Create the stage window.

        Args:
            name: window caption.
            width, height: logical stage size in pixels (the drawing surface
                is scaled to fit the resizable window).
        """
        # PyStage relies on pygame 2 APIs; fail early with upgrade advice.
        pygame_major = int(pygame.ver.split('.')[0])
        if (pygame_major < 2):
            print('pygame version 2 or higher is required for PyStage.')
            print('Update with the following command:')
            print('pip install --upgrade pygame')
            sys.exit(1)
        self.stage = self
        super().__init__()
        self.facade = None
        # When set, pystage_createsprite wraps new sprites in this facade class.
        self.sprite_facade_class = None
        self.message_broker = MessageBroker(self)
        self.input_manager = InputManager(self)
        pygame.init()
        pygame.display.set_caption(name)
        self.running = False
        self.FPS = 60
        self.dt = 0
        self.sprites = SpriteGroup()
        self.visible_sprites = SpriteGroup()
        self.bubbles = pygame.sprite.Group()
        self.visible_bubbles = pygame.sprite.Group()
        self.monitor_group = pygame.sprite.Group()
        # Per-sprite pen layers, blitted below the sprites each frame.
        self.pen_images = {}
        self.background_color = (255, 255, 255)
        # Logical drawing surface; scaled to the (resizable) window each frame.
        self.surface = pygame.Surface([width, height], flags=pygame.SRCALPHA)
        self.screen = pygame.display.set_mode([width, height], pygame.RESIZABLE)
        self.clock = pygame.time.Clock()
        self.width = width
        self.height = height
        self.rect = pygame.rect.Rect(0, 0, width, height)
        self.center_x = int((width / 2))
        self.center_y = int((height / 2))
        # Window-to-surface scaling state, recomputed every frame.
        self.scale_factor = 1
        self.offset_x = 0
        self.offset_y = 0
        self.timer = 0
        self.show_sprite_boundaries = ('--show-sprite-boundaries' in sys.argv)
    def pystage_createsprite(self, costume='default'):
        """Create a sprite on this stage; returns the facade when one is configured."""
        sprite = CoreSprite(self, costume)
        self.sprites.add(sprite)
        self._update_visible()
        if self.sprite_facade_class:
            return self.sprite_facade_class(sprite)
        else:
            return sprite
    def _update_visible(self):
        """Rebuild the visible-sprite and visible-bubble groups from scratch."""
        self.visible_sprites.empty()
        self.visible_bubbles.empty()
        for sprite in self.sprites:
            assert isinstance(sprite, CoreSprite)
            if sprite.visible:
                self.visible_sprites.add(sprite)
                if sprite.bubble_manager.bubble:
                    self.visible_bubbles.add(sprite.bubble_manager.bubble)
    def _update(self, dt):
        """Advance the stage's own code and refresh visibility.

        NOTE(review): self.code_manager is not assigned in __init__ here —
        presumably provided by one of the mixin base classes; confirm.
        """
        self.code_manager._update(dt)
        self._update_visible()
    def _draw(self, surface: pygame.Surface):
        """Fill the background and blit the stage's current backdrop costume."""
        surface.fill(self.background_color)
        image = self.costume_manager.get_image()
        if (not image):
            return
        surface.blit(image, (0, 0))
    def pystage_play(self):
        """Run the main loop until the window is closed: dispatch events,
        update sprites/bubbles/monitors, draw, and scale to the window."""
        self.running = True
        dt = 0
        # Drag state: which sprite, if any, is being dragged with the mouse.
        left_clicking_and_holding = False
        clicked_sprite = None
        while self.running:
            for event in pygame.event.get():
                if (event.type == pygame.QUIT):
                    self.running = False
                if (event.type == pygame.KEYDOWN):
                    # While a text prompt is active, keys go to the input
                    # manager instead of the sprites' key handlers.
                    if self.input_manager.is_active():
                        self.input_manager.process_key(event)
                    else:
                        for sprite in self.sprites:
                            assert isinstance(sprite, CoreSprite)
                            sprite.code_manager.process_key_pressed(event.key)
                if (event.type == pygame.MOUSEBUTTONDOWN):
                    pos = pygame.Vector2(pygame.mouse.get_pos())
                    # Topmost sprite first; fully transparent pixels don't count.
                    for sprite in self.visible_sprites.sprites()[(- 1)::(- 1)]:
                        assert isinstance(sprite, CoreSprite)
                        if sprite.rect.collidepoint(pos):
                            internal_pos = (pos - sprite.rect.topleft)
                            x = int(internal_pos.x)
                            y = int(internal_pos.y)
                            color = sprite.image.get_at((x, y))
                            if (color.a == 0):
                                continue
                            sprite.code_manager.process_click()
                            break
                if ((event.type == pygame.MOUSEBUTTONDOWN) and (not left_clicking_and_holding) and pygame.mouse.get_pressed()[0]):
                    # Begin dragging the topmost draggable sprite under the cursor.
                    pos = pygame.mouse.get_pos()
                    for sprite in filter((lambda s: s.draggable), self.visible_sprites.sprites()[(- 1)::(- 1)]):
                        assert isinstance(sprite, CoreSprite)
                        sprite_rect = sprite.image.get_rect()
                        sprite_rect.topleft = sprite.rect.topleft
                        if sprite_rect.collidepoint(pos):
                            clicked_pos = ((pos[0] - sprite_rect.left), (pos[1] - sprite_rect.top))
                            if (sprite.image.get_at(clicked_pos).a != 0):
                                left_clicking_and_holding = True
                                clicked_sprite = sprite
            if ((not pygame.mouse.get_pressed()[0]) and left_clicking_and_holding):
                left_clicking_and_holding = False
            if left_clicking_and_holding:
                # Dragged sprite follows the mouse pointer every frame.
                assert isinstance(clicked_sprite, CoreSprite)
                clicked_sprite.motion_goto_pointer()
            # Deliver pending broadcast messages to every sprite.
            for message in self.message_broker.get_messages():
                for sprite in self.sprites:
                    assert isinstance(sprite, CoreSprite)
                    sprite.code_manager.process_broadcast(message)
                self.message_broker.mark_completed()
            self._update(dt)
            self.sprites.update(dt)
            self.bubbles.update()
            self.input_manager.update(dt)
            self.monitor_group.update()
            # Draw order: backdrop, pen layers, sprites, bubbles, input, monitors.
            self._draw(self.surface)
            for sprite in self.pen_images:
                image = self.pen_images[sprite]
                self.surface.blit(image, (0, 0))
            self.visible_sprites.draw(self.surface)
            self.bubbles.draw(self.surface)
            self.input_manager.draw(self.surface)
            self.monitor_group.draw(self.surface)
            # Letterbox-scale the logical surface into the (possibly resized) window.
            factor_x = (self.screen.get_width() / self.surface.get_width())
            factor_y = (self.screen.get_height() / self.surface.get_height())
            self.scale_factor = min(factor_x, factor_y)
            scaled = pygame.transform.smoothscale(self.surface, (int((self.surface.get_width() * self.scale_factor)), int((self.surface.get_height() * self.scale_factor))))
            self.offset_x = int(((self.screen.get_width() - scaled.get_width()) / 2))
            self.offset_y = int(((self.screen.get_height() - scaled.get_height()) / 2))
            self.screen.blit(scaled, (self.offset_x, self.offset_y))
            pygame.display.flip()
            # Frame pacing: dt is the last frame's duration in seconds.
            dt = (self.clock.tick(self.FPS) / 1000)
            self.timer += dt
        pygame.quit()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.